diff --git a/README.md b/README.md
index f9321157..f355021f 100644
--- a/README.md
+++ b/README.md
@@ -7,24 +7,24 @@ GRPC)
 ## Key Features
 
-* Instrumentation support with statsd
-* Log Sink
-* Bigquery Sink
+- Instrumentation support with statsd
+- Log Sink
+- Bigquery Sink
 
 Depot is a sink connector that acts as a bridge between data processing systems and real sinks. The APIs in this library can be used to push data to various sinks. Common sink implementations will be added to this repo.
 
 ## Requirements
 
-* java8 or higher
-* gradle
+- Java 8 or higher
+- Gradle
 
 ## How to use
 
 Explore the following resources to get started:
 
-* [Reference](docs/reference) contains details about configurations of metrics and various sinks
-* [Contribute](docs/contribute/contribution.md) contains resources for anyone who wants to contribute.
+- [Reference](docs/reference) contains details about the configuration of metrics and the various sinks
+- [Contribute](docs/contribute/contribution.md) contains resources for anyone who wants to contribute.
 
 ### Build and run tests
 
@@ -47,21 +47,21 @@ $ ./gradlew clean
 
 ```xml
 <dependency>
-    <groupId>io.odpf</groupId>
+    <groupId>org.raystack</groupId>
     <artifactId>depot</artifactId>
     <version>version</version>
 </dependency>
 ```
 
 ```sh
-implementation group: 'io.odpf', name: 'depot', version: 'version'
+implementation group: 'org.raystack', name: 'depot', version: 'version'
 ```
 
 ### Usage example:
 
 ```java
-public interface OdpfSink extends Closeable {
-    OdpfSinkResponse pushToSink(List<OdpfMessage> messages) throws OdpfSinkException;
+public interface Sink extends Closeable {
+    SinkResponse pushToSink(List<Message> messages) throws SinkException;
 }
 ```
 
@@ -87,12 +87,12 @@ class MyClass {
 
 ### Data types
 
-Currently, sink connector library is supporting protobuf and Json format. We can set the datatype of `OdpfMessage` by
+Currently, the sink connector library supports the protobuf and JSON formats. We can set the data type of `Message` by
 setting `SINK_CONNECTOR_SCHEMA_DATA_TYPE`. Each data type has parsers that take care of deserialization.
 
 ### Adding a new Sink
 
-Each sink will have to implement `OdpfSink` interface. The pushToSink take a batch of messages and return a response
+Each sink has to implement the `Sink` interface. The `pushToSink` method takes a batch of messages and returns a response
 with an error list.
 
 ### Configurations
 
@@ -100,4 +100,5 @@ with an error list.
 Please check the docs folder for details.
 
 ## License
+
 Depot is [Apache 2.0](LICENSE) licensed.
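For a concrete picture of the renamed API in use, here is a minimal sketch of pushing messages through the log sink. It assumes the renamed classes keep the shape of their deleted `io.odpf` counterparts shown later in this diff (`LogSinkFactory`, a `SinkConfig` built from environment variables via the owner `ConfigFactory`); treat it as illustrative rather than the library's documented entry point.

```java
import org.raystack.depot.Sink;
import org.raystack.depot.SinkResponse;
import org.raystack.depot.config.SinkConfig;    // assumed rename of OdpfSinkConfig
import org.raystack.depot.log.LogSinkFactory;   // assumed rename of the io.odpf factory
import org.raystack.depot.message.Message;
import org.aeonbits.owner.ConfigFactory;

import java.util.Collections;
import java.util.List;

public class LogSinkExample {
    public static void main(String[] args) throws Exception {
        // Build the config from environment variables, as the factories in this diff do.
        SinkConfig config = ConfigFactory.create(SinkConfig.class, System.getenv());
        LogSinkFactory factory = new LogSinkFactory(config);
        factory.init();
        try (Sink sink = factory.create()) {
            // Message construction is sink-specific; an empty batch keeps the sketch self-contained.
            List<Message> messages = Collections.emptyList();
            SinkResponse response = sink.pushToSink(messages);
            // Failed messages are indexed by their position in the input list.
            response.getErrors().forEach((index, errorInfo) ->
                    System.err.println("message " + index + " failed: " + errorInfo));
        }
    }
}
```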
diff --git a/build.gradle b/build.gradle
index 8619ab4d..72aec049 100644
--- a/build.gradle
+++ b/build.gradle
@@ -21,8 +21,8 @@ plugins {
     id "io.github.gradle-nexus.publish-plugin" version "1.1.0"
 }
 
-group 'io.odpf'
-version '0.3.8'
+group 'org.raystack'
+version '0.4.0'
 
 repositories {
     mavenCentral()
@@ -33,13 +33,14 @@ dependencies {
     implementation group: 'com.google.protobuf', name: 'protobuf-java', version: '3.1.0'
     implementation group: 'com.datadoghq', name: 'java-dogstatsd-client', version: '2.13.0'
     implementation group: 'com.google.protobuf', name: 'protobuf-java-util', version: '3.1.0'
-    implementation(group: 'io.odpf', name: 'stencil', version: '0.2.1') { exclude group: 'org.slf4j' }
+    implementation(group: 'org.raystack', name: 'stencil', version: '0.4.0') { exclude group: 'org.slf4j' }
     implementation group: 'org.aeonbits.owner', name: 'owner', version: '1.0.9'
-    implementation 'com.google.cloud:google-cloud-bigquery:1.115.0'
+    implementation 'com.google.cloud:google-cloud-bigquerystorage:2.34.2'
+    implementation 'com.google.cloud:google-cloud-bigquery:2.23.2'
     implementation (group: 'com.google.cloud', name: 'google-cloud-bigtable', version: '2.11.2') {
         exclude group: "io.grpc"
     }
-    implementation "io.grpc:grpc-all:1.38.0"
+    implementation 'io.grpc:grpc-all:1.53.0'
     implementation group: 'org.slf4j', name: 'jul-to-slf4j', version: '1.7.35'
     implementation group: 'redis.clients', name: 'jedis', version: '3.0.1'
     implementation group: 'org.apache.commons', name: 'commons-lang3', version: '3.5'
@@ -93,6 +94,8 @@ idea {
         testSourceDirs += file("$projectDir/src/generated/test/java")
     }
 }
+sourceCompatibility = JavaVersion.VERSION_1_8
+targetCompatibility = JavaVersion.VERSION_1_8
 
 protobuf {
     generatedFilesBaseDir = "$projectDir/src/generated"
@@ -142,10 +145,10 @@ publishing {
                 version = project.version
                 name = 'Depot'
                 description = 'A sink connector library containing multiple sink implementations'
-                url = 'https://github.com/odpf/depot'
+                url = 'https://github.com/raystack/depot'
 
                 scm {
-                    url = 'https://github.com/odpf/depot.git'
+                    url = 'https://github.com/raystack/depot.git'
                 }
 
                 licenses {
diff --git a/docs/README.md b/docs/README.md
index f3285d8c..4f9d1149 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -7,26 +7,26 @@ GRPC)
 ## Key Features
 
-* Instrumentation support with statsd
-* Log Sink
-* Bigquery Sink
-* Redis Sink
-* Bigtable Sink
+- Instrumentation support with statsd
+- Log Sink
+- Bigquery Sink
+- Redis Sink
+- Bigtable Sink
 
 Depot is a sink connector that acts as a bridge between data processing systems and real sinks. The APIs in this library can be used to push data to various sinks. Common sink implementations will be added to this repo.
 
 ## Requirements
 
-* java8 or higher
-* gradle
+- Java 8 or higher
+- Gradle
 
 ## How to use
 
 Explore the following resources to get started:
 
-* [Reference](docs/reference) contains details about configurations of metrics and various sinks
-* [Contribute](docs/contribute/contribution.md) contains resources for anyone who wants to contribute.
+- [Reference](docs/reference) contains details about the configuration of metrics and the various sinks
+- [Contribute](docs/contribute/contribution.md) contains resources for anyone who wants to contribute.
 ### Build and run tests
 
@@ -49,21 +49,21 @@ $ ./gradlew clean
 
 ```xml
 <dependency>
-    <groupId>io.odpf</groupId>
+    <groupId>org.raystack</groupId>
     <artifactId>depot</artifactId>
     <version>version</version>
 </dependency>
 ```
 
 ```sh
-implementation group: 'io.odpf', name: 'depot', version: 'version'
+implementation group: 'org.raystack', name: 'depot', version: 'version'
 ```
 
 ### Usage example:
 
 ```java
-public interface OdpfSink extends Closeable {
-    OdpfSinkResponse pushToSink(List<OdpfMessage> messages) throws OdpfSinkException;
+public interface Sink extends Closeable {
+    SinkResponse pushToSink(List<Message> messages) throws SinkException;
 }
 ```
 
@@ -89,12 +89,12 @@ class MyClass {
 
 ### Data types
 
-Currently, sink connector library is supporting protobuf and Json format. We can set the datatype of `OdpfMessage` by
+Currently, the sink connector library supports the protobuf and JSON formats. We can set the data type of `Message` by
 setting `SINK_CONNECTOR_SCHEMA_DATA_TYPE`. Each data type has parsers that take care of deserialization.
 
 ### Adding a new Sink
 
-Each sink will have to implement `OdpfSink` interface. The pushToSink take a batch of messages and return a response
+Each sink has to implement the `Sink` interface. The `pushToSink` method takes a batch of messages and returns a response
 with an error list.
 
 ### Configurations
diff --git a/docs/contribute/contribution.md b/docs/contribute/contribution.md
index b49dfaa9..f35e042a 100644
--- a/docs/contribute/contribution.md
+++ b/docs/contribute/contribution.md
@@ -2,7 +2,7 @@
 The following is a set of guidelines for contributing to Depot. These are mostly guidelines, not rules. Use your best judgment, and feel free to propose changes to this document in a pull request. Here are some important resources:
 
-* Github [issues](https://github.com/odpf/depot/issues) track the ongoing and reported issues.
+- GitHub [issues](https://github.com/raystack/depot/issues) track the ongoing and reported issues.
 
 Development of Depot happens in the open on GitHub, and we are grateful to the community for contributing bug fixes and improvements. Read below to learn how you can take part in improving Depot.
 
@@ -14,44 +14,42 @@
 You don’t have to be a developer to make a contribution. We also need technical writers to improve our documentation.
 
 The following parts are open for contribution:
 
-* Adding a new functionality
-* Improve an existing functionality
-* Adding a new sink
-* Improve an existing sink
-* Provide suggestions to make the user experience better
-* Provide suggestions to Improve the documentation
-
+- Adding new functionality
+- Improving existing functionality
+- Adding a new sink
+- Improving an existing sink
+- Providing suggestions to make the user experience better
+- Providing suggestions to improve the documentation
 
 ## How can I contribute?
 
 We use RFCs and GitHub issues to communicate ideas.
 
-* You can report a bug or suggest a feature enhancement or can just ask questions. Reach out on Github discussions for this purpose.
-* You are also welcome to add a new sink, improve monitoring and logging and improve code quality.
-* You can help with documenting new features or improve existing documentation.
-* You can also review and accept other contributions if you are a maintainer.
+- You can report a bug, suggest a feature enhancement, or just ask questions. Reach out on GitHub discussions for this purpose.
+- You are also welcome to add a new sink, improve monitoring and logging, and improve code quality.
+- You can help with documenting new features or improving existing documentation.
+- You can also review and accept other contributions if you are a maintainer.
-Please submit a PR to the main branch of the `depot` repository once you are ready to submit your contribution. 
+Please submit a PR to the main branch of the `depot` repository once you are ready to submit your contribution.
 Code submission \(including a submission from project maintainers\) requires review and approval from maintainers or code owners.
-PRs that are submitted by the general public need to pass the build. 
+PRs that are submitted by the general public need to pass the build.
 Once the build passes, community members will help review the pull request.
 
 ## Becoming a maintainer
 
 We are always interested in adding new maintainers. What we look for is a series of contributions, good taste, and an ongoing interest in the project.
 
-* maintainers will have write-access to the `depot` repositories.
-* There is no strict protocol for becoming a maintainer or PMC member. Candidates for new maintainers are typically people that are active contributors and community members.
-* Candidates for new maintainers can also be suggested by current maintainers or PMC members.
-* If you would like to become a maintainer, you should start contributing in any of the ways mentioned. You might also want to talk to other maintainers and ask for their advice and guidance.
+- Maintainers will have write access to the `depot` repositories.
+- There is no strict protocol for becoming a maintainer or PMC member. Candidates for new maintainers are typically people who are active contributors and community members.
+- Candidates for new maintainers can also be suggested by current maintainers or PMC members.
+- If you would like to become a maintainer, you should start contributing in any of the ways mentioned. You might also want to talk to other maintainers and ask for their advice and guidance.
 
 ## Guidelines
 
 Please follow these practices for your change to get merged fast and smoothly:
 
-* Contributions can only be accepted if they contain appropriate testing \(Unit and Integration Tests\).
-* If you are introducing a completely new feature or making any major changes to an existing one, we recommend starting with an RFC and get consensus on the basic design first.
-* Make sure your local build is running with all the tests and checkstyle passing.
-* If your change is related to user-facing protocols/configurations, you need to make the corresponding change in the documentation as well.
-* Docs live in the code repo under [`docs`](https://github.com/odpf/depot/tree/main/docs/README.md) so that changes to that can be done in the same PR as changes to the code.
-
+- Contributions can only be accepted if they contain appropriate testing \(Unit and Integration Tests\).
+- If you are introducing a completely new feature or making any major changes to an existing one, we recommend starting with an RFC and getting consensus on the basic design first.
+- Make sure your local build runs with all the tests and checkstyle passing.
+- If your change is related to user-facing protocols/configurations, you need to make the corresponding change in the documentation as well.
+- Docs live in the code repo under [`docs`](https://github.com/raystack/depot/tree/main/docs/README.md) so that changes to them can be done in the same PR as changes to the code.
diff --git a/docs/contribute/development.md b/docs/contribute/development.md
index 29f2967a..652f70dc 100644
--- a/docs/contribute/development.md
+++ b/docs/contribute/development.md
@@ -1,10 +1,10 @@
 # Development Guide
 
-The following guide will help you quickly run an application which uses depot in your local machine. 
+The following guide will help you quickly run an application that uses depot on your local machine.
 The main components of the depot sink connector are:
 
-* Sink: Package which handles sinking data.
-* Metrics: Handles the metrics via StatsD client
+- Sink: Package that handles sinking data.
+- Metrics: Handles the metrics via the StatsD client
 
 ## Requirements
 
@@ -21,28 +21,32 @@
 export PATH=~/Downloads/jdk1.8.0_291/bin:$PATH
 
 Environment variables can be configured in the following way:
 
-* run `export SAMPLE_VARIABLE=287` on a UNIX shell, to directly assign the required environment variable.
+- Run `export SAMPLE_VARIABLE=287` on a UNIX shell to directly assign the required environment variable.
 
-### Custom application
-We need to create an application which has io.odpf.depot as a dependency.
-This application will create any sink that a developer wants to test by using sink-factories.
-The OdpfSink's APIs can be used to send data to sinks and check the response.
+### Custom application
+
+We need to create an application that has org.raystack.depot as a dependency.
+This application can create any sink that a developer wants to test by using the sink factories.
+The Sink APIs can be used to send data to sinks and check the response.
 You can also set up monitoring to see the emitted metrics.
 
+#### Maven and Gradle dependency
 
 ```xml
 <dependency>
-    <groupId>io.odpf</groupId>
+    <groupId>org.raystack</groupId>
     <artifactId>depot</artifactId>
     <version>version</version>
 </dependency>
 ```
 
 ```sh
-implementation group: 'io.odpf', name: 'depot', version: 'version'
+implementation group: 'org.raystack', name: 'depot', version: 'version'
 ```
 
+#### Sample Application
+
 ```java
 class MyClass {
     void createSink() {
@@ -62,8 +66,8 @@ class MyClass {
 
 ### Destination Sink Server
 
-The sink to which the application will send data to, must have its corresponding server set up and configured. 
-The URL and port address of the database server / HTTP/GRPC endpoint , along with other sink - specific parameters 
+The sink to which the application will send data must have its corresponding server set up and configured.
+The URL and port of the database server / HTTP / gRPC endpoint, along with other sink-specific parameters,
 must be configured in the environment variables corresponding to that particular sink.
 
 Configuration parameter variables of each sink can be found in the [Configurations](../reference/configuration/) section.
 
@@ -71,31 +75,33 @@ Configuration parameter variables of each sink can be found in the [Configurations
 ### Schema Registry
 
 Stencil Server is used as a Schema Registry for hosting Protobuf descriptors.
-The environment variable `SCHEMA_REGISTRY_STENCIL_ENABLE` must be set to `true` . 
-Stencil server URL must be specified in the variable `SCHEMA_REGISTRY_STENCIL_URLS` . 
+The environment variable `SCHEMA_REGISTRY_STENCIL_ENABLE` must be set to `true`.
+The Stencil server URL must be specified in the variable `SCHEMA_REGISTRY_STENCIL_URLS`.
 The Proto Descriptor Set file of the messages must be uploaded to the Stencil server.
 
-Refer [this guide](https://github.com/odpf/stencil/tree/master/server#readme) on how to set up and configure the Stencil server, and how to generate and upload Proto descriptor set file to the server.
+Refer to [this guide](https://github.com/raystack/stencil/tree/master/server#readme) for how to set up and configure the Stencil server, and how to generate and upload the Proto descriptor set file to the server.
 
 ### Monitoring
 
-Depot supports sinks specific metrics, and it also provides way to send metrics to statsd reporter. 
+Depot supports sink-specific metrics, and it also provides a way to send metrics to a StatsD reporter.
 You can set up any visualization platform, like Grafana, for monitoring your custom application.
 Following are the typical requirements:
 
-* StatsD host \(e.g. Telegraf\) for aggregation of metrics from StatsD client
-* A time-series database \(e.g. InfluxDB\) to store the metrics
-* GUI visualization dashboard \(e.g. Grafana\) for detailed visualisation of metrics
-
+- StatsD host \(e.g. Telegraf\) for aggregation of metrics from the StatsD client
+- A time-series database \(e.g. InfluxDB\) to store the metrics
+- GUI visualization dashboard \(e.g. Grafana\) for detailed visualisation of metrics
 
 ### Adding a new Sink
 
-To add a new sink implementation the Sink class has to implement OdpfSink interface
+To add a new sink implementation, the sink class has to implement the `Sink` interface:
+
 ```java
-public interface OdpfSink extends Closeable {
-    OdpfSinkResponse pushToSink(List<OdpfMessage> messages) throws OdpfSinkException;
+public interface Sink extends Closeable {
+    SinkResponse pushToSink(List<Message> messages) throws SinkException;
 }
 ```
-Sink implementations will normally have a factory class too which will be used to setup and create objects of Impl classes.
+
+Sink implementations will normally have a factory class as well, which is used to set up and create objects of the implementation classes.
 
 ## Style Guide
 
@@ -116,8 +122,7 @@ $ git commit -s -m "feat: my first commit"
 
 #### Good practices to keep in mind
 
-* Follow the [conventional commit](https://www.conventionalcommits.org/en/v1.0.0/) format for all commit messages.
-* Fill in the description based on the default template configured when you first open the PR
-* Include kind label when opening the PR
-* Avoid force-pushing as it makes reviewing difficult
-
+- Follow the [conventional commit](https://www.conventionalcommits.org/en/v1.0.0/) format for all commit messages.
+- Fill in the description based on the default template configured when you first open the PR.
+- Include a kind label when opening the PR.
+- Avoid force-pushing, as it makes reviewing difficult.
diff --git a/docs/reference/configuration/generic.md b/docs/reference/configuration/generic.md
index 81f9b812..80870655 100644
--- a/docs/reference/configuration/generic.md
+++ b/docs/reference/configuration/generic.md
@@ -6,66 +6,66 @@ All sinks require the following variables to be set
 
 Application prefix for sink metrics.
-* Example value: `application_`
-* Type: `required`
+- Example value: `application_`
+- Type: `required`
 
 ## `SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS`
 
-OdpfMessage log-message schema class
+Message log-message schema class
 
-* Example value: `io.odpf.schema.MessageClass`
-* Type: `required`
+- Example value: `org.raystack.schema.MessageClass`
+- Type: `required`
 
 ## `SINK_CONNECTOR_SCHEMA_PROTO_KEY_CLASS`
 
-OdpfMessage log-key schema class
+Message log-key schema class
 
-* Example value: `io.odpf.schema.KeyClass`
-* Type: `required`
+- Example value: `org.raystack.schema.KeyClass`
+- Type: `required`
 
 ## `SINK_CONNECTOR_SCHEMA_DATA_TYPE`
 
-OdpfMessage raw data type
+Message raw data type
 
-* Example value: `JSON`
-* Type: `required`
-* Default: `PROTOBUF`
+- Example value: `JSON`
+- Type: `required`
+- Default: `PROTOBUF`
 
 ## `SINK_CONNECTOR_SCHEMA_MESSAGE_MODE`
 
 The type of raw message to read from
 
-* Example value: `LOG_MESSAGE`
-* Type: `required`
-* Default: `LOG_MESSAGE`
+- Example value: `LOG_MESSAGE`
+- Type: `required`
+- Default: `LOG_MESSAGE`
 
 ## `SINK_CONNECTOR_SCHEMA_PROTO_ALLOW_UNKNOWN_FIELDS_ENABLE`
 
 Allow unknown fields in the proto schema
 
-* Example value: `true`
-* Type: `required`
-* Default: `false`
+- Example value: `true`
+- Type: `required`
+- Default: `false`
 
 ## `METRIC_STATSD_HOST`
 
 URL of the StatsD host
 
-* Example value: `localhost`
-* Type: `optional`
-* Default value`: localhost`
+- Example value: `localhost`
+- Type: `optional`
+- Default value: `localhost`
 
 ## `METRIC_STATSD_PORT`
 
 Port of the StatsD host
 
-* Example value: `8125`
-* Type: `optional`
-* Default value`: 8125`
+- Example value: `8125`
+- Type: `optional`
+- Default value: `8125`
 
 ## `METRIC_STATSD_TAGS`
 
 Global tags for StatsD metrics. Tags must be comma-separated.
 
-* Example value: `team=engineering,app=myapp`
-* Type: `optional`
\ No newline at end of file
+- Example value: `team=engineering,app=myapp`
+- Type: `optional`
diff --git a/docs/reference/odpf_sink_response.md b/docs/reference/sink_response.md
similarity index 63%
rename from docs/reference/odpf_sink_response.md
rename to docs/reference/sink_response.md
index 1fe749f3..2235c196 100644
--- a/docs/reference/odpf_sink_response.md
+++ b/docs/reference/sink_response.md
@@ -1,16 +1,16 @@
-# OdpfSinkResponse
+# SinkResponse
 
 ```java
-public class OdpfSinkResponse {
+public class SinkResponse {
     private final Map<Long, ErrorInfo> errors = new HashMap<>();
     ...
 }
 ```
 
-SinkResponse will be returned by odpfSink.pushToSink(messageList) function call.
+`SinkResponse` is returned by the `sink.pushToSink(messageList)` call.
 The response contains an error map indexed by the position of the failed message in the input list.
 
 ## Errors
 
-These errors are returned by sinks in the OdpfSinkResponse object. The error type are:
+These errors are returned by sinks in the SinkResponse object. The error types are:
 
 * DESERIALIZATION_ERROR
 * INVALID_MESSAGE_ERROR
diff --git a/docs/roadmap.md b/docs/roadmap.md
index 1eef242e..3c80da30 100644
--- a/docs/roadmap.md
+++ b/docs/roadmap.md
@@ -1,21 +1,21 @@
 # Roadmap
 
-In the following section, you can learn about what features we're working on, 
+In the following section, you can learn about what features we're working on,
 what stage they're in, and when we expect to bring them to you.
 
-Have any questions or comments about items on the roadmap? 
-Join the [discussions](https://github.com/odpf/depot/discussions).
+Have any questions or comments about items on the roadmap?
+Join the [discussions](https://github.com/raystack/depot/discussions).
-We’re planning to iterate on the format of the roadmap itself, 
-and we see the potential to engage more in discussions 
+We’re planning to iterate on the format of the roadmap itself,
+and we see the potential to engage more in discussions
 about the future of sink connectors.
 
-If you have feedback about the roadmap section itself, 
-such as how the issues are presented, 
-let us know through [discussions](https://github.com/odpf/depot/discussions).
+If you have feedback about the roadmap section itself,
+such as how the issues are presented,
+let us know through [discussions](https://github.com/raystack/depot/discussions).
 
 ## Depot 0.1.4
 
 ### Feature enhancements
 
-* Support for Statsd Metrics
-* Support for LogSink
-* Support for [BigQuery](https://cloud.google.com/bigquery) sink
+- Support for StatsD metrics
+- Support for LogSink
+- Support for the [BigQuery](https://cloud.google.com/bigquery) sink
diff --git a/docs/sinks/bigquery.md b/docs/sinks/bigquery.md
index 43cbde5b..450c1816 100644
--- a/docs/sinks/bigquery.md
+++ b/docs/sinks/bigquery.md
@@ -1,110 +1,116 @@
 # Bigquery Sink
 
 ### Datatype Protobuf
 
-Bigquery Sink has several responsibilities, first creation of bigquery table and dataset when they are not exist, 
+Bigquery Sink has several responsibilities: first, creating the BigQuery table and dataset when they do not exist;
 second, updating the BigQuery table schema based on the latest protobuf schema; and third, translating protobuf messages into BigQuery records and inserting them into BigQuery tables.
 The sink uses the BigQuery [Streaming API](https://cloud.google.com/bigquery/streaming-data-into-bigquery) to insert records into BigQuery tables.
 
 ### Datatype JSON
 
 Bigquery Sink likewise creates the BigQuery table and dataset when they do not exist.
 Currently, we support dynamic schemas by inferring them from the incoming JSON data, so the BigQuery schema is updated by taking a diff of the fields in the JSON data and the actual table fields.
 Currently, we only support the string data type for fields, so all incoming JSON values are converted to the string type, except for metadata columns and the partition key.
 
 ## Bigquery Table Schema Update
 
-### Protobuf
-Bigquery Sink update the bigquery table schema on separate table update operation. Bigquery utilise [Stencil](https://github.com/odpf/stencil) to parse protobuf messages generate schema and update bigquery tables with the latest schema.
-The stencil client periodically reload the descriptor cache. Table schema update happened after the descriptor caches uploaded.
+### Protobuf
+
+Bigquery Sink updates the BigQuery table schema in a separate table-update operation. The sink uses [Stencil](https://github.com/raystack/stencil) to parse protobuf messages, generate the schema, and update BigQuery tables with the latest schema.
+The Stencil client periodically reloads the descriptor cache; the table schema is updated after the descriptor caches are reloaded.
 
 ### JSON
+
 Bigquery Sink creates the table with the initial columns mentioned in the config. When new fields arrive in the JSON data, they are added to the BigQuery table.
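To make the JSON path above concrete, here is a hedged sketch of wiring a BigQuery sink for JSON data. It mirrors the `BigQuerySinkFactory(Map, StatsDReporter, Function)` constructor visible in the deleted `io.odpf` sources later in this diff, and it assumes the renamed factory keeps that shape; the config keys come from this document, and a real run would also need project, dataset, table, and credential settings.

```java
import org.raystack.depot.Sink;
import org.raystack.depot.bigquery.BigQuerySinkFactory;  // assumed rename of the io.odpf factory
import org.raystack.depot.metrics.StatsDReporter;
import com.timgroup.statsd.NoOpStatsDClient;

import java.util.HashMap;
import java.util.Map;

public class JsonBigQuerySinkSetup {
    public static Sink create() {
        Map<String, String> env = new HashMap<>();
        env.put("SINK_CONNECTOR_SCHEMA_DATA_TYPE", "JSON");
        // The partition key column must exist at table-creation time for the dynamic JSON schema.
        env.put("SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp");
        env.put("SINK_BIGQUERY_ADD_METADATA_ENABLED", "true");
        // Null rowIDCreator matches the factory's own no-insert-id overload.
        BigQuerySinkFactory factory =
                new BigQuerySinkFactory(env, new StatsDReporter(new NoOpStatsDClient()), null);
        factory.init();   // creates/updates the dataset and table
        return factory.create();
    }
}
```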
+
 ### Flow chart for data type json sink and schema update
+
 ![](../images/bigquery-json-flow-diagram.svg)
 
 ## Protobuf - Bigquery Table Type Mapping
 
-Here are type conversion between protobuf type and bigquery type :
-
-| Protobuf Type | Bigquery Type |
-| --- | ----------- |
-| bytes | BYTES |
-| string | STRING |
-| enum | STRING |
-| float | FLOAT |
-| double | FLOAT |
-| bool | BOOLEAN |
-| int64, uint64, int32, uint32, fixed64, fixed32, sfixed64, sfixed32, sint64, sint32 | INTEGER |
-| message | RECORD |
-| .google.protobuf.Timestamp | TIMESTAMP |
-| .google.protobuf.Struct | STRING (Json Serialised) |
-| .google.protobuf.Duration | RECORD |
+Here is the type conversion between protobuf types and BigQuery types:
+
+| Protobuf Type                                                                      | Bigquery Type            |
+| ---------------------------------------------------------------------------------- | ------------------------ |
+| bytes                                                                              | BYTES                    |
+| string                                                                             | STRING                   |
+| enum                                                                               | STRING                   |
+| float                                                                              | FLOAT                    |
+| double                                                                             | FLOAT                    |
+| bool                                                                               | BOOLEAN                  |
+| int64, uint64, int32, uint32, fixed64, fixed32, sfixed64, sfixed32, sint64, sint32 | INTEGER                  |
+| message                                                                            | RECORD                   |
+| .google.protobuf.Timestamp                                                         | TIMESTAMP                |
+| .google.protobuf.Struct                                                            | STRING (Json Serialised) |
+| .google.protobuf.Duration                                                          | RECORD                   |
 
 | Protobuf Modifier | Bigquery Modifier |
-| --- | ----------- |
-| repeated | REPEATED |
-
+| ----------------- | ----------------- |
+| repeated          | REPEATED          |
 
 ## Partitioning
 
 Bigquery Sink supports creating tables with a partition configuration. Currently, Bigquery Sink only supports time-based partitioning.
-To have time based partitioning protobuf `Timestamp` as field is needed on the protobuf message. The protobuf field will be used as partitioning column on table creation. 
+For time-based partitioning, a protobuf `Timestamp` field is needed on the protobuf message. That field will be used as the partitioning column on table creation.
 The time partitioning type that is currently supported is `DAY` partitioning.
 
-## Clustering 
+## Clustering
 
-Bigquery Sink support for creating and modifying clustering on the table. Clustering can improve the performance of certain types of queries such as queries that use filter clauses and queries that aggregate data. 
-When data is written to a clustered table by a query job or a load job, BigQuery sorts the data using the values in the clustering columns. These values are used to organize the data into multiple blocks in BigQuery storage. 
+Bigquery Sink supports creating and modifying clustering on the table. Clustering can improve the performance of certain types of queries, such as queries that use filter clauses and queries that aggregate data.
+When data is written to a clustered table by a query job or a load job, BigQuery sorts the data using the values in the clustering columns. These values are used to organize the data into multiple blocks in BigQuery storage.
 When you submit a query that contains a clause that filters data based on the clustering columns, BigQuery uses the sorted blocks to eliminate scans of unnecessary data.
 You might not see a significant difference in query performance between a clustered and unclustered table if the table or partition is under 1 GB.
 Follow [this](https://cloud.google.com/bigquery/docs/clustered-tables) for more details on BigQuery table clustering.
 
 ## Metadata
 
-For data quality checking purposes, sometimes some metadata need to be added on the record. 
-if `SINK_BIGQUERY_ADD_METADATA_ENABLED` is true then the metadata will be added. 
+For data quality checking purposes, some metadata sometimes needs to be added to the record.
+If `SINK_BIGQUERY_ADD_METADATA_ENABLED` is true, then the metadata will be added.
 `SINK_BIGQUERY_METADATA_NAMESPACE` specifies a namespace under which the metadata columns are added;
 if the namespace is empty, the metadata columns are added at the root level.
 `SINK_BIGQUERY_METADATA_COLUMNS_TYPES` is set with the Kafka metadata columns and their types.
 Below is an example of metadata columns that can be added for Kafka records.
 
 ## Default columns for json data type
+
 With a dynamic schema for JSON, we need to create the table with some default columns; for example, the partition key needs to be set during creation of the table.
 Example: `SINK_BIGQUERY_DEFAULT_COLUMNS=event_timestamp=timestamp`
 The metadata columns are added when the input data contains values for them; when such values are missing, the resulting missing-fields errors are handled by the JSON error handler, which adds the columns.
 
-| Fully Qualified Column Name | Type | Modifier |
-| --- | ----------- | ------- |
-| metadata_column | RECORD | NULLABLE |
-| metadata_column.message_partition | INTEGER | NULLABLE |
-| metadata_column.message_offset | INTEGER | NULLABLE |
-| metadata_column.message_topic | STRING | NULLABLE |
+| Fully Qualified Column Name       | Type      | Modifier |
+| --------------------------------- | --------- | -------- |
+| metadata_column                   | RECORD    | NULLABLE |
+| metadata_column.message_partition | INTEGER   | NULLABLE |
+| metadata_column.message_offset    | INTEGER   | NULLABLE |
+| metadata_column.message_topic     | STRING    | NULLABLE |
 | metadata_column.message_timestamp | TIMESTAMP | NULLABLE |
-| metadata_column.load_time | TIMESTAMP | NULLABLE |
+| metadata_column.load_time         | TIMESTAMP | NULLABLE |
 
 ## Errors Handling
 
 The response can contain multiple errors, which will be sent to the application.
-| Error Name | Generic Error Type | Description |
-| --- | ----------- | ------- |
-| Stopped Error | SINK_5XX_ERROR | Error on a row insertion that happened because insert job is cancelled because other record is invalid although current record is valid |
-| Out of bounds Error | SINK_4XX_ERROR | Error on a row insertion the partitioned column has a date value less than 5 years and more than 1 year in the future |
-| Invalid schema Error | SINK_4XX_ERROR | Error on a row insertion when there is a new field that is not exist on the table or when there is required field on the table |
-| Other Error | SINK_UNKNOWN_ERROR | Uncategorized error |
+| Error Name           | Generic Error Type | Description                                                                                                                                     |
+| -------------------- | ------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------- |
+| Stopped Error        | SINK_5XX_ERROR     | Error on a row insertion that happens when the insert job is cancelled because another record in the batch is invalid, even though the current record is valid |
+| Out of bounds Error  | SINK_4XX_ERROR     | Error on a row insertion when the partitioned column has a date value more than 5 years in the past or more than 1 year in the future            |
+| Invalid schema Error | SINK_4XX_ERROR     | Error on a row insertion when there is a new field that does not exist on the table, or when a field that the table requires is missing          |
+| Other Error          | SINK_UNKNOWN_ERROR | Uncategorized error                                                                                                                               |
 
 ## Google Cloud Bigquery IAM Permission
 
 Several IAM permissions are required for the BigQuery sink to run properly:
 
-* Create and update Table
-  * bigquery.tables.create
-  * bigquery.tables.get
-  * bigquery.tables.update
-* Create and update Dataset
-  * bigquery.datasets.create
-  * bigquery.datasets.get
-  * bigquery.datasets.update
-* Stream insert to Table
-  * bigquery.tables.updateData
+- Create and update Table
+  - bigquery.tables.create
  - bigquery.tables.get
+  - bigquery.tables.update
+- Create and update Dataset
+  - bigquery.datasets.create
+  - bigquery.datasets.get
+  - bigquery.datasets.update
+- Stream insert to Table
+  - bigquery.tables.updateData
 
 Further documentation on BigQuery IAM permissions is available [here](https://cloud.google.com/bigquery/streaming-data-into-bigquery).
diff --git a/docs/sinks/bigtable.md b/docs/sinks/bigtable.md
index 2bcb7f52..3242ea31 100644
--- a/docs/sinks/bigtable.md
+++ b/docs/sinks/bigtable.md
@@ -1,17 +1,19 @@
 # Bigtable Sink
 
 ## Overview
+
 Depot Bigtable Sink translates protobuf messages to Bigtable records and inserts them into a Bigtable table. Its other responsibilities include validating the provided [column-family-schema](../reference/configuration/bigtable.md#sink_bigtable_column_family_mapping) and checking whether the configured table exists in the [Bigtable instance](../reference/configuration/bigtable.md#sink_bigtable_instance_id).
 
 Depot uses the [Java Client Library for the Cloud Bigtable API](https://cloud.google.com/bigtable/docs/reference/libraries) to perform any operations on Bigtable.
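By analogy with the other factories in this diff, an application might wire the Bigtable sink roughly as follows; `BigTableSinkFactory` and its constructor are assumptions patterned on the BigQuery factory, and the config values are placeholders (see the setup checklist below for what must already exist, and the configuration reference for the column-family-mapping format).

```java
import org.raystack.depot.Sink;
import org.raystack.depot.bigtable.BigTableSinkFactory;  // assumed, by analogy with the other factories
import org.raystack.depot.metrics.StatsDReporter;
import com.timgroup.statsd.NoOpStatsDClient;

import java.util.HashMap;
import java.util.Map;

public class BigtableSinkSetup {
    public static Sink create() {
        Map<String, String> env = new HashMap<>();
        env.put("SINK_BIGTABLE_GOOGLE_CLOUD_PROJECT_ID", "my-project");  // placeholder values
        env.put("SINK_BIGTABLE_INSTANCE_ID", "my-instance");
        env.put("SINK_BIGTABLE_TABLE_ID", "my-table");
        // Column-family mapping format is described in the configuration reference.
        env.put("SINK_BIGTABLE_COLUMN_FAMILY_MAPPING", "...");
        BigTableSinkFactory factory =
                new BigTableSinkFactory(env, new StatsDReporter(new NoOpStatsDClient()));
        factory.init();   // validates the column-family schema and table existence
        return factory.create();
    }
}
```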
 ## Setup Required
+
 To be able to insert/update records in Bigtable, one must have the following setup in place:
 
-* [Bigtable Instance](../reference/configuration/bigtable.md#sink_bigtable_instance_id) belonging to the [GCP project](../reference/configuration/bigtable.md#sink_bigtable_google_cloud_project_id) provided in configuration
-* Bigtable [Table](../reference/configuration/bigtable.md#sink_bigtable_table_id) where the records are supposed to be inserted/updated
-* Column families that are provided as part of [column-family-mapping](../reference/configuration/bigtable.md#sink_bigtable_column_family_mapping)
-* Google cloud [Bigtable IAM permission](https://cloud.google.com/bigtable/docs/access-control) required to access and modify the configured Bigtable Instance and Table
+- [Bigtable Instance](../reference/configuration/bigtable.md#sink_bigtable_instance_id) belonging to the [GCP project](../reference/configuration/bigtable.md#sink_bigtable_google_cloud_project_id) provided in configuration
+- Bigtable [Table](../reference/configuration/bigtable.md#sink_bigtable_table_id) where the records are supposed to be inserted/updated
+- Column families that are provided as part of [column-family-mapping](../reference/configuration/bigtable.md#sink_bigtable_column_family_mapping)
+- Google Cloud [Bigtable IAM permissions](https://cloud.google.com/bigtable/docs/access-control) required to access and modify the configured Bigtable Instance and Table
 
 ## Metrics
 
@@ -19,22 +21,22 @@ Check out the list of [metrics](../reference/metrics.md#bigtable-sink) captured
 
 ## Error Handling
 
-[BigtableResponse](../../src/main/java/io/odpf/depot/bigtable/response/BigTableResponse.java) class have the list of failed [mutations](https://cloud.google.com/bigtable/docs/writes#write-types). [BigtableResponseParser](../../src/main/java/io/odpf/depot/bigtable/parser/BigTableResponseParser.java) looks for errors from each failed mutation and create [ErrorInfo](../../src/main/java/io/odpf/depot/error/ErrorInfo.java) objects based on the type/HttpStatusCode of the underlying error. This error info is then sent to the application.
+The [BigtableResponse](../../src/main/java/org/raystack/depot/bigtable/response/BigTableResponse.java) class holds the list of failed [mutations](https://cloud.google.com/bigtable/docs/writes#write-types). [BigtableResponseParser](../../src/main/java/org/raystack/depot/bigtable/parser/BigTableResponseParser.java) looks for errors from each failed mutation and creates [ErrorInfo](../../src/main/java/org/raystack/depot/error/ErrorInfo.java) objects based on the type/HTTP status code of the underlying error. This error info is then sent to the application.
-| Error From Bigtable | Error Type Captured |
-| --------------- | -------------------- |
-| Retryable Error | SINK_RETRYABLE_ERROR |
-| Having status code in range 400-499 | SINK_4XX_ERROR |
-| Having status code in range 500-599 | SINK_5XX_ERROR |
-| Any other Error | SINK_UNKNOWN_ERROR |
+| Error From Bigtable                 | Error Type Captured  |
+| ----------------------------------- | -------------------- |
+| Retryable Error                     | SINK_RETRYABLE_ERROR |
+| Having status code in range 400-499 | SINK_4XX_ERROR       |
+| Having status code in range 500-599 | SINK_5XX_ERROR       |
+| Any other Error                     | SINK_UNKNOWN_ERROR   |
 
 ### Error Telemetry
 
-[BigtableResponseParser](../../src/main/java/io/odpf/depot/bigtable/parser/BigTableResponseParser.java) looks for any specific error types sent from Bigtable and capture those under [BigtableTotalErrorMetrics](../reference/metrics.md#bigtable-sink) with suitable error tags.
+[BigtableResponseParser](../../src/main/java/org/raystack/depot/bigtable/parser/BigTableResponseParser.java) looks for any specific error types sent from Bigtable and captures those under [BigtableTotalErrorMetrics](../reference/metrics.md#bigtable-sink) with suitable error tags.
 
-| Error Type | Error Tag Assigned |
-| --------------- | -------------------- |
-| Bad Request | BAD_REQUEST |
-| Quota Failure | QUOTA_FAILURE |
+| Error Type           | Error Tag Assigned   |
+| -------------------- | -------------------- |
+| Bad Request          | BAD_REQUEST          |
+| Quota Failure        | QUOTA_FAILURE        |
 | Precondition Failure | PRECONDITION_FAILURE |
-| Any other Error | RPC_FAILURE |
+| Any other Error      | RPC_FAILURE          |
diff --git a/gradlew.bat b/gradlew.bat
index ac1b06f9..f9356d16 100644
--- a/gradlew.bat
+++ b/gradlew.bat
@@ -36,11 +36,11 @@ for %%i in ("%APP_HOME%") do set APP_HOME=%%~fi
 set DEFAULT_JVM_OPTS="-Xmx64m" "-Xms64m"
 
 @rem Find java.exe
-if defined JAVA_HOME goto findJavaFromJavaHome
+if defined JAVA_HOME goto findJavaFromJavaHome
 
 set JAVA_EXE=java.exe
 %JAVA_EXE% -version >NUL 2>&1
-if "%ERRORLEVEL%" == "0" goto execute
+if "%ERRORLEVEL%" == "0" goto execute
 
 echo.
 echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH.
@@ -48,13 +48,13 @@ echo.
 echo Please set the JAVA_HOME variable in your environment to match the
 echo location of your Java installation.
 
-goto fail
+goto fail
 
 :findJavaFromJavaHome
 set JAVA_HOME=%JAVA_HOME:"=%
 set JAVA_EXE=%JAVA_HOME%/bin/java.exe
 
-if exist "%JAVA_EXE%" goto execute
+if exist "%JAVA_EXE%" goto execute
 
 echo.
 echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME%
@@ -62,7 +62,7 @@ echo.
 echo Please set the JAVA_HOME variable in your environment to match the
 echo location of your Java installation.
-goto fail
+goto fail
 
 :execute
 @rem Setup the command line
 
@@ -75,7 +75,7 @@ set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar
 
 :end
 @rem End local scope for the variables with windows NT shell
-if "%ERRORLEVEL%"=="0" goto mainEnd
+if "%ERRORLEVEL%"=="0" goto mainEnd
 
 :fail
 rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of
diff --git a/src/main/java/io/odpf/depot/OdpfSink.java b/src/main/java/io/odpf/depot/OdpfSink.java
deleted file mode 100644
index 558df726..00000000
--- a/src/main/java/io/odpf/depot/OdpfSink.java
+++ /dev/null
@@ -1,14 +0,0 @@
-package io.odpf.depot;
-
-import io.odpf.depot.exception.OdpfSinkException;
-import io.odpf.depot.message.OdpfMessage;
-
-import java.io.Closeable;
-import java.util.List;
-
-public interface OdpfSink extends Closeable {
-
-    OdpfSinkResponse pushToSink(List<OdpfMessage> messages) throws OdpfSinkException;
-}
-
-
diff --git a/src/main/java/io/odpf/depot/bigquery/BigQuerySinkFactory.java b/src/main/java/io/odpf/depot/bigquery/BigQuerySinkFactory.java
deleted file mode 100644
index 39a62928..00000000
--- a/src/main/java/io/odpf/depot/bigquery/BigQuerySinkFactory.java
+++ /dev/null
@@ -1,90 +0,0 @@
-package io.odpf.depot.bigquery;
-
-import com.timgroup.statsd.NoOpStatsDClient;
-import io.odpf.depot.bigquery.handler.ErrorHandler;
-import io.odpf.depot.bigquery.handler.ErrorHandlerFactory;
-import io.odpf.depot.message.OdpfMessageParser;
-import io.odpf.depot.message.OdpfMessageParserFactory;
-import io.odpf.depot.metrics.BigQueryMetrics;
-import io.odpf.depot.metrics.Instrumentation;
-import io.odpf.depot.metrics.StatsDReporter;
-import io.odpf.depot.stencil.OdpfStencilUpdateListener;
-import io.odpf.depot.OdpfSink;
-import io.odpf.depot.bigquery.client.BigQueryClient;
-import io.odpf.depot.bigquery.client.BigQueryRow;
-import io.odpf.depot.bigquery.client.BigQueryRowWithInsertId;
-import io.odpf.depot.bigquery.client.BigQueryRowWithoutInsertId;
-import io.odpf.depot.bigquery.converter.MessageRecordConverterCache;
-import io.odpf.depot.config.BigQuerySinkConfig;
-import org.aeonbits.owner.ConfigFactory;
-
-import java.io.IOException;
-import java.util.Map;
-import java.util.function.Function;
-
-public class BigQuerySinkFactory {
-
-    private final StatsDReporter statsDReporter;
-    private BigQueryClient bigQueryClient;
-    private BigQueryRow rowCreator;
-    private final Function<Map<String, Object>, String> rowIDCreator;
-    private BigQueryMetrics bigQueryMetrics;
-    private ErrorHandler errorHandler;
-    private MessageRecordConverterCache converterCache;
-    private final BigQuerySinkConfig sinkConfig;
-
-    public BigQuerySinkFactory(Map<String, String> env, StatsDReporter statsDReporter, Function<Map<String, Object>, String> rowIDCreator) {
-        this(ConfigFactory.create(BigQuerySinkConfig.class, env), statsDReporter, rowIDCreator);
-    }
-
-    public BigQuerySinkFactory(BigQuerySinkConfig sinkConfig, StatsDReporter statsDReporter, Function<Map<String, Object>, String> rowIDCreator) {
-        this.sinkConfig = sinkConfig;
-        this.rowIDCreator = rowIDCreator;
-        this.statsDReporter = statsDReporter;
-    }
-
-    public BigQuerySinkFactory(BigQuerySinkConfig sinkConfig) {
-        this(sinkConfig, new StatsDReporter(new NoOpStatsDClient()), null);
-    }
-
-    public BigQuerySinkFactory(BigQuerySinkConfig sinkConfig, StatsDReporter statsDReporter) {
-        this(sinkConfig, statsDReporter, null);
-    }
-
-
-    public BigQuerySinkFactory(BigQuerySinkConfig sinkConfig, Function<Map<String, Object>, String> rowIDCreator) {
-        this(sinkConfig, new StatsDReporter(new NoOpStatsDClient()), rowIDCreator);
-    }
-
-
-    public void init() {
-        try {
-            this.bigQueryMetrics
= new BigQueryMetrics(sinkConfig); - this.bigQueryClient = new BigQueryClient(sinkConfig, bigQueryMetrics, new Instrumentation(statsDReporter, BigQueryClient.class)); - this.converterCache = new MessageRecordConverterCache(); - this.errorHandler = ErrorHandlerFactory.create(sinkConfig, bigQueryClient, statsDReporter); - OdpfStencilUpdateListener odpfStencilUpdateListener = BigqueryStencilUpdateListenerFactory.create(sinkConfig, bigQueryClient, converterCache, statsDReporter); - OdpfMessageParser odpfMessageParser = OdpfMessageParserFactory.getParser(sinkConfig, statsDReporter, odpfStencilUpdateListener); - odpfStencilUpdateListener.setOdpfMessageParser(odpfMessageParser); - odpfStencilUpdateListener.updateSchema(); - - if (sinkConfig.isRowInsertIdEnabled()) { - this.rowCreator = new BigQueryRowWithInsertId(rowIDCreator); - } else { - this.rowCreator = new BigQueryRowWithoutInsertId(); - } - } catch (IOException e) { - throw new IllegalArgumentException("Exception occurred while creating sink", e); - } - } - - public OdpfSink create() { - return new BigQuerySink( - bigQueryClient, - converterCache, - rowCreator, - bigQueryMetrics, - new Instrumentation(statsDReporter, BigQuerySink.class), - errorHandler); - } -} diff --git a/src/main/java/io/odpf/depot/bigquery/BigqueryStencilUpdateListenerFactory.java b/src/main/java/io/odpf/depot/bigquery/BigqueryStencilUpdateListenerFactory.java deleted file mode 100644 index 33472548..00000000 --- a/src/main/java/io/odpf/depot/bigquery/BigqueryStencilUpdateListenerFactory.java +++ /dev/null @@ -1,24 +0,0 @@ -package io.odpf.depot.bigquery; - -import io.odpf.depot.bigquery.client.BigQueryClient; -import io.odpf.depot.bigquery.converter.MessageRecordConverterCache; -import io.odpf.depot.bigquery.json.BigqueryJsonUpdateListener; -import io.odpf.depot.bigquery.proto.BigqueryProtoUpdateListener; -import io.odpf.depot.config.BigQuerySinkConfig; -import io.odpf.depot.exception.ConfigurationException; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.metrics.StatsDReporter; -import io.odpf.depot.stencil.OdpfStencilUpdateListener; - -public class BigqueryStencilUpdateListenerFactory { - public static OdpfStencilUpdateListener create(BigQuerySinkConfig config, BigQueryClient bqClient, MessageRecordConverterCache converterCache, StatsDReporter statsDReporter) { - switch (config.getSinkConnectorSchemaDataType()) { - case JSON: - return new BigqueryJsonUpdateListener(config, converterCache, bqClient, new Instrumentation(statsDReporter, BigqueryJsonUpdateListener.class)); - case PROTOBUF: - return new BigqueryProtoUpdateListener(config, bqClient, converterCache); - default: - throw new ConfigurationException("Schema Type is not supported"); - } - } -} diff --git a/src/main/java/io/odpf/depot/bigquery/models/BQField.java b/src/main/java/io/odpf/depot/bigquery/models/BQField.java deleted file mode 100644 index 57e7da6b..00000000 --- a/src/main/java/io/odpf/depot/bigquery/models/BQField.java +++ /dev/null @@ -1,104 +0,0 @@ -package io.odpf.depot.bigquery.models; - -import com.google.cloud.bigquery.Field; -import com.google.cloud.bigquery.FieldList; -import com.google.cloud.bigquery.LegacySQLTypeName; -import com.google.protobuf.DescriptorProtos; -import io.odpf.depot.message.proto.Constants; -import io.odpf.depot.message.proto.ProtoField; -import io.odpf.depot.bigquery.exception.BQSchemaMappingException; -import lombok.EqualsAndHashCode; - -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - 
-@EqualsAndHashCode -public class BQField { - private static final Map FIELD_LABEL_TO_BQ_MODE_MAP = new HashMap() {{ - put(DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, Field.Mode.NULLABLE); - put(DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, Field.Mode.REPEATED); - put(DescriptorProtos.FieldDescriptorProto.Label.LABEL_REQUIRED, Field.Mode.REQUIRED); - }}; - private static final Map FIELD_TYPE_TO_BQ_TYPE_MAP = new HashMap() {{ - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_BYTES, LegacySQLTypeName.BYTES); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, LegacySQLTypeName.STRING); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_ENUM, LegacySQLTypeName.STRING); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_DOUBLE, LegacySQLTypeName.FLOAT); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT, LegacySQLTypeName.FLOAT); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_BOOL, LegacySQLTypeName.BOOLEAN); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_UINT64, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_UINT32, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FIXED64, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FIXED32, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SFIXED32, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SFIXED64, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SINT32, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SINT64, LegacySQLTypeName.INTEGER); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, LegacySQLTypeName.RECORD); - put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_GROUP, LegacySQLTypeName.RECORD); - }}; - private static final Map FIELD_NAME_TO_BQ_TYPE_MAP = new HashMap() {{ - put(Constants.ProtobufTypeName.TIMESTAMP_PROTOBUF_TYPE_NAME, LegacySQLTypeName.TIMESTAMP); - put(Constants.ProtobufTypeName.STRUCT_PROTOBUF_TYPE_NAME, LegacySQLTypeName.STRING); - put(Constants.ProtobufTypeName.DURATION_PROTOBUF_TYPE_NAME, LegacySQLTypeName.RECORD); - }}; - private final String name; - private final Field.Mode mode; - private final LegacySQLTypeName type; - private List subFields; - - public BQField(String name, Field.Mode mode, LegacySQLTypeName type, List subFields) { - this.name = name; - this.mode = mode; - this.type = type; - this.subFields = subFields; - } - - public BQField(ProtoField protoField) { - this.name = protoField.getName(); - this.mode = FIELD_LABEL_TO_BQ_MODE_MAP.get(protoField.getLabel()); - this.type = getType(protoField); - this.subFields = new ArrayList<>(); - } - - /** - * Map fully qualified type name or protobuf type to bigquery types. - * Fully qualified name will be used as mapping key before protobuf type being used - * @param protoField - * @return - */ - private LegacySQLTypeName getType(ProtoField protoField) { - LegacySQLTypeName typeFromFieldName = FIELD_NAME_TO_BQ_TYPE_MAP.get(protoField.getTypeName()) != null - ? 
FIELD_NAME_TO_BQ_TYPE_MAP.get(protoField.getTypeName()) - : FIELD_TYPE_TO_BQ_TYPE_MAP.get(protoField.getType()); - if (typeFromFieldName == null) { - throw new BQSchemaMappingException(String.format("No type mapping found for field: %s, fieldType: %s, typeName: %s", protoField.getName(), protoField.getType(), protoField.getTypeName())); - } - return typeFromFieldName; - } - - public void setSubFields(List fields) { - this.subFields = fields; - } - - public Field getField() { - if (this.subFields == null || this.subFields.size() == 0) { - return Field.newBuilder(this.name, this.type).setMode(this.mode).build(); - } - return Field.newBuilder(this.name, this.type, FieldList.of(subFields)).setMode(this.mode).build(); - } - - public String getName() { - return name; - } - - public LegacySQLTypeName getType() { - return type; - } - - -} diff --git a/src/main/java/io/odpf/depot/bigtable/parser/BigTableRowKeyParser.java b/src/main/java/io/odpf/depot/bigtable/parser/BigTableRowKeyParser.java deleted file mode 100644 index 1130031c..00000000 --- a/src/main/java/io/odpf/depot/bigtable/parser/BigTableRowKeyParser.java +++ /dev/null @@ -1,16 +0,0 @@ -package io.odpf.depot.bigtable.parser; - -import io.odpf.depot.common.Template; -import io.odpf.depot.message.OdpfMessageSchema; -import io.odpf.depot.message.ParsedOdpfMessage; -import lombok.AllArgsConstructor; - -@AllArgsConstructor -public class BigTableRowKeyParser { - private final Template keyTemplate; - private final OdpfMessageSchema schema; - - public String parse(ParsedOdpfMessage parsedOdpfMessage) { - return keyTemplate.parse(parsedOdpfMessage, schema); - } -} diff --git a/src/main/java/io/odpf/depot/exception/OdpfSinkException.java b/src/main/java/io/odpf/depot/exception/OdpfSinkException.java deleted file mode 100644 index 499323c0..00000000 --- a/src/main/java/io/odpf/depot/exception/OdpfSinkException.java +++ /dev/null @@ -1,9 +0,0 @@ -package io.odpf.depot.exception; - -import java.io.IOException; - -public class OdpfSinkException extends IOException { - public OdpfSinkException(String message, Throwable th) { - super(message, th); - } -} diff --git a/src/main/java/io/odpf/depot/log/LogSink.java b/src/main/java/io/odpf/depot/log/LogSink.java deleted file mode 100644 index 3fbfda21..00000000 --- a/src/main/java/io/odpf/depot/log/LogSink.java +++ /dev/null @@ -1,57 +0,0 @@ -package io.odpf.depot.log; - -import io.odpf.depot.OdpfSink; -import io.odpf.depot.OdpfSinkResponse; -import io.odpf.depot.config.OdpfSinkConfig; -import io.odpf.depot.error.ErrorInfo; -import io.odpf.depot.error.ErrorType; -import io.odpf.depot.exception.OdpfSinkException; -import io.odpf.depot.message.SinkConnectorSchemaMessageMode; -import io.odpf.depot.message.OdpfMessage; -import io.odpf.depot.message.OdpfMessageParser; -import io.odpf.depot.message.ParsedOdpfMessage; -import io.odpf.depot.metrics.Instrumentation; - -import java.io.IOException; -import java.util.List; - -public class LogSink implements OdpfSink { - private final OdpfMessageParser odpfMessageParser; - private final Instrumentation instrumentation; - private final OdpfSinkConfig config; - - public LogSink(OdpfSinkConfig config, OdpfMessageParser odpfMessageParser, Instrumentation instrumentation) { - this.odpfMessageParser = odpfMessageParser; - this.instrumentation = instrumentation; - this.config = config; - } - - @Override - public OdpfSinkResponse pushToSink(List messages) throws OdpfSinkException { - OdpfSinkResponse response = new OdpfSinkResponse(); - SinkConnectorSchemaMessageMode mode = 
config.getSinkConnectorSchemaMessageMode(); - String schemaClass = mode == SinkConnectorSchemaMessageMode.LOG_MESSAGE - ? config.getSinkConnectorSchemaProtoMessageClass() : config.getSinkConnectorSchemaProtoKeyClass(); - for (int ii = 0; ii < messages.size(); ii++) { - OdpfMessage message = messages.get(ii); - try { - ParsedOdpfMessage parsedOdpfMessage = - odpfMessageParser.parse( - message, - mode, - schemaClass); - instrumentation.logInfo("\n================= DATA =======================\n{}" - + "\n================= METADATA =======================\n{}\n", - parsedOdpfMessage.toString(), message.getMetadataString()); - } catch (IOException e) { - response.addErrors(ii, new ErrorInfo(e, ErrorType.DESERIALIZATION_ERROR)); - } - } - return response; - } - - @Override - public void close() throws IOException { - - } -} diff --git a/src/main/java/io/odpf/depot/log/LogSinkFactory.java b/src/main/java/io/odpf/depot/log/LogSinkFactory.java deleted file mode 100644 index 2e2b9e2d..00000000 --- a/src/main/java/io/odpf/depot/log/LogSinkFactory.java +++ /dev/null @@ -1,40 +0,0 @@ -package io.odpf.depot.log; - -import com.timgroup.statsd.NoOpStatsDClient; -import io.odpf.depot.message.OdpfMessageParserFactory; -import io.odpf.depot.OdpfSink; -import io.odpf.depot.config.OdpfSinkConfig; -import io.odpf.depot.message.OdpfMessageParser; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.metrics.StatsDReporter; -import org.aeonbits.owner.ConfigFactory; - -import java.util.Map; - -public class LogSinkFactory { - - private final StatsDReporter statsDReporter; - private OdpfMessageParser odpfMessageParser; - private final OdpfSinkConfig sinkConfig; - - public LogSinkFactory(Map env, StatsDReporter statsDReporter) { - this(ConfigFactory.create(OdpfSinkConfig.class, env), statsDReporter); - } - - public LogSinkFactory(OdpfSinkConfig sinkConfig, StatsDReporter statsDReporter) { - this.sinkConfig = sinkConfig; - this.statsDReporter = statsDReporter; - } - - public LogSinkFactory(OdpfSinkConfig sinkConfig) { - this(sinkConfig, new StatsDReporter(new NoOpStatsDClient())); - } - - public void init() { - this.odpfMessageParser = OdpfMessageParserFactory.getParser(sinkConfig, statsDReporter); - } - - public OdpfSink create() { - return new LogSink(sinkConfig, odpfMessageParser, new Instrumentation(statsDReporter, LogSink.class)); - } -} diff --git a/src/main/java/io/odpf/depot/message/OdpfMessageParser.java b/src/main/java/io/odpf/depot/message/OdpfMessageParser.java deleted file mode 100644 index a7a7b4db..00000000 --- a/src/main/java/io/odpf/depot/message/OdpfMessageParser.java +++ /dev/null @@ -1,9 +0,0 @@ -package io.odpf.depot.message; - -import java.io.IOException; - -public interface OdpfMessageParser { - ParsedOdpfMessage parse(OdpfMessage message, SinkConnectorSchemaMessageMode type, String schemaClass) throws IOException; - - OdpfMessageSchema getSchema(String schemaClass) throws IOException; -} diff --git a/src/main/java/io/odpf/depot/message/OdpfMessageParserFactory.java b/src/main/java/io/odpf/depot/message/OdpfMessageParserFactory.java deleted file mode 100644 index f095c47f..00000000 --- a/src/main/java/io/odpf/depot/message/OdpfMessageParserFactory.java +++ /dev/null @@ -1,28 +0,0 @@ -package io.odpf.depot.message; - -import io.odpf.depot.config.OdpfSinkConfig; -import io.odpf.depot.message.json.JsonOdpfMessageParser; -import io.odpf.depot.message.proto.ProtoOdpfMessageParser; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.metrics.JsonParserMetrics; -import 
io.odpf.depot.metrics.StatsDReporter; -import io.odpf.depot.stencil.OdpfStencilUpdateListener; - -public class OdpfMessageParserFactory { - public static OdpfMessageParser getParser(OdpfSinkConfig config, StatsDReporter statsDReporter, OdpfStencilUpdateListener odpfStencilUpdateListener) { - switch (config.getSinkConnectorSchemaDataType()) { - case JSON: - return new JsonOdpfMessageParser(config, - new Instrumentation(statsDReporter, JsonOdpfMessageParser.class), - new JsonParserMetrics(config)); - case PROTOBUF: - return new ProtoOdpfMessageParser(config, statsDReporter, odpfStencilUpdateListener); - default: - throw new IllegalArgumentException("Schema Type is not supported"); - } - } - - public static OdpfMessageParser getParser(OdpfSinkConfig config, StatsDReporter statsDReporter) { - return getParser(config, statsDReporter, null); - } -} diff --git a/src/main/java/io/odpf/depot/message/OdpfMessageSchema.java b/src/main/java/io/odpf/depot/message/OdpfMessageSchema.java deleted file mode 100644 index 62c09938..00000000 --- a/src/main/java/io/odpf/depot/message/OdpfMessageSchema.java +++ /dev/null @@ -1,5 +0,0 @@ -package io.odpf.depot.message; -public interface OdpfMessageSchema { - - Object getSchema(); -} diff --git a/src/main/java/io/odpf/depot/message/ParsedOdpfMessage.java b/src/main/java/io/odpf/depot/message/ParsedOdpfMessage.java deleted file mode 100644 index 8f96fa69..00000000 --- a/src/main/java/io/odpf/depot/message/ParsedOdpfMessage.java +++ /dev/null @@ -1,16 +0,0 @@ -package io.odpf.depot.message; - -import io.odpf.depot.config.OdpfSinkConfig; - -import java.io.IOException; -import java.util.Map; - -public interface ParsedOdpfMessage { - Object getRaw(); - - void validate(OdpfSinkConfig config); - - Map getMapping(OdpfMessageSchema schema) throws IOException; - - Object getFieldByName(String name, OdpfMessageSchema odpfMessageSchema); -} diff --git a/src/main/java/io/odpf/depot/redis/parsers/RedisEntryParser.java b/src/main/java/io/odpf/depot/redis/parsers/RedisEntryParser.java deleted file mode 100644 index 8efc31dc..00000000 --- a/src/main/java/io/odpf/depot/redis/parsers/RedisEntryParser.java +++ /dev/null @@ -1,11 +0,0 @@ -package io.odpf.depot.redis.parsers; - -import io.odpf.depot.message.ParsedOdpfMessage; -import io.odpf.depot.redis.client.entry.RedisEntry; - -import java.util.List; - -public interface RedisEntryParser { - - List getRedisEntry(ParsedOdpfMessage parsedOdpfMessage); -} diff --git a/src/main/java/io/odpf/depot/redis/parsers/RedisHashSetEntryParser.java b/src/main/java/io/odpf/depot/redis/parsers/RedisHashSetEntryParser.java deleted file mode 100644 index f079071e..00000000 --- a/src/main/java/io/odpf/depot/redis/parsers/RedisHashSetEntryParser.java +++ /dev/null @@ -1,40 +0,0 @@ -package io.odpf.depot.redis.parsers; - -import io.odpf.depot.common.Template; -import io.odpf.depot.message.field.GenericFieldFactory; -import io.odpf.depot.message.OdpfMessageSchema; -import io.odpf.depot.message.ParsedOdpfMessage; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.metrics.StatsDReporter; -import io.odpf.depot.redis.client.entry.RedisEntry; -import io.odpf.depot.redis.client.entry.RedisHashSetFieldEntry; -import lombok.AllArgsConstructor; - -import java.util.List; -import java.util.Map; -import java.util.stream.Collectors; - - -/** - * Redis hash set parser. 
- */ -@AllArgsConstructor -public class RedisHashSetEntryParser implements RedisEntryParser { - private final StatsDReporter statsDReporter; - private final Template keyTemplate; - private final Map fieldTemplates; - private final OdpfMessageSchema schema; - - @Override - public List getRedisEntry(ParsedOdpfMessage parsedOdpfMessage) { - String redisKey = keyTemplate.parse(parsedOdpfMessage, schema); - return fieldTemplates - .entrySet() - .stream() - .map(fieldTemplate -> { - String field = fieldTemplate.getValue().parse(parsedOdpfMessage, schema); - String redisValue = GenericFieldFactory.getField(parsedOdpfMessage.getFieldByName(fieldTemplate.getKey(), schema)).getString(); - return new RedisHashSetFieldEntry(redisKey, field, redisValue, new Instrumentation(statsDReporter, RedisHashSetFieldEntry.class)); - }).collect(Collectors.toList()); - } -} diff --git a/src/main/java/io/odpf/depot/redis/parsers/RedisKeyValueEntryParser.java b/src/main/java/io/odpf/depot/redis/parsers/RedisKeyValueEntryParser.java deleted file mode 100644 index dd296aac..00000000 --- a/src/main/java/io/odpf/depot/redis/parsers/RedisKeyValueEntryParser.java +++ /dev/null @@ -1,30 +0,0 @@ -package io.odpf.depot.redis.parsers; - -import io.odpf.depot.common.Template; -import io.odpf.depot.message.field.GenericFieldFactory; -import io.odpf.depot.message.OdpfMessageSchema; -import io.odpf.depot.message.ParsedOdpfMessage; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.metrics.StatsDReporter; -import io.odpf.depot.redis.client.entry.RedisEntry; -import io.odpf.depot.redis.client.entry.RedisKeyValueEntry; -import lombok.AllArgsConstructor; - -import java.util.Collections; -import java.util.List; - -@AllArgsConstructor -public class RedisKeyValueEntryParser implements RedisEntryParser { - private final StatsDReporter statsDReporter; - private final Template keyTemplate; - private final String fieldName; - private final OdpfMessageSchema schema; - - @Override - public List getRedisEntry(ParsedOdpfMessage parsedOdpfMessage) { - String redisKey = keyTemplate.parse(parsedOdpfMessage, schema); - String redisValue = GenericFieldFactory.getField(parsedOdpfMessage.getFieldByName(fieldName, schema)).getString(); - RedisKeyValueEntry redisKeyValueEntry = new RedisKeyValueEntry(redisKey, redisValue, new Instrumentation(statsDReporter, RedisKeyValueEntry.class)); - return Collections.singletonList(redisKeyValueEntry); - } -} diff --git a/src/main/java/io/odpf/depot/redis/parsers/RedisListEntryParser.java b/src/main/java/io/odpf/depot/redis/parsers/RedisListEntryParser.java deleted file mode 100644 index e6e40a51..00000000 --- a/src/main/java/io/odpf/depot/redis/parsers/RedisListEntryParser.java +++ /dev/null @@ -1,33 +0,0 @@ -package io.odpf.depot.redis.parsers; - - -import io.odpf.depot.common.Template; -import io.odpf.depot.message.field.GenericFieldFactory; -import io.odpf.depot.message.OdpfMessageSchema; -import io.odpf.depot.message.ParsedOdpfMessage; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.metrics.StatsDReporter; -import io.odpf.depot.redis.client.entry.RedisEntry; -import io.odpf.depot.redis.client.entry.RedisListEntry; -import lombok.AllArgsConstructor; - -import java.util.Collections; -import java.util.List; - -/** - * Redis list parser. 
- */ -@AllArgsConstructor -public class RedisListEntryParser implements RedisEntryParser { - private final StatsDReporter statsDReporter; - private final Template keyTemplate; - private final String field; - private final OdpfMessageSchema schema; - - @Override - public List getRedisEntry(ParsedOdpfMessage parsedOdpfMessage) { - String redisKey = keyTemplate.parse(parsedOdpfMessage, schema); - String redisValue = GenericFieldFactory.getField(parsedOdpfMessage.getFieldByName(field, schema)).getString(); - return Collections.singletonList(new RedisListEntry(redisKey, redisValue, new Instrumentation(statsDReporter, RedisListEntry.class))); - } -} diff --git a/src/main/java/org/raystack/depot/Sink.java b/src/main/java/org/raystack/depot/Sink.java new file mode 100644 index 00000000..f0c56f98 --- /dev/null +++ b/src/main/java/org/raystack/depot/Sink.java @@ -0,0 +1,12 @@ +package org.raystack.depot; + +import org.raystack.depot.exception.SinkException; +import org.raystack.depot.message.Message; + +import java.io.Closeable; +import java.util.List; + +public interface Sink extends Closeable { + + SinkResponse pushToSink(List messages) throws SinkException; +} diff --git a/src/main/java/io/odpf/depot/OdpfSinkResponse.java b/src/main/java/org/raystack/depot/SinkResponse.java similarity index 80% rename from src/main/java/io/odpf/depot/OdpfSinkResponse.java rename to src/main/java/org/raystack/depot/SinkResponse.java index 9de8409e..26e1c74a 100644 --- a/src/main/java/io/odpf/depot/OdpfSinkResponse.java +++ b/src/main/java/org/raystack/depot/SinkResponse.java @@ -1,15 +1,16 @@ -package io.odpf.depot; +package org.raystack.depot; -import io.odpf.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorInfo; import java.util.HashMap; import java.util.Map; -public class OdpfSinkResponse { +public class SinkResponse { private final Map errors = new HashMap<>(); /** - * Returns error as a map whose keys are indexes of messages that failed to be pushed. + * Returns errors as a map whose keys are indexes of messages that failed to be + * pushed. * Each failed message index is associated with a {@link ErrorInfo}. */ public Map getErrors() { @@ -17,7 +18,8 @@ public Map getErrors() { } /** - * Returns error for the provided message index. If no error exists returns {@code null}. + * Returns the error for the provided message index. If no error exists, returns + * {@code null}. */ public ErrorInfo getErrorsFor(long index) { return errors.get(index); @@ -31,7 +33,8 @@ public void addErrors(long index, ErrorInfo errorInfo) { } /** - * Returns {@code true} if no row insertion failed, {@code false} otherwise. If {@code false}. + * Returns {@code true} if any row insertion failed, {@code false} otherwise. If + * {@code false}, * {@link #getErrors()} returns an empty map.
*/ public boolean hasErrors() { diff --git a/src/main/java/io/odpf/depot/bigquery/BigQuerySink.java b/src/main/java/org/raystack/depot/bigquery/BigQuerySink.java similarity index 52% rename from src/main/java/io/odpf/depot/bigquery/BigQuerySink.java rename to src/main/java/org/raystack/depot/bigquery/BigQuerySink.java index 39563959..17a1d570 100644 --- a/src/main/java/io/odpf/depot/bigquery/BigQuerySink.java +++ b/src/main/java/org/raystack/depot/bigquery/BigQuerySink.java @@ -1,26 +1,26 @@ -package io.odpf.depot.bigquery; +package org.raystack.depot.bigquery; import com.google.cloud.bigquery.InsertAllRequest; import com.google.cloud.bigquery.InsertAllResponse; -import io.odpf.depot.bigquery.client.BigQueryClient; -import io.odpf.depot.bigquery.client.BigQueryResponseParser; -import io.odpf.depot.bigquery.client.BigQueryRow; -import io.odpf.depot.bigquery.handler.ErrorHandler; -import io.odpf.depot.bigquery.models.Record; -import io.odpf.depot.error.ErrorInfo; -import io.odpf.depot.metrics.BigQueryMetrics; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.bigquery.converter.MessageRecordConverterCache; -import io.odpf.depot.message.OdpfMessage; -import io.odpf.depot.OdpfSink; -import io.odpf.depot.OdpfSinkResponse; -import io.odpf.depot.bigquery.models.Records; +import org.raystack.depot.bigquery.client.BigQueryClient; +import org.raystack.depot.bigquery.client.BigQueryResponseParser; +import org.raystack.depot.bigquery.client.BigQueryRow; +import org.raystack.depot.bigquery.converter.MessageRecordConverterCache; +import org.raystack.depot.bigquery.handler.ErrorHandler; +import org.raystack.depot.bigquery.models.Record; +import org.raystack.depot.bigquery.models.Records; +import org.raystack.depot.Sink; +import org.raystack.depot.SinkResponse; +import org.raystack.depot.error.ErrorInfo; +import org.raystack.depot.message.Message; +import org.raystack.depot.metrics.BigQueryMetrics; +import org.raystack.depot.metrics.Instrumentation; import java.io.IOException; import java.util.List; import java.util.Map; -public class BigQuerySink implements OdpfSink { +public class BigQuerySink implements Sink { private final BigQueryClient bigQueryClient; private final BigQueryRow rowCreator; @@ -30,11 +30,11 @@ public class BigQuerySink implements OdpfSink { private final ErrorHandler errorHandler; public BigQuerySink(BigQueryClient client, - MessageRecordConverterCache converterCache, - BigQueryRow rowCreator, - BigQueryMetrics bigQueryMetrics, - Instrumentation instrumentation, - ErrorHandler errorHandler) { + MessageRecordConverterCache converterCache, + BigQueryRow rowCreator, + BigQueryMetrics bigQueryMetrics, + Instrumentation instrumentation, + ErrorHandler errorHandler) { this.bigQueryClient = client; this.messageRecordConverterCache = converterCache; this.rowCreator = rowCreator; @@ -54,19 +54,22 @@ private InsertAllResponse insertIntoBQ(List records) { } @Override - public OdpfSinkResponse pushToSink(List messageList) { + public SinkResponse pushToSink(List messageList) { Records records = messageRecordConverterCache.getMessageRecordConverter().convert(messageList); - OdpfSinkResponse odpfSinkResponse = new OdpfSinkResponse(); - records.getInvalidRecords().forEach(invalidRecord -> odpfSinkResponse.addErrors(invalidRecord.getIndex(), invalidRecord.getErrorInfo())); + SinkResponse sinkResponse = new SinkResponse(); + records.getInvalidRecords().forEach( + invalidRecord -> sinkResponse.addErrors(invalidRecord.getIndex(), invalidRecord.getErrorInfo())); if 
(records.getValidRecords().size() > 0) { InsertAllResponse response = insertIntoBQ(records.getValidRecords()); - instrumentation.logInfo("Pushed a batch of {} records to BQ. Insert success?: {}", records.getValidRecords().size(), !response.hasErrors()); + instrumentation.logInfo("Pushed a batch of {} records to BQ. Insert success?: {}", + records.getValidRecords().size(), !response.hasErrors()); if (response.hasErrors()) { - Map errorInfoMap = BigQueryResponseParser.getErrorsFromBQResponse(records.getValidRecords(), response, bigQueryMetrics, instrumentation); - errorInfoMap.forEach(odpfSinkResponse::addErrors); + Map errorInfoMap = BigQueryResponseParser + .getErrorsFromBQResponse(records.getValidRecords(), response, bigQueryMetrics, instrumentation); + errorInfoMap.forEach(sinkResponse::addErrors); errorHandler.handle(response.getInsertErrors(), records.getValidRecords()); } } - return odpfSinkResponse; + return sinkResponse; } } diff --git a/src/main/java/org/raystack/depot/bigquery/BigQuerySinkFactory.java b/src/main/java/org/raystack/depot/bigquery/BigQuerySinkFactory.java new file mode 100644 index 00000000..633266b6 --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/BigQuerySinkFactory.java @@ -0,0 +1,124 @@ +package org.raystack.depot.bigquery; + +import org.raystack.depot.bigquery.client.BigQueryClient; +import org.raystack.depot.bigquery.client.BigQueryRow; +import org.raystack.depot.bigquery.client.BigQueryRowWithInsertId; +import org.raystack.depot.bigquery.client.BigQueryRowWithoutInsertId; +import org.raystack.depot.bigquery.converter.MessageRecordConverterCache; +import org.raystack.depot.bigquery.handler.ErrorHandler; +import org.raystack.depot.bigquery.handler.ErrorHandlerFactory; +import org.raystack.depot.bigquery.storage.BigQueryStorageClient; +import org.raystack.depot.bigquery.storage.BigQueryStorageClientFactory; +import org.raystack.depot.bigquery.storage.BigQueryStorageResponseParser; +import org.raystack.depot.bigquery.storage.BigQueryWriter; +import org.raystack.depot.bigquery.storage.BigQueryWriterFactory; +import org.raystack.depot.bigquery.storage.BigQueryWriterUtils; +import com.timgroup.statsd.NoOpStatsDClient; +import org.raystack.depot.Sink; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.message.MessageParser; +import org.raystack.depot.message.MessageParserFactory; +import org.raystack.depot.metrics.BigQueryMetrics; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.metrics.StatsDReporter; +import org.raystack.depot.stencil.DepotStencilUpdateListener; +import org.aeonbits.owner.ConfigFactory; + +import java.io.IOException; +import java.util.Map; +import java.util.function.Function; + +public class BigQuerySinkFactory { + + private final StatsDReporter statsDReporter; + private final Function, String> rowIDCreator; + private final BigQuerySinkConfig sinkConfig; + private BigQueryClient bigQueryClient; + private BigQueryRow rowCreator; + private BigQueryMetrics bigQueryMetrics; + private ErrorHandler errorHandler; + private MessageRecordConverterCache converterCache; + private BigQueryStorageClient bigQueryStorageClient; + private BigQueryStorageResponseParser responseParser; + + public BigQuerySinkFactory(Map env, StatsDReporter statsDReporter, + Function, String> rowIDCreator) { + this(ConfigFactory.create(BigQuerySinkConfig.class, env), statsDReporter, rowIDCreator); + } + + public BigQuerySinkFactory(BigQuerySinkConfig sinkConfig, StatsDReporter statsDReporter, + Function, String> 
rowIDCreator) { + this.sinkConfig = sinkConfig; + this.rowIDCreator = rowIDCreator; + this.statsDReporter = statsDReporter; + } + + public BigQuerySinkFactory(BigQuerySinkConfig sinkConfig) { + this(sinkConfig, new StatsDReporter(new NoOpStatsDClient()), null); + } + + public BigQuerySinkFactory(BigQuerySinkConfig sinkConfig, StatsDReporter statsDReporter) { + this(sinkConfig, statsDReporter, null); + } + + public BigQuerySinkFactory(BigQuerySinkConfig sinkConfig, Function, String> rowIDCreator) { + this(sinkConfig, new StatsDReporter(new NoOpStatsDClient()), rowIDCreator); + } + + public void init() { + try { + this.bigQueryMetrics = new BigQueryMetrics(sinkConfig); + this.bigQueryClient = new BigQueryClient(sinkConfig, bigQueryMetrics, + new Instrumentation(statsDReporter, BigQueryClient.class)); + this.converterCache = new MessageRecordConverterCache(); + this.errorHandler = ErrorHandlerFactory.create(sinkConfig, bigQueryClient, statsDReporter); + DepotStencilUpdateListener depotStencilUpdateListener = BigqueryStencilUpdateListenerFactory + .create(sinkConfig, bigQueryClient, converterCache, statsDReporter); + MessageParser messageParser = MessageParserFactory.getParser(sinkConfig, statsDReporter, + depotStencilUpdateListener); + depotStencilUpdateListener.setMessageParser(messageParser); + depotStencilUpdateListener.updateSchema(); + + if (sinkConfig.isRowInsertIdEnabled()) { + this.rowCreator = new BigQueryRowWithInsertId(rowIDCreator); + } else { + this.rowCreator = new BigQueryRowWithoutInsertId(); + } + if (sinkConfig.getSinkBigqueryStorageAPIEnable()) { + BigQueryWriter bigQueryWriter = BigQueryWriterFactory + .createBigQueryWriter( + sinkConfig, + BigQueryWriterUtils::getBigQueryWriterClient, + BigQueryWriterUtils::getCredentialsProvider, + BigQueryWriterUtils::getStreamWriter, + new Instrumentation(statsDReporter, BigQueryWriter.class), + bigQueryMetrics); + bigQueryWriter.init(); + bigQueryStorageClient = BigQueryStorageClientFactory.createBigQueryStorageClient(sinkConfig, + messageParser, bigQueryWriter); + responseParser = new BigQueryStorageResponseParser( + sinkConfig, + new Instrumentation(statsDReporter, BigQueryStorageResponseParser.class), + bigQueryMetrics); + } + } catch (IOException e) { + throw new IllegalArgumentException("Exception occurred while creating sink", e); + } + } + + public Sink create() { + if (sinkConfig.getSinkBigqueryStorageAPIEnable()) { + return new BigQueryStorageAPISink( + bigQueryStorageClient, + responseParser); + } else { + return new BigQuerySink( + bigQueryClient, + converterCache, + rowCreator, + bigQueryMetrics, + new Instrumentation(statsDReporter, BigQuerySink.class), + errorHandler); + } + } +} diff --git a/src/main/java/org/raystack/depot/bigquery/BigQueryStorageAPISink.java b/src/main/java/org/raystack/depot/bigquery/BigQueryStorageAPISink.java new file mode 100644 index 00000000..6de9311b --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/BigQueryStorageAPISink.java @@ -0,0 +1,52 @@ +package org.raystack.depot.bigquery; + +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import org.raystack.depot.Sink; +import org.raystack.depot.SinkResponse; +import org.raystack.depot.bigquery.storage.BigQueryPayload; +import org.raystack.depot.bigquery.storage.BigQueryStorageClient; +import org.raystack.depot.bigquery.storage.BigQueryStorageResponseParser; +import org.raystack.depot.exception.SinkException; +import org.raystack.depot.message.Message; + +import java.io.IOException; +import java.util.List; +import 
java.util.concurrent.ExecutionException; + +public class BigQueryStorageAPISink implements Sink { + private final BigQueryStorageClient bigQueryStorageClient; + private final BigQueryStorageResponseParser responseParser; + + public BigQueryStorageAPISink( + BigQueryStorageClient bigQueryStorageClient, + BigQueryStorageResponseParser responseParser) { + this.bigQueryStorageClient = bigQueryStorageClient; + this.responseParser = responseParser; + } + + @Override + public SinkResponse pushToSink(List messages) throws SinkException { + SinkResponse sinkResponse = new SinkResponse(); + BigQueryPayload payload = bigQueryStorageClient.convert(messages); + responseParser.setSinkResponseForInvalidMessages(payload, messages, sinkResponse); + if (payload.getPayloadIndexes().size() > 0) { + try { + AppendRowsResponse appendRowsResponse = bigQueryStorageClient.appendAndGet(payload); + responseParser.setSinkResponseForErrors(payload, appendRowsResponse, messages, sinkResponse); + } catch (ExecutionException e) { + e.printStackTrace(); + Throwable cause = e.getCause(); + responseParser.setSinkResponseForException(cause, payload, messages, sinkResponse); + } catch (InterruptedException e) { + e.printStackTrace(); + throw new SinkException("Interrupted exception occurred", e); + } + } + return sinkResponse; + } + + @Override + public void close() throws IOException { + bigQueryStorageClient.close(); + } +} diff --git a/src/main/java/org/raystack/depot/bigquery/BigqueryStencilUpdateListenerFactory.java b/src/main/java/org/raystack/depot/bigquery/BigqueryStencilUpdateListenerFactory.java new file mode 100644 index 00000000..a9633638 --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/BigqueryStencilUpdateListenerFactory.java @@ -0,0 +1,26 @@ +package org.raystack.depot.bigquery; + +import org.raystack.depot.bigquery.client.BigQueryClient; +import org.raystack.depot.bigquery.converter.MessageRecordConverterCache; +import org.raystack.depot.bigquery.json.BigqueryJsonUpdateListener; +import org.raystack.depot.bigquery.proto.BigqueryProtoUpdateListener; +import org.raystack.depot.exception.ConfigurationException; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.metrics.StatsDReporter; +import org.raystack.depot.stencil.DepotStencilUpdateListener; + +public class BigqueryStencilUpdateListenerFactory { + public static DepotStencilUpdateListener create(BigQuerySinkConfig config, BigQueryClient bqClient, + MessageRecordConverterCache converterCache, StatsDReporter statsDReporter) { + switch (config.getSinkConnectorSchemaDataType()) { + case JSON: + return new BigqueryJsonUpdateListener(config, converterCache, bqClient, + new Instrumentation(statsDReporter, BigqueryJsonUpdateListener.class)); + case PROTOBUF: + return new BigqueryProtoUpdateListener(config, bqClient, converterCache); + default: + throw new ConfigurationException("Schema Type is not supported"); + } + } +} diff --git a/src/main/java/io/odpf/depot/bigquery/client/BigQueryClient.java b/src/main/java/org/raystack/depot/bigquery/client/BigQueryClient.java similarity index 89% rename from src/main/java/io/odpf/depot/bigquery/client/BigQueryClient.java rename to src/main/java/org/raystack/depot/bigquery/client/BigQueryClient.java index 581740f2..62d751af 100644 --- a/src/main/java/io/odpf/depot/bigquery/client/BigQueryClient.java +++ b/src/main/java/org/raystack/depot/bigquery/client/BigQueryClient.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery.client; 
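
Taken together, the factory and sink classes above imply a two-phase lifecycle for callers: build a factory from configuration, call `init()` once to wire the client, parser, and schema listeners, then `create()` a `Sink` per worker and push batches through it. A minimal caller sketch under that reading; the configuration map contents and the pre-built `Message` batch here are illustrative assumptions, not part of this change:

```java
import org.aeonbits.owner.ConfigFactory;
import org.raystack.depot.Sink;
import org.raystack.depot.SinkResponse;
import org.raystack.depot.bigquery.BigQuerySinkFactory;
import org.raystack.depot.config.BigQuerySinkConfig;
import org.raystack.depot.message.Message;

import java.util.List;
import java.util.Map;

public class BigQuerySinkUsage {
    // Pushes one batch and logs any per-message failures.
    public static void push(Map<String, String> env, List<Message> batch) throws Exception {
        BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, env);
        BigQuerySinkFactory factory = new BigQuerySinkFactory(config);
        factory.init();                      // one-time setup: client, parser, schema listener
        try (Sink sink = factory.create()) { // Sink extends Closeable
            SinkResponse response = sink.pushToSink(batch);
            if (response.hasErrors()) {
                // Keys are indexes into the original batch.
                response.getErrors().forEach((index, errorInfo) ->
                        System.err.printf("message %d failed: %s%n", index, errorInfo));
            }
        }
    }
}
```

Whether `create()` hands back the legacy streaming sink or the Storage API sink is decided by the `getSinkBigqueryStorageAPIEnable()` configuration shown above; the calling code is identical either way.
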
+package org.raystack.depot.bigquery.client; import com.google.auth.oauth2.GoogleCredentials; import com.google.cloud.TransportOptions; @@ -17,10 +17,10 @@ import com.google.cloud.bigquery.TableId; import com.google.cloud.bigquery.TableInfo; import com.google.cloud.bigquery.TimePartitioning; -import io.odpf.depot.bigquery.exception.BQDatasetLocationChangedException; -import io.odpf.depot.config.BigQuerySinkConfig; -import io.odpf.depot.metrics.BigQueryMetrics; -import io.odpf.depot.metrics.Instrumentation; +import org.raystack.depot.bigquery.exception.BQDatasetLocationChangedException; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.metrics.BigQueryMetrics; +import org.raystack.depot.metrics.Instrumentation; import lombok.Getter; import java.io.FileInputStream; @@ -41,11 +41,13 @@ public class BigQueryClient { private final Random random = new Random(System.currentTimeMillis()); private final BigQueryMetrics bigqueryMetrics; - public BigQueryClient(BigQuerySinkConfig bqConfig, BigQueryMetrics bigQueryMetrics, Instrumentation instrumentation) throws IOException { + public BigQueryClient(BigQuerySinkConfig bqConfig, BigQueryMetrics bigQueryMetrics, Instrumentation instrumentation) + throws IOException { this(getBigQueryInstance(bqConfig), bqConfig, bigQueryMetrics, instrumentation); } - public BigQueryClient(BigQuery bq, BigQuerySinkConfig bqConfig, BigQueryMetrics bigQueryMetrics, Instrumentation instrumentation) { + public BigQueryClient(BigQuery bq, BigQuerySinkConfig bqConfig, BigQueryMetrics bigQueryMetrics, + Instrumentation instrumentation) { this.bigquery = bq; this.bqConfig = bqConfig; this.tableID = TableId.of(bqConfig.getDatasetName(), bqConfig.getTableName()); @@ -61,7 +63,8 @@ private static BigQuery getBigQueryInstance(BigQuerySinkConfig sinkConfig) throw .build(); return BigQueryOptions.newBuilder() .setTransportOptions(transportOptions) - .setCredentials(GoogleCredentials.fromStream(new FileInputStream(sinkConfig.getBigQueryCredentialPath()))) + .setCredentials( + GoogleCredentials.fromStream(new FileInputStream(sinkConfig.getBigQueryCredentialPath()))) .setProjectId(sinkConfig.getGCloudProjectID()) .build().getService(); } @@ -103,7 +106,8 @@ private void upsertDatasetAndTableWithRetry(TableInfo info) { instrumentation.logInfo("Waiting for " + sleepMillis + " milliseconds"); Thread.sleep(sleepMillis); } catch (InterruptedException interruptedException) { - instrumentation.captureNonFatalError(bigqueryMetrics.getErrorEventMetric(), interruptedException, "Sleep interrupted"); + instrumentation.captureNonFatalError(bigqueryMetrics.getErrorEventMetric(), + interruptedException, "Sleep interrupted"); } } else { throw e; @@ -120,8 +124,7 @@ private void upsertDatasetAndTable(TableInfo tableInfo) { Dataset.newBuilder(tableID.getDataset()) .setLocation(bqConfig.getBigQueryDatasetLocation()) .setLabels(bqConfig.getDatasetLabels()) - .build() - ); + .build()); instrumentation.logInfo("Successfully CREATED bigquery DATASET: {}", tableID.getDataset()); instrument(start, BigQueryMetrics.BigQueryAPIType.DATASET_CREATE); } else if (shouldUpdateDataset(dataSet)) { @@ -129,8 +132,7 @@ private void upsertDatasetAndTable(TableInfo tableInfo) { bigquery.update( Dataset.newBuilder(tableID.getDataset()) .setLabels(bqConfig.getDatasetLabels()) - .build() - ); + .build()); instrumentation.logInfo("Successfully UPDATED bigquery DATASET: {} with labels", tableID.getDataset()); instrument(start, BigQueryMetrics.BigQueryAPIType.DATASET_UPDATE); } @@ -161,12 +163,14 @@ 
private void instrument(Instant startTime, BigQueryMetrics.BigQueryAPIType type) bigqueryMetrics.getBigqueryOperationTotalMetric(), String.format(BigQueryMetrics.BIGQUERY_TABLE_TAG, tableID.getTable()), String.format(BigQueryMetrics.BIGQUERY_DATASET_TAG, tableID.getDataset()), + String.format(BigQueryMetrics.BIGQUERY_PROJECT_TAG, tableID.getProject()), String.format(BigQueryMetrics.BIGQUERY_API_TAG, type)); instrumentation.captureDurationSince( bigqueryMetrics.getBigqueryOperationLatencyMetric(), startTime, String.format(BigQueryMetrics.BIGQUERY_TABLE_TAG, tableID.getTable()), String.format(BigQueryMetrics.BIGQUERY_DATASET_TAG, tableID.getDataset()), + String.format(BigQueryMetrics.BIGQUERY_PROJECT_TAG, tableID.getProject()), String.format(BigQueryMetrics.BIGQUERY_API_TAG, type)); } @@ -196,8 +200,11 @@ private boolean shouldChangePartitionExpiryForStandardTable(Table table) { return false; } long neverExpireMs = 0L; - Long currentExpirationMs = timePartitioning.getExpirationMs() == null ? neverExpireMs : timePartitioning.getExpirationMs(); - Long newExpirationMs = bqConfig.getBigQueryTablePartitionExpiryMS() > 0 ? bqConfig.getBigQueryTablePartitionExpiryMS() : neverExpireMs; + Long currentExpirationMs = timePartitioning.getExpirationMs() == null ? neverExpireMs + : timePartitioning.getExpirationMs(); + Long newExpirationMs = bqConfig.getBigQueryTablePartitionExpiryMS() > 0 + ? bqConfig.getBigQueryTablePartitionExpiryMS() + : neverExpireMs; return !currentExpirationMs.equals(newExpirationMs); } diff --git a/src/main/java/io/odpf/depot/bigquery/client/BigQueryResponseParser.java b/src/main/java/org/raystack/depot/bigquery/client/BigQueryResponseParser.java similarity index 50% rename from src/main/java/io/odpf/depot/bigquery/client/BigQueryResponseParser.java rename to src/main/java/org/raystack/depot/bigquery/client/BigQueryResponseParser.java index ed8d2dce..c4e185aa 100644 --- a/src/main/java/io/odpf/depot/bigquery/client/BigQueryResponseParser.java +++ b/src/main/java/org/raystack/depot/bigquery/client/BigQueryResponseParser.java @@ -1,19 +1,19 @@ -package io.odpf.depot.bigquery.client; +package org.raystack.depot.bigquery.client; import com.google.cloud.bigquery.BigQueryError; import com.google.cloud.bigquery.InsertAllResponse; -import io.odpf.depot.bigquery.error.ErrorDescriptor; -import io.odpf.depot.bigquery.error.ErrorParser; -import io.odpf.depot.bigquery.error.InvalidSchemaError; -import io.odpf.depot.bigquery.error.OOBError; -import io.odpf.depot.bigquery.error.StoppedError; -import io.odpf.depot.bigquery.error.UnknownError; -import io.odpf.depot.bigquery.exception.BigQuerySinkException; -import io.odpf.depot.bigquery.models.Record; -import io.odpf.depot.error.ErrorInfo; -import io.odpf.depot.error.ErrorType; -import io.odpf.depot.metrics.BigQueryMetrics; -import io.odpf.depot.metrics.Instrumentation; +import org.raystack.depot.bigquery.exception.BigQuerySinkException; +import org.raystack.depot.bigquery.models.Record; +import org.raystack.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorType; +import org.raystack.depot.metrics.BigQueryMetrics; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.bigquery.error.ErrorDescriptor; +import org.raystack.depot.bigquery.error.ErrorParser; +import org.raystack.depot.bigquery.error.InvalidSchemaError; +import org.raystack.depot.bigquery.error.OOBError; +import org.raystack.depot.bigquery.error.StoppedError; +import org.raystack.depot.bigquery.error.UnknownError; import java.util.HashMap; import 
java.util.List; @@ -21,8 +21,10 @@ public class BigQueryResponseParser { /** - * Parses the {@link InsertAllResponse} object and returns errors type {@link ErrorDescriptor}. - * {@link InsertAllResponse} in bqResponse are 1 to 1 indexed based on the records that are requested to be inserted. + * Parses the {@link InsertAllResponse} object and returns errors of type + * {@link ErrorDescriptor}. + * Entries in the {@link InsertAllResponse} are indexed 1-to-1 with the + * records that were requested to be inserted. * * @param records - list of records that were tried with BQ insertion * @param bqResponse - the status of insertion for all records as returned by BQ @@ -42,21 +44,30 @@ public static Map getErrorsFromBQResponse( Record record = records.get(errorEntry.getKey().intValue()); long messageIndex = record.getIndex(); List errors = ErrorParser.parseError(errorEntry.getValue()); - instrumentation.logError("Error while bigquery insert for message. Record: {}, Error: {}, MetaData: {}", + instrumentation.logError( + "Error while bigquery insert for message. \nRecord: {}, \nError: {}, \nMetaData: {}", record.getColumns(), errors, record.getMetadata()); if (errorMatch(errors, UnknownError.class)) { - errorInfoResponse.put(messageIndex, new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_UNKNOWN_ERROR)); - instrumentation.incrementCounter(bigQueryMetrics.getBigqueryTotalErrorsMetrics(), String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.UNKNOWN_ERROR)); + errorInfoResponse.put(messageIndex, + new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_UNKNOWN_ERROR)); + instrumentation.incrementCounter(bigQueryMetrics.getBigqueryTotalErrorsMetrics(), String + .format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.UNKNOWN_ERROR)); } else if (errorMatch(errors, InvalidSchemaError.class)) { - errorInfoResponse.put(messageIndex, new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_4XX_ERROR)); - instrumentation.incrementCounter(bigQueryMetrics.getBigqueryTotalErrorsMetrics(), String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.INVALID_SCHEMA_ERROR)); + errorInfoResponse.put(messageIndex, + new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_4XX_ERROR)); + instrumentation.incrementCounter(bigQueryMetrics.getBigqueryTotalErrorsMetrics(), String.format( + BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.INVALID_SCHEMA_ERROR)); } else if (errorMatch(errors, OOBError.class)) { - errorInfoResponse.put(messageIndex, new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_4XX_ERROR)); - instrumentation.incrementCounter(bigQueryMetrics.getBigqueryTotalErrorsMetrics(), String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.OOB_ERROR)); + errorInfoResponse.put(messageIndex, + new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_4XX_ERROR)); + instrumentation.incrementCounter(bigQueryMetrics.getBigqueryTotalErrorsMetrics(), + String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.OOB_ERROR)); } else if (errorMatch(errors, StoppedError.class)) { - errorInfoResponse.put(messageIndex, new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_5XX_ERROR)); + errorInfoResponse.put(messageIndex, + new ErrorInfo(new BigQuerySinkException(), 
ErrorType.SINK_5XX_ERROR)); + instrumentation.incrementCounter(bigQueryMetrics.getBigqueryTotalErrorsMetrics(), String + .format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.STOPPED_ERROR)); } } return errorInfoResponse; diff --git a/src/main/java/io/odpf/depot/bigquery/client/BigQueryRow.java b/src/main/java/org/raystack/depot/bigquery/client/BigQueryRow.java similarity index 54% rename from src/main/java/io/odpf/depot/bigquery/client/BigQueryRow.java rename to src/main/java/org/raystack/depot/bigquery/client/BigQueryRow.java index c9236a1c..f1cc4e76 100644 --- a/src/main/java/io/odpf/depot/bigquery/client/BigQueryRow.java +++ b/src/main/java/org/raystack/depot/bigquery/client/BigQueryRow.java @@ -1,14 +1,13 @@ -package io.odpf.depot.bigquery.client; - +package org.raystack.depot.bigquery.client; import com.google.cloud.bigquery.InsertAllRequest; -import io.odpf.depot.bigquery.models.Record; +import org.raystack.depot.bigquery.models.Record; /** - * Fetches BQ insertable row from the base record {@link Record}. The implementations can differ if unique rows need to be inserted or not. + * Fetches BQ insertable row from the base record {@link Record}. The + * implementations can differ if unique rows need to be inserted or not. */ public interface BigQueryRow { InsertAllRequest.RowToInsert of(Record record); } - diff --git a/src/main/java/io/odpf/depot/bigquery/client/BigQueryRowWithInsertId.java b/src/main/java/org/raystack/depot/bigquery/client/BigQueryRowWithInsertId.java similarity index 85% rename from src/main/java/io/odpf/depot/bigquery/client/BigQueryRowWithInsertId.java rename to src/main/java/org/raystack/depot/bigquery/client/BigQueryRowWithInsertId.java index 08370314..c783738c 100644 --- a/src/main/java/io/odpf/depot/bigquery/client/BigQueryRowWithInsertId.java +++ b/src/main/java/org/raystack/depot/bigquery/client/BigQueryRowWithInsertId.java @@ -1,7 +1,7 @@ -package io.odpf.depot.bigquery.client; +package org.raystack.depot.bigquery.client; import com.google.cloud.bigquery.InsertAllRequest; -import io.odpf.depot.bigquery.models.Record; +import org.raystack.depot.bigquery.models.Record; import java.util.Map; import java.util.function.Function; diff --git a/src/main/java/io/odpf/depot/bigquery/client/BigQueryRowWithoutInsertId.java b/src/main/java/org/raystack/depot/bigquery/client/BigQueryRowWithoutInsertId.java similarity index 74% rename from src/main/java/io/odpf/depot/bigquery/client/BigQueryRowWithoutInsertId.java rename to src/main/java/org/raystack/depot/bigquery/client/BigQueryRowWithoutInsertId.java index c1b17a87..00a7acc2 100644 --- a/src/main/java/io/odpf/depot/bigquery/client/BigQueryRowWithoutInsertId.java +++ b/src/main/java/org/raystack/depot/bigquery/client/BigQueryRowWithoutInsertId.java @@ -1,7 +1,7 @@ -package io.odpf.depot.bigquery.client; +package org.raystack.depot.bigquery.client; import com.google.cloud.bigquery.InsertAllRequest; -import io.odpf.depot.bigquery.models.Record; +import org.raystack.depot.bigquery.models.Record; public class BigQueryRowWithoutInsertId implements BigQueryRow { diff --git a/src/main/java/io/odpf/depot/bigquery/client/BigQueryTableDefinition.java b/src/main/java/org/raystack/depot/bigquery/client/BigQueryTableDefinition.java similarity index 66% rename from src/main/java/io/odpf/depot/bigquery/client/BigQueryTableDefinition.java rename to src/main/java/org/raystack/depot/bigquery/client/BigQueryTableDefinition.java index 219f51b9..57f4320b 100644 --- 
a/src/main/java/io/odpf/depot/bigquery/client/BigQueryTableDefinition.java +++ b/src/main/java/org/raystack/depot/bigquery/client/BigQueryTableDefinition.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery.client; +package org.raystack.depot.bigquery.client; import com.google.cloud.bigquery.Clustering; import com.google.cloud.bigquery.Field; @@ -6,9 +6,9 @@ import com.google.cloud.bigquery.Schema; import com.google.cloud.bigquery.StandardTableDefinition; import com.google.cloud.bigquery.TimePartitioning; -import io.odpf.depot.bigquery.exception.BQClusteringKeysException; -import io.odpf.depot.bigquery.exception.BQPartitionKeyNotSpecified; -import io.odpf.depot.config.BigQuerySinkConfig; +import org.raystack.depot.bigquery.exception.BQClusteringKeysException; +import org.raystack.depot.bigquery.exception.BQPartitionKeyNotSpecified; +import org.raystack.depot.config.BigQuerySinkConfig; import lombok.AllArgsConstructor; import java.util.List; @@ -37,17 +37,20 @@ public StandardTableDefinition getTableDefinition(Schema schema) { private TimePartitioning getPartitionedTableDefinition(Schema schema) { String tablePartitionKey = bqConfig.getTablePartitionKey(); if (tablePartitionKey == null) { - throw new BQPartitionKeyNotSpecified(String.format("Partition key not specified for the table: %s", bqConfig.getTableName())); + throw new BQPartitionKeyNotSpecified( + String.format("Partition key not specified for the table: %s", bqConfig.getTableName())); } Optional partitionFieldOptional = schema.getFields() .stream() .filter(obj -> tablePartitionKey.equals(obj.getName())) .findFirst(); if (!partitionFieldOptional.isPresent()) { - throw new BQPartitionKeyNotSpecified(String.format("Partition key %s is not present in the schema", tablePartitionKey)); + throw new BQPartitionKeyNotSpecified( + String.format("Partition key %s is not present in the schema", tablePartitionKey)); } Field partitionField = partitionFieldOptional.get(); - if (partitionField.getType() == LegacySQLTypeName.TIMESTAMP || partitionField.getType() == LegacySQLTypeName.DATE) { + if (partitionField.getType() == LegacySQLTypeName.TIMESTAMP + || partitionField.getType() == LegacySQLTypeName.DATE) { Long partitionExpiry = bqConfig.getBigQueryTablePartitionExpiryMS(); return TimePartitioning.newBuilder(TimePartitioning.Type.DAY) .setField(tablePartitionKey) @@ -55,21 +58,26 @@ private TimePartitioning getPartitionedTableDefinition(Schema schema) { .setExpirationMs(partitionExpiry <= 0 ? 
null : partitionExpiry) .build(); } else { - throw new UnsupportedOperationException("Range BigQuery partitioning is not supported, supported partition fields have to be of DATE or TIMESTAMP type"); + throw new UnsupportedOperationException( + "Range BigQuery partitioning is not supported; partition fields have to be of DATE or TIMESTAMP type"); } } private Clustering getClusteredTableDefinition(Schema schema) { if (bqConfig.getTableClusteringKeys().isEmpty()) { - throw new BQClusteringKeysException(String.format("Clustering key not specified for the table: %s", bqConfig.getTableName())); + throw new BQClusteringKeysException( + String.format("Clustering key not specified for the table: %s", bqConfig.getTableName())); } List columnNames = bqConfig.getTableClusteringKeys(); if (columnNames.size() > MAX_CLUSTERING_KEYS) { - throw new BQClusteringKeysException(String.format("Max number of columns for clustering is %d", MAX_CLUSTERING_KEYS)); + throw new BQClusteringKeysException( + String.format("Max number of columns for clustering is %d", MAX_CLUSTERING_KEYS)); } List fieldNames = schema.getFields().stream().map(Field::getName).collect(Collectors.toList()); if (!fieldNames.containsAll(columnNames)) { - throw new BQClusteringKeysException(String.format("One or more column names specified %s not exist on the schema or a nested type which is not supported for clustering", columnNames)); + throw new BQClusteringKeysException(String.format( + "One or more of the specified column names %s do not exist in the schema, or are nested types, which are not supported for clustering", + columnNames)); } return Clustering.newBuilder().setFields(columnNames).build(); } diff --git a/src/main/java/io/odpf/depot/bigquery/converter/MessageRecordConverter.java b/src/main/java/org/raystack/depot/bigquery/converter/MessageRecordConverter.java similarity index 64% rename from src/main/java/io/odpf/depot/bigquery/converter/MessageRecordConverter.java rename to src/main/java/org/raystack/depot/bigquery/converter/MessageRecordConverter.java index 020371fc..fd10fc8d 100644 --- a/src/main/java/io/odpf/depot/bigquery/converter/MessageRecordConverter.java +++ b/src/main/java/org/raystack/depot/bigquery/converter/MessageRecordConverter.java @@ -1,18 +1,18 @@ -package io.odpf.depot.bigquery.converter; +package org.raystack.depot.bigquery.converter; -import io.odpf.depot.bigquery.models.Record; -import io.odpf.depot.bigquery.models.Records; -import io.odpf.depot.config.BigQuerySinkConfig; -import io.odpf.depot.error.ErrorInfo; -import io.odpf.depot.error.ErrorType; -import io.odpf.depot.exception.DeserializerException; -import io.odpf.depot.exception.EmptyMessageException; -import io.odpf.depot.exception.UnknownFieldsException; -import io.odpf.depot.message.OdpfMessage; -import io.odpf.depot.message.OdpfMessageParser; -import io.odpf.depot.message.OdpfMessageSchema; -import io.odpf.depot.message.ParsedOdpfMessage; -import io.odpf.depot.message.SinkConnectorSchemaMessageMode; +import org.raystack.depot.bigquery.models.Record; +import org.raystack.depot.bigquery.models.Records; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorType; +import org.raystack.depot.exception.DeserializerException; +import org.raystack.depot.exception.EmptyMessageException; +import org.raystack.depot.exception.UnknownFieldsException; +import org.raystack.depot.message.Message; +import org.raystack.depot.message.MessageParser; +import 
org.raystack.depot.message.MessageSchema; +import org.raystack.depot.message.ParsedMessage; +import org.raystack.depot.message.SinkConnectorSchemaMessageMode; import lombok.AllArgsConstructor; import lombok.extern.slf4j.Slf4j; @@ -25,16 +25,15 @@ @AllArgsConstructor @Slf4j public class MessageRecordConverter { - private final OdpfMessageParser parser; + private final MessageParser parser; private final BigQuerySinkConfig config; - private final OdpfMessageSchema schema; + private final MessageSchema schema; - - public Records convert(List messages) { + public Records convert(List messages) { ArrayList validRecords = new ArrayList<>(); ArrayList invalidRecords = new ArrayList<>(); for (int index = 0; index < messages.size(); index++) { - OdpfMessage message = messages.get(index); + Message message = messages.get(index); try { Record record = createRecord(message, index); validRecords.add(record); @@ -52,14 +51,15 @@ public Records convert(List messages) { return new Records(validRecords, invalidRecords); } - private Record createRecord(OdpfMessage message, int index) { + private Record createRecord(Message message, int index) { try { SinkConnectorSchemaMessageMode mode = config.getSinkConnectorSchemaMessageMode(); String schemaClass = mode == SinkConnectorSchemaMessageMode.LOG_MESSAGE - ? config.getSinkConnectorSchemaProtoMessageClass() : config.getSinkConnectorSchemaProtoKeyClass(); - ParsedOdpfMessage parsedOdpfMessage = parser.parse(message, mode, schemaClass); - parsedOdpfMessage.validate(config); - Map columns = parsedOdpfMessage.getMapping(schema); + ? config.getSinkConnectorSchemaProtoMessageClass() + : config.getSinkConnectorSchemaProtoKeyClass(); + ParsedMessage parsedMessage = parser.parse(message, mode, schemaClass); + parsedMessage.validate(config); + Map columns = parsedMessage.getMapping(schema); MessageRecordConverterUtils.addMetadata(columns, message, config); MessageRecordConverterUtils.addTimeStampColumnForJson(columns, config); return new Record(message.getMetadata(), columns, index, null); diff --git a/src/main/java/io/odpf/depot/bigquery/converter/MessageRecordConverterCache.java b/src/main/java/org/raystack/depot/bigquery/converter/MessageRecordConverterCache.java similarity index 73% rename from src/main/java/io/odpf/depot/bigquery/converter/MessageRecordConverterCache.java rename to src/main/java/org/raystack/depot/bigquery/converter/MessageRecordConverterCache.java index 4efacf20..5f6f552e 100644 --- a/src/main/java/io/odpf/depot/bigquery/converter/MessageRecordConverterCache.java +++ b/src/main/java/org/raystack/depot/bigquery/converter/MessageRecordConverterCache.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery.converter; +package org.raystack.depot.bigquery.converter; import lombok.Data; diff --git a/src/main/java/io/odpf/depot/bigquery/converter/MessageRecordConverterUtils.java b/src/main/java/org/raystack/depot/bigquery/converter/MessageRecordConverterUtils.java similarity index 58% rename from src/main/java/io/odpf/depot/bigquery/converter/MessageRecordConverterUtils.java rename to src/main/java/org/raystack/depot/bigquery/converter/MessageRecordConverterUtils.java index 14b496f8..9394a2ae 100644 --- a/src/main/java/io/odpf/depot/bigquery/converter/MessageRecordConverterUtils.java +++ b/src/main/java/org/raystack/depot/bigquery/converter/MessageRecordConverterUtils.java @@ -1,11 +1,11 @@ -package io.odpf.depot.bigquery.converter; +package org.raystack.depot.bigquery.converter; import com.google.api.client.util.DateTime; -import 
io.odpf.depot.common.TupleString; -import io.odpf.depot.config.BigQuerySinkConfig; -import io.odpf.depot.config.enums.SinkConnectorSchemaDataType; -import io.odpf.depot.message.OdpfMessage; -import io.odpf.depot.utils.DateUtils; +import org.raystack.depot.common.TupleString; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.config.enums.SinkConnectorSchemaDataType; +import org.raystack.depot.message.Message; +import org.raystack.depot.utils.DateUtils; import java.util.List; import java.util.Map; @@ -15,19 +15,20 @@ public class MessageRecordConverterUtils { public static final String JSON_TIME_STAMP_COLUMN = "event_timestamp"; - public static void addMetadata(Map columns, OdpfMessage message, BigQuerySinkConfig config) { + public static void addMetadata(Map columns, Message message, BigQuerySinkConfig config) { if (config.shouldAddMetadata()) { List metadataColumnsTypes = config.getMetadataColumnsTypes(); Map metadata = message.getMetadata(metadataColumnsTypes); - Map finalMetadata = metadataColumnsTypes.stream().collect(Collectors.toMap(TupleString::getFirst, t -> { - String key = t.getFirst(); - String dataType = t.getSecond(); - Object value = metadata.get(key); - if (value instanceof Long && dataType.equals("timestamp")) { - value = new DateTime((long) value); - } - return value; - })); + Map finalMetadata = metadataColumnsTypes.stream() + .collect(Collectors.toMap(TupleString::getFirst, t -> { + String key = t.getFirst(); + String dataType = t.getSecond(); + Object value = metadata.get(key); + if (value instanceof Long && dataType.equals("timestamp")) { + value = new DateTime((long) value); + } + return value; + })); if (config.getBqMetadataNamespace().isEmpty()) { columns.putAll(finalMetadata); } else { @@ -44,4 +45,3 @@ public static void addTimeStampColumnForJson(Map columns, BigQue } } } - diff --git a/src/main/java/io/odpf/depot/bigquery/error/ErrorDescriptor.java b/src/main/java/org/raystack/depot/bigquery/error/ErrorDescriptor.java similarity index 68% rename from src/main/java/io/odpf/depot/bigquery/error/ErrorDescriptor.java rename to src/main/java/org/raystack/depot/bigquery/error/ErrorDescriptor.java index b9b4d0bb..041b7abe 100644 --- a/src/main/java/io/odpf/depot/bigquery/error/ErrorDescriptor.java +++ b/src/main/java/org/raystack/depot/bigquery/error/ErrorDescriptor.java @@ -1,13 +1,14 @@ -package io.odpf.depot.bigquery.error; - +package org.raystack.depot.bigquery.error; /** - * Descriptor interface that defines the various error descriptors and the corresponding error types. + * Descriptor interface that defines the various error descriptors and the + * corresponding error types. */ public interface ErrorDescriptor { /** - * If the implementing descriptor matches the condition as prescribed in the concrete implementation. + * If the implementing descriptor matches the condition as prescribed in the + * concrete implementation. * * @return - true if the condition matches, false otherwise. 
*/ diff --git a/src/main/java/io/odpf/depot/bigquery/error/ErrorParser.java b/src/main/java/org/raystack/depot/bigquery/error/ErrorParser.java similarity index 95% rename from src/main/java/io/odpf/depot/bigquery/error/ErrorParser.java rename to src/main/java/org/raystack/depot/bigquery/error/ErrorParser.java index 6752b7ce..55dea722 100644 --- a/src/main/java/io/odpf/depot/bigquery/error/ErrorParser.java +++ b/src/main/java/org/raystack/depot/bigquery/error/ErrorParser.java @@ -1,5 +1,4 @@ -package io.odpf.depot.bigquery.error; - +package org.raystack.depot.bigquery.error; import com.google.cloud.bigquery.BigQueryError; diff --git a/src/main/java/io/odpf/depot/bigquery/error/InvalidSchemaError.java b/src/main/java/org/raystack/depot/bigquery/error/InvalidSchemaError.java similarity index 93% rename from src/main/java/io/odpf/depot/bigquery/error/InvalidSchemaError.java rename to src/main/java/org/raystack/depot/bigquery/error/InvalidSchemaError.java index 185e32f4..8d3bb523 100644 --- a/src/main/java/io/odpf/depot/bigquery/error/InvalidSchemaError.java +++ b/src/main/java/org/raystack/depot/bigquery/error/InvalidSchemaError.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery.error; +package org.raystack.depot.bigquery.error; import lombok.AllArgsConstructor; @@ -9,7 +9,7 @@ * or an invalid table schema. * * https://cloud.google.com/bigquery/docs/error-messages - * */ + */ public class InvalidSchemaError implements ErrorDescriptor { private final String reason; diff --git a/src/main/java/io/odpf/depot/bigquery/error/OOBError.java b/src/main/java/org/raystack/depot/bigquery/error/OOBError.java similarity index 60% rename from src/main/java/io/odpf/depot/bigquery/error/OOBError.java rename to src/main/java/org/raystack/depot/bigquery/error/OOBError.java index 617b05d7..06ad0f6a 100644 --- a/src/main/java/io/odpf/depot/bigquery/error/OOBError.java +++ b/src/main/java/org/raystack/depot/bigquery/error/OOBError.java @@ -1,12 +1,13 @@ -package io.odpf.depot.bigquery.error; +package org.raystack.depot.bigquery.error; import lombok.AllArgsConstructor; @AllArgsConstructor /** - * Out of bounds are caused when the partitioned column has a date value less than - * 5 years and more than 1 year in future + * Out-of-bounds errors are caused when the partitioned column has a date value + * more than 5 years in the past or more than 1 year in the future - * */ + */ public class OOBError implements ErrorDescriptor { private final String reason; @@ -15,8 +16,9 @@ public class OOBError implements ErrorDescriptor { @Override public boolean matches() { return reason.equals("invalid") - && ((message.contains("is outside the allowed bounds") && message.contains("days in the past and") && message.contains("days in the future")) - || message.contains("out of range")); + && ((message.contains("is outside the allowed bounds") && message.contains("days in the past and") + && message.contains("days in the future")) + || message.contains("out of range")); } @Override diff --git a/src/main/java/io/odpf/depot/bigquery/error/StoppedError.java b/src/main/java/org/raystack/depot/bigquery/error/StoppedError.java similarity index 73% rename from src/main/java/io/odpf/depot/bigquery/error/StoppedError.java rename to src/main/java/org/raystack/depot/bigquery/error/StoppedError.java index ba8ad929..eb2208ba 100644 --- a/src/main/java/io/odpf/depot/bigquery/error/StoppedError.java +++ b/src/main/java/org/raystack/depot/bigquery/error/StoppedError.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery.error; +package org.raystack.depot.bigquery.error; import lombok.AllArgsConstructor; @@ -6,11 
+6,12 @@ /** * stopped 200 This status code returns when a job is canceled. * This will be returned if a batch of insertion has some bad records - * which caused the job to be cancelled. Bad records will have some *other* error + * which caused the job to be cancelled. Bad records will have some *other* + * error * but rest of records will be marked as "stopped" and can be sent as is * * https://cloud.google.com/bigquery/docs/error-messages - * */ + */ public class StoppedError implements ErrorDescriptor { private final String reason; @@ -22,6 +23,6 @@ public boolean matches() { @Override public String toString() { - return "StoppedError: Failed to insert this row because of some (other)error records in batch"; + return "StoppedError: BigQuery encountered an error on individual rows in the request, none of the rows are inserted. This error can be retried"; } } diff --git a/src/main/java/io/odpf/depot/bigquery/error/UnknownError.java b/src/main/java/org/raystack/depot/bigquery/error/UnknownError.java similarity index 91% rename from src/main/java/io/odpf/depot/bigquery/error/UnknownError.java rename to src/main/java/org/raystack/depot/bigquery/error/UnknownError.java index e031b78b..32ce0b8a 100644 --- a/src/main/java/io/odpf/depot/bigquery/error/UnknownError.java +++ b/src/main/java/org/raystack/depot/bigquery/error/UnknownError.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery.error; +package org.raystack.depot.bigquery.error; import lombok.AllArgsConstructor; @@ -6,7 +6,7 @@ /** * UnknownError is used when error factory failed to match any possible * known errors - * */ + */ public class UnknownError implements ErrorDescriptor { private String reason; diff --git a/src/main/java/io/odpf/depot/bigquery/exception/BQClusteringKeysException.java b/src/main/java/org/raystack/depot/bigquery/exception/BQClusteringKeysException.java similarity index 76% rename from src/main/java/io/odpf/depot/bigquery/exception/BQClusteringKeysException.java rename to src/main/java/org/raystack/depot/bigquery/exception/BQClusteringKeysException.java index 369c2d11..8f0fd2e8 100644 --- a/src/main/java/io/odpf/depot/bigquery/exception/BQClusteringKeysException.java +++ b/src/main/java/org/raystack/depot/bigquery/exception/BQClusteringKeysException.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery.exception; +package org.raystack.depot.bigquery.exception; public class BQClusteringKeysException extends RuntimeException { public BQClusteringKeysException(String message) { diff --git a/src/main/java/io/odpf/depot/bigquery/exception/BQDatasetLocationChangedException.java b/src/main/java/org/raystack/depot/bigquery/exception/BQDatasetLocationChangedException.java similarity index 78% rename from src/main/java/io/odpf/depot/bigquery/exception/BQDatasetLocationChangedException.java rename to src/main/java/org/raystack/depot/bigquery/exception/BQDatasetLocationChangedException.java index dc2c39c0..d30d0a19 100644 --- a/src/main/java/io/odpf/depot/bigquery/exception/BQDatasetLocationChangedException.java +++ b/src/main/java/org/raystack/depot/bigquery/exception/BQDatasetLocationChangedException.java @@ -1,8 +1,7 @@ -package io.odpf.depot.bigquery.exception; +package org.raystack.depot.bigquery.exception; public class BQDatasetLocationChangedException extends RuntimeException { public BQDatasetLocationChangedException(String message) { super(message); } } - diff --git a/src/main/java/io/odpf/depot/bigquery/exception/BQPartitionKeyNotSpecified.java 
b/src/main/java/org/raystack/depot/bigquery/exception/BQPartitionKeyNotSpecified.java similarity index 76% rename from src/main/java/io/odpf/depot/bigquery/exception/BQPartitionKeyNotSpecified.java rename to src/main/java/org/raystack/depot/bigquery/exception/BQPartitionKeyNotSpecified.java index d09da7a7..eb9524ba 100644 --- a/src/main/java/io/odpf/depot/bigquery/exception/BQPartitionKeyNotSpecified.java +++ b/src/main/java/org/raystack/depot/bigquery/exception/BQPartitionKeyNotSpecified.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery.exception; +package org.raystack.depot.bigquery.exception; public class BQPartitionKeyNotSpecified extends RuntimeException { public BQPartitionKeyNotSpecified(String message) { diff --git a/src/main/java/io/odpf/depot/bigquery/exception/BQSchemaMappingException.java b/src/main/java/org/raystack/depot/bigquery/exception/BQSchemaMappingException.java similarity index 76% rename from src/main/java/io/odpf/depot/bigquery/exception/BQSchemaMappingException.java rename to src/main/java/org/raystack/depot/bigquery/exception/BQSchemaMappingException.java index aa635a04..29e23889 100644 --- a/src/main/java/io/odpf/depot/bigquery/exception/BQSchemaMappingException.java +++ b/src/main/java/org/raystack/depot/bigquery/exception/BQSchemaMappingException.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery.exception; +package org.raystack.depot.bigquery.exception; public class BQSchemaMappingException extends RuntimeException { public BQSchemaMappingException(String message) { diff --git a/src/main/java/io/odpf/depot/bigquery/exception/BQTableUpdateFailure.java b/src/main/java/org/raystack/depot/bigquery/exception/BQTableUpdateFailure.java similarity index 78% rename from src/main/java/io/odpf/depot/bigquery/exception/BQTableUpdateFailure.java rename to src/main/java/org/raystack/depot/bigquery/exception/BQTableUpdateFailure.java index dd91edff..8ebe8a05 100644 --- a/src/main/java/io/odpf/depot/bigquery/exception/BQTableUpdateFailure.java +++ b/src/main/java/org/raystack/depot/bigquery/exception/BQTableUpdateFailure.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery.exception; +package org.raystack.depot.bigquery.exception; public class BQTableUpdateFailure extends RuntimeException { public BQTableUpdateFailure(String message, Throwable rootCause) { diff --git a/src/main/java/io/odpf/depot/bigquery/exception/BigQuerySinkException.java b/src/main/java/org/raystack/depot/bigquery/exception/BigQuerySinkException.java similarity index 74% rename from src/main/java/io/odpf/depot/bigquery/exception/BigQuerySinkException.java rename to src/main/java/org/raystack/depot/bigquery/exception/BigQuerySinkException.java index b81271b9..51015d34 100644 --- a/src/main/java/io/odpf/depot/bigquery/exception/BigQuerySinkException.java +++ b/src/main/java/org/raystack/depot/bigquery/exception/BigQuerySinkException.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery.exception; +package org.raystack.depot.bigquery.exception; import lombok.EqualsAndHashCode; diff --git a/src/main/java/io/odpf/depot/bigquery/handler/ErrorHandler.java b/src/main/java/org/raystack/depot/bigquery/handler/ErrorHandler.java similarity index 70% rename from src/main/java/io/odpf/depot/bigquery/handler/ErrorHandler.java rename to src/main/java/org/raystack/depot/bigquery/handler/ErrorHandler.java index d4ecc7cf..ae34313a 100644 --- a/src/main/java/io/odpf/depot/bigquery/handler/ErrorHandler.java +++ b/src/main/java/org/raystack/depot/bigquery/handler/ErrorHandler.java @@ -1,7 +1,7 @@ -package 
io.odpf.depot.bigquery.handler; +package org.raystack.depot.bigquery.handler; import com.google.cloud.bigquery.BigQueryError; -import io.odpf.depot.bigquery.models.Record; +import org.raystack.depot.bigquery.models.Record; import java.util.List; import java.util.Map; diff --git a/src/main/java/io/odpf/depot/bigquery/handler/ErrorHandlerFactory.java b/src/main/java/org/raystack/depot/bigquery/handler/ErrorHandlerFactory.java similarity index 51% rename from src/main/java/io/odpf/depot/bigquery/handler/ErrorHandlerFactory.java rename to src/main/java/org/raystack/depot/bigquery/handler/ErrorHandlerFactory.java index efeb8e34..9ba1f719 100644 --- a/src/main/java/io/odpf/depot/bigquery/handler/ErrorHandlerFactory.java +++ b/src/main/java/org/raystack/depot/bigquery/handler/ErrorHandlerFactory.java @@ -1,13 +1,14 @@ -package io.odpf.depot.bigquery.handler; +package org.raystack.depot.bigquery.handler; -import io.odpf.depot.bigquery.client.BigQueryClient; -import io.odpf.depot.config.enums.SinkConnectorSchemaDataType; -import io.odpf.depot.config.BigQuerySinkConfig; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.metrics.StatsDReporter; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.config.enums.SinkConnectorSchemaDataType; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.metrics.StatsDReporter; +import org.raystack.depot.bigquery.client.BigQueryClient; public class ErrorHandlerFactory { - public static ErrorHandler create(BigQuerySinkConfig sinkConfig, BigQueryClient bigQueryClient, StatsDReporter statsDReprter) { + public static ErrorHandler create(BigQuerySinkConfig sinkConfig, BigQueryClient bigQueryClient, + StatsDReporter statsDReprter) { if (SinkConnectorSchemaDataType.JSON == sinkConfig.getSinkConnectorSchemaDataType()) { return new JsonErrorHandler( bigQueryClient, diff --git a/src/main/java/io/odpf/depot/bigquery/handler/JsonErrorHandler.java b/src/main/java/org/raystack/depot/bigquery/handler/JsonErrorHandler.java similarity index 80% rename from src/main/java/io/odpf/depot/bigquery/handler/JsonErrorHandler.java rename to src/main/java/org/raystack/depot/bigquery/handler/JsonErrorHandler.java index e882bdf9..83f335dc 100644 --- a/src/main/java/io/odpf/depot/bigquery/handler/JsonErrorHandler.java +++ b/src/main/java/org/raystack/depot/bigquery/handler/JsonErrorHandler.java @@ -1,15 +1,15 @@ -package io.odpf.depot.bigquery.handler; +package org.raystack.depot.bigquery.handler; import com.google.cloud.bigquery.BigQueryError; import com.google.cloud.bigquery.Field; import com.google.cloud.bigquery.FieldList; import com.google.cloud.bigquery.LegacySQLTypeName; import com.google.cloud.bigquery.Schema; -import io.odpf.depot.bigquery.client.BigQueryClient; -import io.odpf.depot.bigquery.models.Record; -import io.odpf.depot.common.TupleString; -import io.odpf.depot.config.BigQuerySinkConfig; -import io.odpf.depot.metrics.Instrumentation; +import org.raystack.depot.bigquery.models.Record; +import org.raystack.depot.common.TupleString; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.bigquery.client.BigQueryClient; import java.util.ArrayList; import java.util.Collection; @@ -33,7 +33,8 @@ public class JsonErrorHandler implements ErrorHandler { private final Instrumentation instrumentation; private final Map defaultColumnsMap; - public JsonErrorHandler(BigQueryClient bigQueryClient, BigQuerySinkConfig bigQuerySinkConfig, 
Instrumentation instrumentation) { + public JsonErrorHandler(BigQueryClient bigQueryClient, BigQuerySinkConfig bigQuerySinkConfig, + Instrumentation instrumentation) { this.instrumentation = instrumentation; this.bigQueryClient = bigQueryClient; @@ -73,13 +74,14 @@ public void handle(Map> insertErrors, List rec } } - private Set getColumnNamesForRecordsWhichHadUnknownBqFieldErrors(List records, Entry> x) { + private Set getColumnNamesForRecordsWhichHadUnknownBqFieldErrors(List records, + Entry> x) { int recordKey = x.getKey().intValue(); return records.get(recordKey).getColumns().keySet(); } - - private List>> getUnknownFieldBqErrors(Map> insertErrors) { + private List>> getUnknownFieldBqErrors( + Map> insertErrors) { return insertErrors.entrySet().stream() .filter((x) -> { List value = x.getValue(); @@ -90,8 +92,9 @@ private List>> getUnknownFieldBqErrors(Map getBqErrorsWithNoSuchFields(List value) { return value.stream() - .filter(bigQueryError -> bigQueryError.getReason().equals("invalid") && bigQueryError.getMessage().contains("no such field") - ).collect(Collectors.toList()); + .filter(bigQueryError -> bigQueryError.getReason().equals("invalid") + && bigQueryError.getMessage().contains("no such field")) + .collect(Collectors.toList()); } /** @@ -100,10 +103,12 @@ private List getBqErrorsWithNoSuchFields(List valu private Field getField(String key) { if (!bqMetadataNamespace.isEmpty()) { - throw new UnsupportedOperationException("metadata namespace is not supported, because nested json structure is not supported"); + throw new UnsupportedOperationException( + "metadata namespace is not supported, because nested json structure is not supported"); } if (metadataColumnsTypesMap.containsKey(key)) { - return Field.newBuilder(key, LegacySQLTypeName.valueOfStrict(metadataColumnsTypesMap.get(key).toUpperCase())) + return Field + .newBuilder(key, LegacySQLTypeName.valueOfStrict(metadataColumnsTypesMap.get(key).toUpperCase())) .setMode(Field.Mode.NULLABLE) .build(); } @@ -113,7 +118,8 @@ private Field getField(String key) { .build(); } if (!castAllColumnsToStringDataType) { - throw new UnsupportedOperationException("only string data type is supported for fields other than partition key"); + throw new UnsupportedOperationException( + "only string data type is supported for fields other than partition key"); } return Field.newBuilder(key, LegacySQLTypeName.STRING) .setMode(Field.Mode.NULLABLE) diff --git a/src/main/java/io/odpf/depot/bigquery/handler/NoopErrorHandler.java b/src/main/java/org/raystack/depot/bigquery/handler/NoopErrorHandler.java similarity index 69% rename from src/main/java/io/odpf/depot/bigquery/handler/NoopErrorHandler.java rename to src/main/java/org/raystack/depot/bigquery/handler/NoopErrorHandler.java index ae205126..4b2053fc 100644 --- a/src/main/java/io/odpf/depot/bigquery/handler/NoopErrorHandler.java +++ b/src/main/java/org/raystack/depot/bigquery/handler/NoopErrorHandler.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery.handler; +package org.raystack.depot.bigquery.handler; import lombok.extern.slf4j.Slf4j; diff --git a/src/main/java/io/odpf/depot/bigquery/json/BigqueryJsonUpdateListener.java b/src/main/java/org/raystack/depot/bigquery/json/BigqueryJsonUpdateListener.java similarity index 71% rename from src/main/java/io/odpf/depot/bigquery/json/BigqueryJsonUpdateListener.java rename to src/main/java/org/raystack/depot/bigquery/json/BigqueryJsonUpdateListener.java index 39d3b62c..6d0b3364 100644 --- 
a/src/main/java/io/odpf/depot/bigquery/json/BigqueryJsonUpdateListener.java +++ b/src/main/java/org/raystack/depot/bigquery/json/BigqueryJsonUpdateListener.java @@ -1,20 +1,20 @@ -package io.odpf.depot.bigquery.json; +package org.raystack.depot.bigquery.json; import com.google.cloud.bigquery.BigQueryException; import com.google.cloud.bigquery.Field; import com.google.cloud.bigquery.FieldList; import com.google.cloud.bigquery.LegacySQLTypeName; import com.google.cloud.bigquery.Schema; -import io.odpf.depot.bigquery.exception.BQTableUpdateFailure; -import io.odpf.depot.bigquery.client.BigQueryClient; -import io.odpf.depot.bigquery.converter.MessageRecordConverter; -import io.odpf.depot.bigquery.converter.MessageRecordConverterCache; -import io.odpf.depot.bigquery.proto.BigqueryFields; -import io.odpf.depot.common.TupleString; -import io.odpf.depot.config.BigQuerySinkConfig; -import io.odpf.depot.message.OdpfMessageParser; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.stencil.OdpfStencilUpdateListener; +import org.raystack.depot.bigquery.client.BigQueryClient; +import org.raystack.depot.bigquery.converter.MessageRecordConverter; +import org.raystack.depot.bigquery.converter.MessageRecordConverterCache; +import org.raystack.depot.bigquery.exception.BQTableUpdateFailure; +import org.raystack.depot.bigquery.proto.BigqueryFields; +import org.raystack.depot.common.TupleString; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.message.MessageParser; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.stencil.DepotStencilUpdateListener; import java.util.ArrayList; import java.util.HashSet; @@ -23,25 +23,27 @@ import java.util.Set; import java.util.stream.Collectors; -public class BigqueryJsonUpdateListener extends OdpfStencilUpdateListener { +public class BigqueryJsonUpdateListener extends DepotStencilUpdateListener { private final MessageRecordConverterCache converterCache; private final BigQuerySinkConfig config; private final BigQueryClient bigQueryClient; private final Instrumentation instrumentation; - public BigqueryJsonUpdateListener(BigQuerySinkConfig config, MessageRecordConverterCache converterCache, BigQueryClient bigQueryClient, Instrumentation instrumentation) { + public BigqueryJsonUpdateListener(BigQuerySinkConfig config, MessageRecordConverterCache converterCache, + BigQueryClient bigQueryClient, Instrumentation instrumentation) { this.converterCache = converterCache; this.config = config; this.bigQueryClient = bigQueryClient; this.instrumentation = instrumentation; if (!config.getSinkBigqueryDynamicSchemaEnable()) { - throw new UnsupportedOperationException("currently only schema inferred from incoming data is supported, stencil schema support for json will be added in future"); + throw new UnsupportedOperationException( + "currently only schema inferred from incoming data is supported, stencil schema support for json will be added in future"); } } @Override public void updateSchema() { - OdpfMessageParser parser = getOdpfMessageParser(); + MessageParser parser = getMessageParser(); MessageRecordConverter messageRecordConverter = new MessageRecordConverter(parser, config, null); converterCache.setMessageRecordConverter(messageRecordConverter); List defaultColumns = config.getSinkBigqueryDefaultColumns(); @@ -50,7 +52,8 @@ public void updateSchema() { .map(this::getField) .collect(Collectors.toCollection(HashSet::new)); if (config.shouldAddMetadata() && !config.getBqMetadataNamespace().isEmpty()) { - 
throw new UnsupportedOperationException("metadata namespace is not supported, because nested json structure is not supported");
+            throw new UnsupportedOperationException(
+                    "metadata namespace is not supported, because nested json structure is not supported");
         }
         addMetadataFields(fieldsToBeUpdated, defaultColumns);
         try {
@@ -66,7 +69,8 @@ public void updateSchema() {
     }

     /*
-     throws error incase there are duplicate fields between metadata and default columns config
+     * throws an error in case there are duplicate fields between metadata and
+     * default columns config
      */
     private void addMetadataFields(HashSet<Field> fieldsToBeUpdated, List<TupleString> defaultColumns) {
         if (config.shouldAddMetadata()) {
@@ -81,7 +85,8 @@ private void addMetadataFields(HashSet fieldsToBeUpdated, List defaultColumnNames.contains(m.getName())).findFirst();
             if (duplicateField.isPresent()) {
                 String duplicateFieldName = duplicateField.get().getName();
-                instrumentation.logError("duplicate key found in default columns and metadata config {}", duplicateFieldName);
+                instrumentation.logError("duplicate key found in default columns and metadata config {}",
+                        duplicateFieldName);
                 throw new IllegalArgumentException("duplicate field called "
                         + duplicateFieldName
                         + " is present in both default columns config and metadata config");
@@ -97,7 +102,8 @@ private Field getField(TupleString tupleString) {
     }

     /**
-     * Range BigQuery partitioning is not supported, supported partition fields have to be of DATE or TIMESTAMP type..
+     * Range BigQuery partitioning is not supported; supported partition fields have
+     * to be of DATE or TIMESTAMP type.
      */
     private Field checkAndCreateField(String fieldName, LegacySQLTypeName fieldDataType) {
         Boolean isPartitioningEnabled = config.isTablePartitioningEnabled();
@@ -105,9 +111,11 @@ private Field checkAndCreateField(String fieldName, LegacySQLTypeName fieldDataT
             return Field.newBuilder(fieldName, fieldDataType).setMode(Field.Mode.NULLABLE).build();
         }
         String partitionKey = config.getTablePartitionKey();
-        boolean isValidPartitionDataType = (fieldDataType == LegacySQLTypeName.TIMESTAMP || fieldDataType == LegacySQLTypeName.DATE);
+        boolean isValidPartitionDataType = (fieldDataType == LegacySQLTypeName.TIMESTAMP
+                || fieldDataType == LegacySQLTypeName.DATE);
         if (partitionKey.equals(fieldName) && !isValidPartitionDataType) {
-            throw new UnsupportedOperationException("supported partition fields have to be of DATE or TIMESTAMP type..");
+            throw new UnsupportedOperationException(
+                    "supported partition fields have to be of DATE or TIMESTAMP type..");
         }
         return Field.newBuilder(fieldName, fieldDataType).setMode(Field.Mode.NULLABLE).build();
     }
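Before the new BQField model below, a standalone restatement of the rule checkAndCreateField enforces; the class name and the STRING input here are hypothetical, while the check itself mirrors the snippet in the diff:

```java
import com.google.cloud.bigquery.LegacySQLTypeName;

public class PartitionKeyRule {
    public static void main(String[] args) {
        // Hypothetical partition key column typed as STRING: it is rejected,
        // because only DATE and TIMESTAMP columns can back a partition key.
        LegacySQLTypeName fieldDataType = LegacySQLTypeName.STRING;
        boolean isValidPartitionDataType = (fieldDataType == LegacySQLTypeName.TIMESTAMP
                || fieldDataType == LegacySQLTypeName.DATE);
        if (!isValidPartitionDataType) {
            throw new UnsupportedOperationException(
                    "supported partition fields have to be of DATE or TIMESTAMP type");
        }
    }
}
```

diff --git a/src/main/java/org/raystack/depot/bigquery/models/BQField.java b/src/main/java/org/raystack/depot/bigquery/models/BQField.java
new file mode 100644
index 00000000..e6b4e4c2
--- /dev/null
+++ b/src/main/java/org/raystack/depot/bigquery/models/BQField.java
@@ -0,0 +1,113 @@
+package org.raystack.depot.bigquery.models;
+
+import com.google.cloud.bigquery.Field;
+import com.google.cloud.bigquery.FieldList;
+import com.google.cloud.bigquery.LegacySQLTypeName;
+import com.google.protobuf.DescriptorProtos;
+import org.raystack.depot.bigquery.exception.BQSchemaMappingException;
+import org.raystack.depot.message.proto.Constants;
+import org.raystack.depot.message.proto.ProtoField;
+import lombok.EqualsAndHashCode;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+@EqualsAndHashCode
+public class BQField {
+    private static final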
Map FIELD_LABEL_TO_BQ_MODE_MAP = new HashMap() { + { + put(DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, Field.Mode.NULLABLE); + put(DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, Field.Mode.REPEATED); + put(DescriptorProtos.FieldDescriptorProto.Label.LABEL_REQUIRED, Field.Mode.REQUIRED); + } + }; + private static final Map FIELD_TYPE_TO_BQ_TYPE_MAP = new HashMap() { + { + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_BYTES, LegacySQLTypeName.BYTES); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, LegacySQLTypeName.STRING); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_ENUM, LegacySQLTypeName.STRING); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_DOUBLE, LegacySQLTypeName.FLOAT); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT, LegacySQLTypeName.FLOAT); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_BOOL, LegacySQLTypeName.BOOLEAN); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, LegacySQLTypeName.INTEGER); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_UINT64, LegacySQLTypeName.INTEGER); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, LegacySQLTypeName.INTEGER); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_UINT32, LegacySQLTypeName.INTEGER); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FIXED64, LegacySQLTypeName.INTEGER); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FIXED32, LegacySQLTypeName.INTEGER); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SFIXED32, LegacySQLTypeName.INTEGER); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SFIXED64, LegacySQLTypeName.INTEGER); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SINT32, LegacySQLTypeName.INTEGER); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SINT64, LegacySQLTypeName.INTEGER); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, LegacySQLTypeName.RECORD); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_GROUP, LegacySQLTypeName.RECORD); + } + }; + private static final Map FIELD_NAME_TO_BQ_TYPE_MAP = new HashMap() { + { + put(Constants.ProtobufTypeName.TIMESTAMP_PROTOBUF_TYPE_NAME, LegacySQLTypeName.TIMESTAMP); + put(Constants.ProtobufTypeName.STRUCT_PROTOBUF_TYPE_NAME, LegacySQLTypeName.STRING); + put(Constants.ProtobufTypeName.DURATION_PROTOBUF_TYPE_NAME, LegacySQLTypeName.RECORD); + } + }; + private final String name; + private final Field.Mode mode; + private final LegacySQLTypeName type; + private List subFields; + + public BQField(String name, Field.Mode mode, LegacySQLTypeName type, List subFields) { + this.name = name; + this.mode = mode; + this.type = type; + this.subFields = subFields; + } + + public BQField(ProtoField protoField) { + this.name = protoField.getName(); + this.mode = FIELD_LABEL_TO_BQ_MODE_MAP.get(protoField.getLabel()); + this.type = getType(protoField); + this.subFields = new ArrayList<>(); + } + + /** + * Map fully qualified type name or protobuf type to bigquery types. + * Fully qualified name will be used as mapping key before protobuf type being + * used + * + * @param protoField + * @return + */ + private LegacySQLTypeName getType(ProtoField protoField) { + LegacySQLTypeName typeFromFieldName = FIELD_NAME_TO_BQ_TYPE_MAP.get(protoField.getTypeName()) != null + ? 
FIELD_NAME_TO_BQ_TYPE_MAP.get(protoField.getTypeName()) + : FIELD_TYPE_TO_BQ_TYPE_MAP.get(protoField.getType()); + if (typeFromFieldName == null) { + throw new BQSchemaMappingException( + String.format("No type mapping found for field: %s, fieldType: %s, typeName: %s", + protoField.getName(), protoField.getType(), protoField.getTypeName())); + } + return typeFromFieldName; + } + + public void setSubFields(List fields) { + this.subFields = fields; + } + + public Field getField() { + if (this.subFields == null || this.subFields.size() == 0) { + return Field.newBuilder(this.name, this.type).setMode(this.mode).build(); + } + return Field.newBuilder(this.name, this.type, FieldList.of(subFields)).setMode(this.mode).build(); + } + + public String getName() { + return name; + } + + public LegacySQLTypeName getType() { + return type; + } + +} diff --git a/src/main/java/io/odpf/depot/bigquery/models/Record.java b/src/main/java/org/raystack/depot/bigquery/models/Record.java similarity index 82% rename from src/main/java/io/odpf/depot/bigquery/models/Record.java rename to src/main/java/org/raystack/depot/bigquery/models/Record.java index 676df8bf..54a5dbbd 100644 --- a/src/main/java/io/odpf/depot/bigquery/models/Record.java +++ b/src/main/java/org/raystack/depot/bigquery/models/Record.java @@ -1,6 +1,6 @@ -package io.odpf.depot.bigquery.models; +package org.raystack.depot.bigquery.models; -import io.odpf.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorInfo; import lombok.AllArgsConstructor; import lombok.Builder; import lombok.EqualsAndHashCode; diff --git a/src/main/java/io/odpf/depot/bigquery/models/Records.java b/src/main/java/org/raystack/depot/bigquery/models/Records.java similarity index 87% rename from src/main/java/io/odpf/depot/bigquery/models/Records.java rename to src/main/java/org/raystack/depot/bigquery/models/Records.java index 59392f9a..0ffcfc37 100644 --- a/src/main/java/io/odpf/depot/bigquery/models/Records.java +++ b/src/main/java/org/raystack/depot/bigquery/models/Records.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery.models; +package org.raystack.depot.bigquery.models; import lombok.AllArgsConstructor; import lombok.EqualsAndHashCode; diff --git a/src/main/java/io/odpf/depot/bigquery/proto/BigqueryFields.java b/src/main/java/org/raystack/depot/bigquery/proto/BigqueryFields.java similarity index 67% rename from src/main/java/io/odpf/depot/bigquery/proto/BigqueryFields.java rename to src/main/java/org/raystack/depot/bigquery/proto/BigqueryFields.java index 40bbc2d4..cc8d5b4c 100644 --- a/src/main/java/io/odpf/depot/bigquery/proto/BigqueryFields.java +++ b/src/main/java/org/raystack/depot/bigquery/proto/BigqueryFields.java @@ -1,11 +1,11 @@ -package io.odpf.depot.bigquery.proto; +package org.raystack.depot.bigquery.proto; import com.google.cloud.bigquery.Field; import com.google.cloud.bigquery.FieldList; import com.google.cloud.bigquery.LegacySQLTypeName; -import io.odpf.depot.common.TupleString; -import io.odpf.depot.message.proto.ProtoField; -import io.odpf.depot.bigquery.models.BQField; +import org.raystack.depot.bigquery.models.BQField; +import org.raystack.depot.common.TupleString; +import org.raystack.depot.message.proto.ProtoField; import java.util.ArrayList; import java.util.List; @@ -16,21 +16,26 @@ public static List getMetadataFields(List metadataColumnsTyp return metadataColumnsTypes.stream().map( tuple -> Field.newBuilder(tuple.getFirst(), LegacySQLTypeName.valueOf(tuple.getSecond())) .setMode(Field.Mode.NULLABLE) - 
.build()).collect(Collectors.toList());
+                        .build())
+                .collect(Collectors.toList());
     }

     /*
-     throws an exception if typeName is not recognized by LegacySQLTypeName.valueOfStric
+     * throws an exception if typeName is not recognized by
+     * LegacySQLTypeName.valueOfStrict
      */
     public static List<Field> getMetadataFieldsStrict(List<TupleString> metadataColumnsTypes) {
         return metadataColumnsTypes.stream().map(
-                tuple -> Field.newBuilder(tuple.getFirst(), LegacySQLTypeName.valueOfStrict(tuple.getSecond().toUpperCase()))
+                tuple -> Field
+                        .newBuilder(tuple.getFirst(), LegacySQLTypeName.valueOfStrict(tuple.getSecond().toUpperCase()))
                         .setMode(Field.Mode.NULLABLE)
-                        .build()).collect(Collectors.toList());
+                        .build())
+                .collect(Collectors.toList());
     }

     public static Field getNamespacedMetadataField(String namespace, List<TupleString> metadataColumnsTypes) {
-        return Field.newBuilder(namespace, LegacySQLTypeName.RECORD, FieldList.of(getMetadataFields(metadataColumnsTypes)))
+        return Field
+                .newBuilder(namespace, LegacySQLTypeName.RECORD, FieldList.of(getMetadataFields(metadataColumnsTypes)))
                 .setMode(Field.Mode.NULLABLE)
                 .build();
     }
diff --git a/src/main/java/io/odpf/depot/bigquery/proto/BigqueryProtoUpdateListener.java b/src/main/java/org/raystack/depot/bigquery/proto/BigqueryProtoUpdateListener.java
similarity index 67%
rename from src/main/java/io/odpf/depot/bigquery/proto/BigqueryProtoUpdateListener.java
rename to src/main/java/org/raystack/depot/bigquery/proto/BigqueryProtoUpdateListener.java
index 07d11a2e..49231e74 100644
--- a/src/main/java/io/odpf/depot/bigquery/proto/BigqueryProtoUpdateListener.java
+++ b/src/main/java/org/raystack/depot/bigquery/proto/BigqueryProtoUpdateListener.java
@@ -1,21 +1,21 @@
-package io.odpf.depot.bigquery.proto;
+package org.raystack.depot.bigquery.proto;

 import com.google.cloud.bigquery.BigQueryException;
 import com.google.cloud.bigquery.Field;
 import com.google.protobuf.Descriptors.Descriptor;
-import io.odpf.depot.bigquery.exception.BQSchemaMappingException;
-import io.odpf.depot.bigquery.exception.BQTableUpdateFailure;
-import io.odpf.depot.bigquery.client.BigQueryClient;
-import io.odpf.depot.bigquery.converter.MessageRecordConverter;
-import io.odpf.depot.bigquery.converter.MessageRecordConverterCache;
-import io.odpf.depot.common.TupleString;
-import io.odpf.depot.config.BigQuerySinkConfig;
-import io.odpf.depot.message.OdpfMessageSchema;
-import io.odpf.depot.message.SinkConnectorSchemaMessageMode;
-import io.odpf.depot.message.proto.ProtoField;
-import io.odpf.depot.message.proto.ProtoOdpfMessageParser;
-import io.odpf.depot.message.proto.ProtoOdpfMessageSchema;
-import io.odpf.depot.stencil.OdpfStencilUpdateListener;
+import org.raystack.depot.bigquery.client.BigQueryClient;
+import org.raystack.depot.bigquery.converter.MessageRecordConverter;
+import org.raystack.depot.bigquery.converter.MessageRecordConverterCache;
+import org.raystack.depot.bigquery.exception.BQSchemaMappingException;
+import org.raystack.depot.bigquery.exception.BQTableUpdateFailure;
+import org.raystack.depot.common.TupleString;
+import org.raystack.depot.config.BigQuerySinkConfig;
+import org.raystack.depot.message.MessageSchema;
+import org.raystack.depot.message.SinkConnectorSchemaMessageMode;
+import org.raystack.depot.message.proto.ProtoField;
+import org.raystack.depot.message.proto.ProtoMessageParser;
+import org.raystack.depot.message.proto.ProtoMessageSchema;
+import org.raystack.depot.stencil.DepotStencilUpdateListener;
 import lombok.Getter;
 import lombok.extern.slf4j.Slf4j;

@@ -26,13 +26,14 @@ import
java.util.stream.Collectors; @Slf4j -public class BigqueryProtoUpdateListener extends OdpfStencilUpdateListener { +public class BigqueryProtoUpdateListener extends DepotStencilUpdateListener { private final BigQuerySinkConfig config; private final BigQueryClient bqClient; @Getter private final MessageRecordConverterCache converterCache; - public BigqueryProtoUpdateListener(BigQuerySinkConfig config, BigQueryClient bqClient, MessageRecordConverterCache converterCache) { + public BigqueryProtoUpdateListener(BigQuerySinkConfig config, BigQueryClient bqClient, + MessageRecordConverterCache converterCache) { this.config = config; this.bqClient = bqClient; this.converterCache = converterCache; @@ -44,19 +45,20 @@ public void onSchemaUpdate(Map newDescriptors) { try { SinkConnectorSchemaMessageMode mode = config.getSinkConnectorSchemaMessageMode(); String schemaClass = mode == SinkConnectorSchemaMessageMode.LOG_MESSAGE - ? config.getSinkConnectorSchemaProtoMessageClass() : config.getSinkConnectorSchemaProtoKeyClass(); - ProtoOdpfMessageParser odpfMessageParser = (ProtoOdpfMessageParser) getOdpfMessageParser(); - OdpfMessageSchema schema; + ? config.getSinkConnectorSchemaProtoMessageClass() + : config.getSinkConnectorSchemaProtoKeyClass(); + ProtoMessageParser messageParser = (ProtoMessageParser) getMessageParser(); + MessageSchema schema; if (newDescriptors == null) { - schema = odpfMessageParser.getSchema(schemaClass); + schema = messageParser.getSchema(schemaClass); } else { - schema = odpfMessageParser.getSchema(schemaClass, newDescriptors); + schema = messageParser.getSchema(schemaClass, newDescriptors); } - ProtoField protoField = ((ProtoOdpfMessageSchema) schema).getProtoField(); + ProtoField protoField = ((ProtoMessageSchema) schema).getProtoField(); List bqSchemaFields = BigqueryFields.generateBigquerySchema(protoField); addMetadataFields(bqSchemaFields); bqClient.upsertTable(bqSchemaFields); - converterCache.setMessageRecordConverter(new MessageRecordConverter(odpfMessageParser, config, schema)); + converterCache.setMessageRecordConverter(new MessageRecordConverter(messageParser, config, schema)); } catch (BigQueryException | IOException e) { String errMsg = "Error while updating bigquery table on callback:" + e.getMessage(); log.error(errMsg); @@ -81,7 +83,8 @@ private void addMetadataFields(List bqSchemaFields) { } } - List duplicateFields = getDuplicateFields(bqSchemaFields, bqMetadataFields).stream().map(Field::getName).collect(Collectors.toList()); + List duplicateFields = getDuplicateFields(bqSchemaFields, bqMetadataFields).stream().map(Field::getName) + .collect(Collectors.toList()); if (duplicateFields.size() > 0) { throw new BQSchemaMappingException(String.format("Metadata field(s) is already present in the schema. 
" + "fields: %s", duplicateFields)); diff --git a/src/main/java/org/raystack/depot/bigquery/storage/BigQueryPayload.java b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryPayload.java new file mode 100644 index 00000000..da9c4a69 --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryPayload.java @@ -0,0 +1,45 @@ +package org.raystack.depot.bigquery.storage; + +import org.raystack.depot.bigquery.storage.proto.BigQueryRecordMeta; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; + +public class BigQueryPayload implements Iterable { + private final List recordMetadata = new ArrayList<>(); + private final Map payloadIndexToInputIndex = new HashMap<>(); + private Object payload; + + public void addMetadataRecord(BigQueryRecordMeta record) { + recordMetadata.add(record); + } + + public void putValidIndexToInputIndex(long validIndex, long inputIndex) { + payloadIndexToInputIndex.put(validIndex, inputIndex); + } + + public long getInputIndex(long payloadIndex) { + return payloadIndexToInputIndex.get(payloadIndex); + } + + public Set getPayloadIndexes() { + return payloadIndexToInputIndex.keySet(); + } + + public Iterator iterator() { + return recordMetadata.iterator(); + } + + public Object getPayload() { + return payload; + } + + public void setPayload(Object payload) { + this.payload = payload; + } + +} diff --git a/src/main/java/org/raystack/depot/bigquery/storage/BigQueryStorageClient.java b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryStorageClient.java new file mode 100644 index 00000000..78a3c0b6 --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryStorageClient.java @@ -0,0 +1,14 @@ +package org.raystack.depot.bigquery.storage; + +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import org.raystack.depot.message.Message; + +import java.io.Closeable; +import java.util.List; +import java.util.concurrent.ExecutionException; + +public interface BigQueryStorageClient extends Closeable { + BigQueryPayload convert(List messages); + + AppendRowsResponse appendAndGet(BigQueryPayload payload) throws ExecutionException, InterruptedException; +} diff --git a/src/main/java/org/raystack/depot/bigquery/storage/BigQueryStorageClientFactory.java b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryStorageClientFactory.java new file mode 100644 index 00000000..e83705e7 --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryStorageClientFactory.java @@ -0,0 +1,19 @@ +package org.raystack.depot.bigquery.storage; + +import org.raystack.depot.bigquery.storage.proto.BigQueryProtoStorageClient; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.message.MessageParser; + +public class BigQueryStorageClientFactory { + public static BigQueryStorageClient createBigQueryStorageClient( + BigQuerySinkConfig config, + MessageParser parser, + BigQueryWriter bigQueryWriter) { + switch (config.getSinkConnectorSchemaDataType()) { + case PROTOBUF: + return new BigQueryProtoStorageClient(bigQueryWriter, config, parser); + default: + throw new IllegalArgumentException("Invalid data type"); + } + } +} diff --git a/src/main/java/org/raystack/depot/bigquery/storage/BigQueryStorageResponseParser.java b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryStorageResponseParser.java new file mode 100644 index 00000000..3b16244c --- /dev/null +++ 
b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryStorageResponseParser.java @@ -0,0 +1,195 @@ +package org.raystack.depot.bigquery.storage; + +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.Exceptions; +import com.google.cloud.bigquery.storage.v1.RowError; +import org.raystack.depot.SinkResponse; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.message.Message; +import org.raystack.depot.metrics.BigQueryMetrics; +import org.raystack.depot.metrics.Instrumentation; +import io.grpc.Status.Code; +import com.google.rpc.Status; +import org.raystack.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorType; + +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.stream.IntStream; + +public class BigQueryStorageResponseParser { + private static final Set RETRYABLE_ERROR_CODES = new HashSet() { + { + add(Code.INTERNAL); + add(Code.ABORTED); + add(Code.CANCELLED); + add(Code.FAILED_PRECONDITION); + add(Code.DEADLINE_EXCEEDED); + add(Code.UNAVAILABLE); + } + }; + private final BigQuerySinkConfig sinkConfig; + private final Instrumentation instrumentation; + private final BigQueryMetrics bigQueryMetrics; + + public BigQueryStorageResponseParser( + BigQuerySinkConfig sinkConfig, + Instrumentation instrumentation, + BigQueryMetrics bigQueryMetrics) { + this.sinkConfig = sinkConfig; + this.instrumentation = instrumentation; + this.bigQueryMetrics = bigQueryMetrics; + } + + public static ErrorInfo getError(Status error) { + com.google.rpc.Code code = com.google.rpc.Code.forNumber(error.getCode()); + switch (code) { + case OK: + return null; + case CANCELLED: + case INVALID_ARGUMENT: + case NOT_FOUND: + case ALREADY_EXISTS: + case PERMISSION_DENIED: + case UNAUTHENTICATED: + case RESOURCE_EXHAUSTED: + case FAILED_PRECONDITION: + case ABORTED: + case OUT_OF_RANGE: + return new ErrorInfo(new Exception(error.getMessage()), ErrorType.SINK_4XX_ERROR); + case UNKNOWN: + case INTERNAL: + case DATA_LOSS: + case UNAVAILABLE: + case UNIMPLEMENTED: + case UNRECOGNIZED: + case DEADLINE_EXCEEDED: + return new ErrorInfo(new Exception(error.getMessage()), ErrorType.SINK_5XX_ERROR); + default: + return new ErrorInfo(new Exception(error.getMessage()), ErrorType.SINK_UNKNOWN_ERROR); + } + } + + public static boolean shouldRetry(io.grpc.Status status) { + return BigQueryStorageResponseParser.RETRYABLE_ERROR_CODES.contains(status.getCode()); + } + + public static ErrorInfo get4xxError(RowError rowError) { + return new ErrorInfo(new Exception(rowError.getMessage()), ErrorType.SINK_4XX_ERROR); + } + + public static AppendRowsResponse get4xxErrorResponse() { + return AppendRowsResponse.newBuilder() + .setError(Status.newBuilder().setCode(com.google.rpc.Code.FAILED_PRECONDITION.ordinal()).build()) + .build(); + } + + public void setSinkResponseForInvalidMessages( + BigQueryPayload payload, + List messages, + SinkResponse sinkResponse) { + + payload.forEach(meta -> { + if (!meta.isValid()) { + sinkResponse.addErrors(meta.getInputIndex(), meta.getErrorInfo()); + instrumentation.logError( + "Error {} occurred while converting to payload for record {}", + meta.getErrorInfo(), + messages.get((int) meta.getInputIndex()).getMetadataString()); + } + }); + } + + private void instrumentErrors(Object error) { + instrumentation.incrementCounter( + bigQueryMetrics.getBigqueryTotalErrorsMetrics(), + String.format(BigQueryMetrics.BIGQUERY_TABLE_TAG, 
sinkConfig.getTableName()), + String.format(BigQueryMetrics.BIGQUERY_DATASET_TAG, sinkConfig.getDatasetName()), + String.format(BigQueryMetrics.BIGQUERY_PROJECT_TAG, sinkConfig.getGCloudProjectID()), + String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, error.toString())); + } + + public void setSinkResponseForErrors( + BigQueryPayload payload, + AppendRowsResponse appendRowsResponse, + List messages, + SinkResponse sinkResponse) { + if (appendRowsResponse.hasError()) { + instrumentation.logError("received an error in stream :{} ", appendRowsResponse.getError()); + com.google.rpc.Status error = appendRowsResponse.getError(); + ErrorInfo errorInfo = BigQueryStorageResponseParser.getError(error); + Set payloadIndexes = payload.getPayloadIndexes(); + com.google.rpc.Code code = com.google.rpc.Code.forNumber(error.getCode()); + payloadIndexes.forEach(index -> { + long inputIndex = payload.getInputIndex(index); + sinkResponse.addErrors(inputIndex, errorInfo); + instrumentErrors(code); + }); + } + + // per message error + List rowErrorsList = appendRowsResponse.getRowErrorsList(); + rowErrorsList.forEach(rowError -> { + ErrorInfo errorInfo = BigQueryStorageResponseParser.get4xxError(rowError); + long inputIndex = payload.getInputIndex(rowError.getIndex()); + sinkResponse.addErrors(inputIndex, errorInfo); + String metadataString = messages.get((int) inputIndex).getMetadataString(); + instrumentation.logError( + "Error {} occurred while sending the payload for record {} with RowError {}", + errorInfo, + metadataString, + rowError); + instrumentErrors(rowError.getCode()); + }); + } + + public void setSinkResponseForException( + Throwable cause, + BigQueryPayload payload, + List messages, + SinkResponse sinkResponse) { + io.grpc.Status status = io.grpc.Status.fromThrowable(cause); + instrumentation.logError("Error from exception: {} ", status); + if (cause instanceof Exceptions.AppendSerializationError) { + // first set all messages to retryable + IntStream.range(0, payload.getPayloadIndexes().size()) + .forEach(index -> { + sinkResponse.addErrors(payload.getInputIndex(index), + new ErrorInfo(new Exception(cause), ErrorType.SINK_5XX_ERROR)); + instrumentErrors(status.getCode()); + }); + // then set non retryable messages + Exceptions.AppendSerializationError ase = (Exceptions.AppendSerializationError) cause; + Map rowIndexToErrorMessage = ase.getRowIndexToErrorMessage(); + rowIndexToErrorMessage.forEach((index, err) -> { + long inputIndex = payload.getInputIndex(index); + String metadataString = messages.get((int) inputIndex).getMetadataString(); + ErrorInfo errorInfo = new ErrorInfo(new Exception(err), ErrorType.SINK_4XX_ERROR); + instrumentation.logError( + "Error {} occurred while sending the payload for record {}", + errorInfo, + metadataString); + sinkResponse.addErrors(inputIndex, errorInfo); + instrumentErrors(BigQueryMetrics.BigQueryStorageAPIError.ROW_APPEND_ERROR); + }); + } else { + if (BigQueryStorageResponseParser.shouldRetry(status)) { + IntStream.range(0, payload.getPayloadIndexes().size()) + .forEach(index -> { + sinkResponse.addErrors(payload.getInputIndex(index), + new ErrorInfo(new Exception(cause), ErrorType.SINK_5XX_ERROR)); + instrumentErrors(status.getCode()); + }); + } else { + IntStream.range(0, payload.getPayloadIndexes().size()) + .forEach(index -> { + sinkResponse.addErrors(payload.getInputIndex(index), + new ErrorInfo(new Exception(cause), ErrorType.SINK_4XX_ERROR)); + instrumentErrors(status.getCode()); + }); + } + } + } +} diff --git 
a/src/main/java/org/raystack/depot/bigquery/storage/BigQueryStream.java b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryStream.java new file mode 100644 index 00000000..94d838da --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryStream.java @@ -0,0 +1,4 @@ +package org.raystack.depot.bigquery.storage; + +public interface BigQueryStream { +} diff --git a/src/main/java/org/raystack/depot/bigquery/storage/BigQueryWriter.java b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryWriter.java new file mode 100644 index 00000000..697f3df1 --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryWriter.java @@ -0,0 +1,12 @@ +package org.raystack.depot.bigquery.storage; + +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; + +import java.util.concurrent.ExecutionException; + +public interface BigQueryWriter extends AutoCloseable { + + void init(); + + AppendRowsResponse appendAndGet(BigQueryPayload payload) throws ExecutionException, InterruptedException; +} diff --git a/src/main/java/org/raystack/depot/bigquery/storage/BigQueryWriterFactory.java b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryWriterFactory.java new file mode 100644 index 00000000..1af0cb7e --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryWriterFactory.java @@ -0,0 +1,34 @@ +package org.raystack.depot.bigquery.storage; + +import com.google.api.gax.core.CredentialsProvider; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.ProtoSchema; +import org.raystack.depot.bigquery.storage.proto.BigQueryProtoWriter; +import org.raystack.depot.bigquery.storage.json.BigQueryJsonWriter; +import org.raystack.depot.common.Function3; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.metrics.BigQueryMetrics; +import org.raystack.depot.metrics.Instrumentation; + +import java.util.function.Function; + +public class BigQueryWriterFactory { + + public static BigQueryWriter createBigQueryWriter( + BigQuerySinkConfig config, + Function bqWriterCreator, + Function credCreator, + Function3 streamCreator, + Instrumentation instrumentation, + BigQueryMetrics metrics) { + switch (config.getSinkConnectorSchemaDataType()) { + case PROTOBUF: + return new BigQueryProtoWriter(config, bqWriterCreator, credCreator, streamCreator, instrumentation, + metrics); + case JSON: + return new BigQueryJsonWriter(config); + default: + throw new IllegalArgumentException("Couldn't initialise the BQ writer"); + } + } +} diff --git a/src/main/java/org/raystack/depot/bigquery/storage/BigQueryWriterUtils.java b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryWriterUtils.java new file mode 100644 index 00000000..7022f2e9 --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/storage/BigQueryWriterUtils.java @@ -0,0 +1,60 @@ +package org.raystack.depot.bigquery.storage; + +import com.google.api.gax.core.CredentialsProvider; +import com.google.api.gax.core.FixedCredentialsProvider; +import com.google.auth.oauth2.GoogleCredentials; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteSettings; +import com.google.cloud.bigquery.storage.v1.ProtoSchema; +import com.google.cloud.bigquery.storage.v1.StreamWriter; +import com.google.cloud.bigquery.storage.v1.TableName; +import org.raystack.depot.bigquery.storage.proto.BigQueryProtoStream; +import org.raystack.depot.config.BigQuerySinkConfig; + 
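+// The helpers in this class wire up the BigQuery Storage Write API: a
+// BigQueryWriteClient built from a JSON credentials file, and a StreamWriter
+// bound to the table's default write stream (".../_default").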
+import java.io.FileInputStream; +import java.io.IOException; + +public class BigQueryWriterUtils { + private static final String DEFAULT_STREAM_SUFFIX = "/_default"; + + public static BigQueryWriteClient getBigQueryWriterClient(BigQuerySinkConfig config) { + try { + + BigQueryWriteSettings settings = BigQueryWriteSettings.newBuilder() + .setCredentialsProvider(getCredentialsProvider(config)) + .build(); + return BigQueryWriteClient.create(settings); + } catch (IOException e) { + throw new IllegalArgumentException("Can't initialise writer client", e); + } + } + + public static CredentialsProvider getCredentialsProvider(BigQuerySinkConfig config) { + try { + return FixedCredentialsProvider.create( + GoogleCredentials.fromStream(new FileInputStream(config.getBigQueryCredentialPath()))); + } catch (IOException e) { + throw new IllegalArgumentException("Can't initialise credential provider", e); + } + } + + public static BigQueryStream getStreamWriter(BigQuerySinkConfig config, CredentialsProvider credentialsProvider, + ProtoSchema schema) { + try { + String streamName = getDefaultStreamName(config); + StreamWriter.Builder builder = StreamWriter.newBuilder(streamName); + builder.setCredentialsProvider(credentialsProvider); + builder.setWriterSchema(schema); + builder.setEnableConnectionPool(false); + return new BigQueryProtoStream(builder.build()); + } catch (IOException e) { + throw new IllegalArgumentException("Can't initialise Stream Writer", e); + } + } + + public static String getDefaultStreamName(BigQuerySinkConfig config) { + TableName parentTable = TableName.of(config.getGCloudProjectID(), config.getDatasetName(), + config.getTableName()); + return parentTable.toString() + DEFAULT_STREAM_SUFFIX; + } +} diff --git a/src/main/java/org/raystack/depot/bigquery/storage/json/BigQueryJsonStream.java b/src/main/java/org/raystack/depot/bigquery/storage/json/BigQueryJsonStream.java new file mode 100644 index 00000000..43318e1c --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/storage/json/BigQueryJsonStream.java @@ -0,0 +1,11 @@ +package org.raystack.depot.bigquery.storage.json; + +import com.google.cloud.bigquery.storage.v1.JsonStreamWriter; +import org.raystack.depot.bigquery.storage.BigQueryStream; + +public class BigQueryJsonStream implements BigQueryStream { + + public JsonStreamWriter getStreamWriter() { + return null; + } +} diff --git a/src/main/java/org/raystack/depot/bigquery/storage/json/BigQueryJsonWriter.java b/src/main/java/org/raystack/depot/bigquery/storage/json/BigQueryJsonWriter.java new file mode 100644 index 00000000..f903dc91 --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/storage/json/BigQueryJsonWriter.java @@ -0,0 +1,30 @@ +package org.raystack.depot.bigquery.storage.json; + +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import org.raystack.depot.bigquery.storage.BigQueryPayload; +import org.raystack.depot.bigquery.storage.BigQueryWriter; +import org.raystack.depot.config.BigQuerySinkConfig; + +import java.util.concurrent.ExecutionException; + +public class BigQueryJsonWriter implements BigQueryWriter { + + public BigQueryJsonWriter(BigQuerySinkConfig config) { + + } + + @Override + public void init() { + + } + + @Override + public AppendRowsResponse appendAndGet(BigQueryPayload payload) throws ExecutionException, InterruptedException { + return null; + } + + @Override + public void close() throws Exception { + + } +} diff --git a/src/main/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoStorageClient.java 
b/src/main/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoStorageClient.java new file mode 100644 index 00000000..1ef6dfa6 --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoStorageClient.java @@ -0,0 +1,160 @@ +package org.raystack.depot.bigquery.storage.proto; + +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.ProtoRows; +import com.google.protobuf.Descriptors; +import com.google.protobuf.DynamicMessage; +import org.raystack.depot.bigquery.storage.BigQueryPayload; +import org.raystack.depot.bigquery.storage.BigQueryStorageClient; +import org.raystack.depot.bigquery.storage.BigQueryWriter; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorType; +import org.raystack.depot.exception.DeserializerException; +import org.raystack.depot.exception.EmptyMessageException; +import org.raystack.depot.exception.UnknownFieldsException; +import org.raystack.depot.message.Message; +import org.raystack.depot.message.MessageParser; +import org.raystack.depot.message.ParsedMessage; +import org.raystack.depot.message.SinkConnectorSchemaMessageMode; +import org.raystack.depot.message.proto.converter.fields.DurationProtoField; +import org.raystack.depot.message.proto.converter.fields.MessageProtoField; +import org.raystack.depot.message.proto.converter.fields.ProtoField; +import org.raystack.depot.message.proto.converter.fields.ProtoFieldFactory; + +import java.io.IOException; +import java.time.Instant; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.ExecutionException; + +public class BigQueryProtoStorageClient implements BigQueryStorageClient { + + private final BigQueryProtoWriter writer; + private final BigQuerySinkConfig config; + private final MessageParser parser; + private final String schemaClass; + private final SinkConnectorSchemaMessageMode mode; + + public BigQueryProtoStorageClient(BigQueryWriter writer, BigQuerySinkConfig config, MessageParser parser) { + this.writer = (BigQueryProtoWriter) writer; + this.config = config; + this.parser = parser; + this.mode = config.getSinkConnectorSchemaMessageMode(); + this.schemaClass = mode == SinkConnectorSchemaMessageMode.LOG_MESSAGE + ? 
config.getSinkConnectorSchemaProtoMessageClass() + : config.getSinkConnectorSchemaProtoKeyClass(); + } + + public BigQueryPayload convert(List messages) { + ProtoRows.Builder rowBuilder = ProtoRows.newBuilder(); + BigQueryPayload payload = new BigQueryPayload(); + Descriptors.Descriptor descriptor = writer.getDescriptor(); + long validIndex = 0; + for (int index = 0; index < messages.size(); index++) { + Message message = messages.get(index); + try { + DynamicMessage convertedMessage = convert(message, descriptor); + BigQueryRecordMeta metadata = new BigQueryRecordMeta(index, null, true); + payload.addMetadataRecord(metadata); + payload.putValidIndexToInputIndex(validIndex++, index); + rowBuilder.addSerializedRows(convertedMessage.toByteString()); + } catch (UnknownFieldsException e) { + ErrorInfo errorInfo = new ErrorInfo(e, ErrorType.UNKNOWN_FIELDS_ERROR); + BigQueryRecordMeta metadata = new BigQueryRecordMeta(index, errorInfo, false); + payload.addMetadataRecord(metadata); + } catch (EmptyMessageException | UnsupportedOperationException e) { + ErrorInfo errorInfo = new ErrorInfo(e, ErrorType.INVALID_MESSAGE_ERROR); + BigQueryRecordMeta metadata = new BigQueryRecordMeta(index, errorInfo, false); + payload.addMetadataRecord(metadata); + } catch (DeserializerException | IllegalArgumentException | IOException e) { + ErrorInfo errorInfo = new ErrorInfo(e, ErrorType.DESERIALIZATION_ERROR); + BigQueryRecordMeta metadata = new BigQueryRecordMeta(index, errorInfo, false); + payload.addMetadataRecord(metadata); + } catch (Exception e) { + ErrorInfo errorInfo = new ErrorInfo(e, ErrorType.SINK_UNKNOWN_ERROR); + BigQueryRecordMeta metadata = new BigQueryRecordMeta(index, errorInfo, false); + payload.addMetadataRecord(metadata); + } + } + payload.setPayload(rowBuilder.build()); + return payload; + } + + @Override + public AppendRowsResponse appendAndGet(BigQueryPayload payload) throws ExecutionException, InterruptedException { + return writer.appendAndGet(payload); + } + + private DynamicMessage convert(Message message, Descriptors.Descriptor descriptor) throws IOException { + ParsedMessage parsedMessage = parser.parse(message, mode, schemaClass); + parsedMessage.validate(config); + DynamicMessage.Builder messageBuilder = convert((DynamicMessage) parsedMessage.getRaw(), descriptor, true); + BigQueryProtoUtils.addMetadata(message.getMetadata(), messageBuilder, descriptor, config); + return messageBuilder.build(); + } + + private DynamicMessage.Builder convert(DynamicMessage inputMessage, Descriptors.Descriptor descriptor, + boolean isTopLevel) { + DynamicMessage.Builder messageBuilder = DynamicMessage.newBuilder(descriptor); + List allFields = inputMessage.getDescriptorForType().getFields(); + for (Descriptors.FieldDescriptor inputField : allFields) { + Descriptors.FieldDescriptor outputField = descriptor.findFieldByName(inputField.getName().toLowerCase()); + if (outputField == null) { + // not found in table + continue; + } + ProtoField protoField = ProtoFieldFactory.getField(inputField, inputMessage.getField(inputField)); + Object fieldValue = protoField.getValue(); + if (fieldValue instanceof List) { + addRepeatedFields(messageBuilder, outputField, (List) fieldValue); + continue; + } + if (fieldValue.toString().isEmpty()) { + continue; + } + if (fieldValue instanceof Instant) { + if (((Instant) fieldValue).getEpochSecond() > 0) { + long timeStampValue = TimeStampUtils.getBQInstant((Instant) fieldValue, outputField, isTopLevel, + config); + messageBuilder.setField(outputField, timeStampValue); + } + 
} else if (protoField.getClass().getName().equals(MessageProtoField.class.getName()) + || protoField.getClass().getName().equals(DurationProtoField.class.getName())) { + Descriptors.Descriptor messageType = outputField.getMessageType(); + messageBuilder.setField(outputField, convert((DynamicMessage) fieldValue, messageType, false).build()); + } else { + messageBuilder.setField(outputField, fieldValue); + } + } + return messageBuilder; + } + + private void addRepeatedFields(DynamicMessage.Builder messageBuilder, Descriptors.FieldDescriptor outputField, + List fieldValue) { + if (fieldValue.isEmpty()) { + return; + } + List repeatedNestedFields = new ArrayList<>(); + for (Object f : fieldValue) { + if (f instanceof DynamicMessage) { + Descriptors.Descriptor messageType = outputField.getMessageType(); + repeatedNestedFields.add(convert((DynamicMessage) f, messageType, false).build()); + } else { + if (f instanceof Instant) { + if (((Instant) f).getEpochSecond() > 0) { + repeatedNestedFields.add(TimeStampUtils.getBQInstant((Instant) f, outputField, false, config)); + } + } else { + repeatedNestedFields.add(f); + } + } + } + messageBuilder.setField(outputField, repeatedNestedFields); + } + + @Override + public void close() throws IOException { + writer.close(); + } +} diff --git a/src/main/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoStream.java b/src/main/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoStream.java new file mode 100644 index 00000000..9b6d9781 --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoStream.java @@ -0,0 +1,12 @@ +package org.raystack.depot.bigquery.storage.proto; + +import com.google.cloud.bigquery.storage.v1.StreamWriter; +import org.raystack.depot.bigquery.storage.BigQueryStream; +import lombok.AllArgsConstructor; +import lombok.Getter; + +@AllArgsConstructor +public class BigQueryProtoStream implements BigQueryStream { + @Getter + private final StreamWriter streamWriter; +} diff --git a/src/main/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoUtils.java b/src/main/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoUtils.java new file mode 100644 index 00000000..5dfe7a61 --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoUtils.java @@ -0,0 +1,55 @@ +package org.raystack.depot.bigquery.storage.proto; + +import com.google.protobuf.Descriptors; +import com.google.protobuf.DynamicMessage; +import org.raystack.depot.common.TupleString; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.message.proto.converter.fields.ProtoField; +import org.raystack.depot.message.proto.converter.fields.ProtoFieldFactory; + +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +public class BigQueryProtoUtils { + public static void addMetadata( + Map metadata, + DynamicMessage.Builder messageBuilder, + Descriptors.Descriptor tableDescriptor, + BigQuerySinkConfig config) { + if (config.shouldAddMetadata()) { + List metadataColumnsTypes = config.getMetadataColumnsTypes(); + if (config.getBqMetadataNamespace().isEmpty()) { + setMetadata(metadata, messageBuilder, tableDescriptor, metadataColumnsTypes); + } else { + String namespace = config.getBqMetadataNamespace(); + Descriptors.FieldDescriptor metadataFieldDescriptor = tableDescriptor.findFieldByName(namespace); + if (metadataFieldDescriptor != null) { + Descriptors.Descriptor metadataDescriptor = metadataFieldDescriptor.getMessageType(); + 
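+                    // Namespaced metadata: build a child message using the namespace
+                    // field's own descriptor, fill it via setMetadata, and attach it
+                    // to the row under that single RECORD field.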
DynamicMessage.Builder metadataBuilder = DynamicMessage.newBuilder(metadataDescriptor); + setMetadata(metadata, metadataBuilder, metadataDescriptor, metadataColumnsTypes); + messageBuilder.setField(metadataFieldDescriptor, metadataBuilder.build()); + } + } + } + } + + private static void setMetadata(Map metadata, + DynamicMessage.Builder messageBuilder, + Descriptors.Descriptor descriptor, + List metadataColumnsTypes) { + metadataColumnsTypes.forEach(tuple -> { + String name = tuple.getFirst(); + String type = tuple.getSecond(); + Descriptors.FieldDescriptor field = descriptor.findFieldByName(name); + if (field != null && metadata.get(name) != null) { + ProtoField protoField = ProtoFieldFactory.getField(field, metadata.get(name)); + Object fieldValue = protoField.getValue(); + if ("timestamp".equals(type) && fieldValue instanceof Long) { + fieldValue = TimeUnit.MILLISECONDS.toMicros((Long) fieldValue); + } + messageBuilder.setField(field, fieldValue); + } + }); + } +} diff --git a/src/main/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoWriter.java b/src/main/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoWriter.java new file mode 100644 index 00000000..4778743f --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoWriter.java @@ -0,0 +1,168 @@ +package org.raystack.depot.bigquery.storage.proto; + +import com.google.api.core.ApiFuture; +import com.google.api.gax.core.CredentialsProvider; +import com.google.cloud.bigquery.storage.v1.AppendRowsResponse; +import com.google.cloud.bigquery.storage.v1.BQTableSchemaToProtoDescriptor; +import com.google.cloud.bigquery.storage.v1.BigQueryWriteClient; +import com.google.cloud.bigquery.storage.v1.GetWriteStreamRequest; +import com.google.cloud.bigquery.storage.v1.ProtoRows; +import com.google.cloud.bigquery.storage.v1.ProtoSchema; +import com.google.cloud.bigquery.storage.v1.ProtoSchemaConverter; +import com.google.cloud.bigquery.storage.v1.StreamWriter; +import com.google.cloud.bigquery.storage.v1.TableSchema; +import com.google.cloud.bigquery.storage.v1.WriteStream; +import com.google.cloud.bigquery.storage.v1.WriteStreamView; +import com.google.protobuf.Descriptors; +import org.raystack.depot.bigquery.storage.BigQueryPayload; +import org.raystack.depot.bigquery.storage.BigQueryStorageResponseParser; +import org.raystack.depot.bigquery.storage.BigQueryStream; +import org.raystack.depot.bigquery.storage.BigQueryWriter; +import org.raystack.depot.bigquery.storage.BigQueryWriterUtils; +import org.raystack.depot.common.Function3; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.metrics.BigQueryMetrics; +import org.raystack.depot.metrics.Instrumentation; +import lombok.Getter; + +import java.io.IOException; +import java.time.Instant; +import java.util.concurrent.ExecutionException; +import java.util.function.Function; + +public class BigQueryProtoWriter implements BigQueryWriter { + + private final BigQuerySinkConfig config; + private final Function bqWriterCreator; + private final Function credCreator; + private final Function3 streamCreator; + private final Instrumentation instrumentation; + private final BigQueryMetrics metrics; + @Getter + private StreamWriter streamWriter; + @Getter + private Descriptors.Descriptor descriptor; + private boolean isClosed = false; + + public BigQueryProtoWriter(BigQuerySinkConfig config, + Function bqWriterCreator, + Function credCreator, + Function3 streamCreator, + Instrumentation instrumentation, BigQueryMetrics metrics) { 
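+        // The Write API client, credentials provider, and stream writer are
+        // injected as factory functions so callers and tests control how each
+        // is constructed.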
+ this.config = config; + this.bqWriterCreator = bqWriterCreator; + this.credCreator = credCreator; + this.streamCreator = streamCreator; + this.instrumentation = instrumentation; + this.metrics = metrics; + } + + @Override + public void init() { + try { + String streamName = BigQueryWriterUtils.getDefaultStreamName(config); + GetWriteStreamRequest writeStreamRequest = GetWriteStreamRequest.newBuilder() + .setName(streamName) + .setView(WriteStreamView.FULL) + .build(); + try (BigQueryWriteClient bigQueryInstance = bqWriterCreator.apply(config)) { + // This WriteStream is to get the schema of the table. + WriteStream writeStream = bigQueryInstance.getWriteStream(writeStreamRequest); + // saving the descriptor for conversion + descriptor = BQTableSchemaToProtoDescriptor + .convertBQTableSchemaToProtoDescriptor(writeStream.getTableSchema()); + streamWriter = createStreamWriter(); + } + } catch (Descriptors.DescriptorValidationException e) { + throw new IllegalArgumentException("Could not initialise the bigquery writer", e); + } + } + + @Override + public void close() throws IOException { + synchronized (this) { + isClosed = true; + instrumentation.logInfo("Closing StreamWriter"); + Instant start = Instant.now(); + streamWriter.close(); + instrument(start, BigQueryMetrics.BigQueryStorageAPIType.STREAM_WRITER_CLOSED); + } + } + + // In the callback one can have the container and set the errors and/or log the + // response errors + @Override + public AppendRowsResponse appendAndGet(BigQueryPayload rows) throws ExecutionException, InterruptedException { + ApiFuture future; + ProtoRows payload = (ProtoRows) rows.getPayload(); + Instant start; + if (isClosed) { + instrumentation.logError("The client is permanently closed. More tasks can not be added"); + return BigQueryStorageResponseParser.get4xxErrorResponse(); + } + // need to synchronize + synchronized (this) { + TableSchema updatedSchema = streamWriter.getUpdatedSchema(); + if (updatedSchema != null) { + instrumentation.logInfo("Updated table schema detected, recreating stream writer"); + try { + // Close the StreamWriter + start = Instant.now(); + streamWriter.close(); + instrument(start, BigQueryMetrics.BigQueryStorageAPIType.STREAM_WRITER_CLOSED); + descriptor = BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(updatedSchema); + streamWriter = createStreamWriter(); + } catch (Descriptors.DescriptorValidationException e) { + throw new IllegalArgumentException("Could not initialise the bigquery writer", e); + } + } + if (streamWriter.isClosed()) { + // somehow the stream writer is not recoverable + // we need to create a new one + streamWriter = createStreamWriter(); + } + // timer for append latency + start = Instant.now(); + future = streamWriter.append(payload); + } + AppendRowsResponse appendRowsResponse = future.get(); + instrument(start, BigQueryMetrics.BigQueryStorageAPIType.STREAM_WRITER_APPEND); + captureSizeMetric(payload); + return appendRowsResponse; + } + + private StreamWriter createStreamWriter() { + Instant start = Instant.now(); + BigQueryStream bigQueryStream = streamCreator.apply(config, + credCreator.apply(config), + ProtoSchemaConverter.convert(descriptor)); + instrument(start, BigQueryMetrics.BigQueryStorageAPIType.STREAM_WRITER_CREATED); + assert (bigQueryStream instanceof BigQueryProtoStream); + return ((BigQueryProtoStream) bigQueryStream).getStreamWriter(); + } + + private void captureSizeMetric(ProtoRows payload) { + instrumentation.captureCount( + metrics.getBigqueryPayloadSizeMetrics(), + 
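+ // the serialized payload size in bytes, tagged with table, dataset and project below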
(long) payload.getSerializedSize(), + String.format(BigQueryMetrics.BIGQUERY_TABLE_TAG, config.getTableName()), + String.format(BigQueryMetrics.BIGQUERY_DATASET_TAG, config.getDatasetName()), + String.format(BigQueryMetrics.BIGQUERY_PROJECT_TAG, config.getGCloudProjectID())); + } + + private void instrument(Instant start, BigQueryMetrics.BigQueryStorageAPIType type) { + instrumentation.incrementCounter( + metrics.getBigqueryOperationTotalMetric(), + String.format(BigQueryMetrics.BIGQUERY_TABLE_TAG, config.getTableName()), + String.format(BigQueryMetrics.BIGQUERY_DATASET_TAG, config.getDatasetName()), + String.format(BigQueryMetrics.BIGQUERY_PROJECT_TAG, config.getGCloudProjectID()), + String.format(BigQueryMetrics.BIGQUERY_API_TAG, type)); + instrumentation.captureDurationSince( + metrics.getBigqueryOperationLatencyMetric(), + start, + String.format(BigQueryMetrics.BIGQUERY_TABLE_TAG, config.getTableName()), + String.format(BigQueryMetrics.BIGQUERY_DATASET_TAG, config.getDatasetName()), + String.format(BigQueryMetrics.BIGQUERY_PROJECT_TAG, config.getGCloudProjectID()), + String.format(BigQueryMetrics.BIGQUERY_API_TAG, type)); + } +} diff --git a/src/main/java/org/raystack/depot/bigquery/storage/proto/BigQueryRecordMeta.java b/src/main/java/org/raystack/depot/bigquery/storage/proto/BigQueryRecordMeta.java new file mode 100644 index 00000000..78e1bd0c --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/storage/proto/BigQueryRecordMeta.java @@ -0,0 +1,13 @@ +package org.raystack.depot.bigquery.storage.proto; + +import org.raystack.depot.error.ErrorInfo; +import lombok.AllArgsConstructor; +import lombok.Getter; + +@AllArgsConstructor +@Getter +public class BigQueryRecordMeta { + private final long inputIndex; + private final ErrorInfo errorInfo; + private final boolean isValid; +} diff --git a/src/main/java/org/raystack/depot/bigquery/storage/proto/TimeStampUtils.java b/src/main/java/org/raystack/depot/bigquery/storage/proto/TimeStampUtils.java new file mode 100644 index 00000000..4378c8ce --- /dev/null +++ b/src/main/java/org/raystack/depot/bigquery/storage/proto/TimeStampUtils.java @@ -0,0 +1,52 @@ +package org.raystack.depot.bigquery.storage.proto; + +import com.google.protobuf.Descriptors; +import org.raystack.depot.config.BigQuerySinkConfig; + +import java.time.Instant; +import java.util.concurrent.TimeUnit; + +public class TimeStampUtils { + private static final long FIVE_YEARS_DAYS = 1825; + private static final long ONE_YEAR_DAYS = 365; + private static final Instant MIN_TIMESTAMP = Instant.parse("0001-01-01T00:00:00Z"); + private static final Instant MAX_TIMESTAMP = Instant.parse("9999-12-31T23:59:59.999999Z"); + + public static long getBQInstant(Instant instant, Descriptors.FieldDescriptor fieldDescriptor, boolean isTopLevel, + BigQuerySinkConfig config) { + // Timestamp should be in microseconds + long timeStamp = TimeUnit.SECONDS.toMicros(instant.getEpochSecond()) + + TimeUnit.NANOSECONDS.toMicros(instant.getNano()); + // Partition column is always top level + if (isTopLevel && fieldDescriptor.getName().equals(config.getTablePartitionKey())) { + Instant currentInstant = Instant.now(); + boolean isValid; + boolean isPastInstant = currentInstant.isAfter(instant); + if (isPastInstant) { + Instant fiveYearPast = currentInstant.minusMillis(TimeUnit.DAYS.toMillis(FIVE_YEARS_DAYS)); + isValid = fiveYearPast.isBefore(instant); + } else { + Instant oneYearFuture = currentInstant.plusMillis(TimeUnit.DAYS.toMillis(ONE_YEAR_DAYS)); + isValid = oneYearFuture.isAfter(instant); + + } + 
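+ // Illustration (not part of the original change): with now = 2024-06-01T00:00:00Z,
+ // a partition value of 2018-01-01T00:00:00Z fails the 1825-day look-back and
+ // 2026-01-01T00:00:00Z fails the one-year look-ahead; both hit the throw below.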
if (!isValid) {
+ throw new UnsupportedOperationException(instant + " for field "
+ + fieldDescriptor.getFullName() + " is outside the allowed bounds. "
+ + "You can only stream to date range within 1825 days in the past "
+ + "and 365 days in the future relative to the current date.");
+ }
+ return timeStamp;
+ } else {
+ // other timestamps should be in the limit specified by BQ
+ if (instant.isAfter(MIN_TIMESTAMP) && instant.isBefore(MAX_TIMESTAMP)) {
+ return timeStamp;
+ } else {
+ throw new UnsupportedOperationException(instant
+ + " for field "
+ + fieldDescriptor.getFullName()
+ + " is outside the allowed bounds in BQ.");
+ }
+ }
+ }
+}
diff --git a/src/main/java/io/odpf/depot/bigtable/BigTableSink.java b/src/main/java/org/raystack/depot/bigtable/BigTableSink.java
similarity index 50%
rename from src/main/java/io/odpf/depot/bigtable/BigTableSink.java
rename to src/main/java/org/raystack/depot/bigtable/BigTableSink.java
index f59422e6..503e16b7 100644
--- a/src/main/java/io/odpf/depot/bigtable/BigTableSink.java
+++ b/src/main/java/org/raystack/depot/bigtable/BigTableSink.java
@@ -1,29 +1,30 @@
-package io.odpf.depot.bigtable;
-
-import io.odpf.depot.OdpfSink;
-import io.odpf.depot.OdpfSinkResponse;
-import io.odpf.depot.bigtable.client.BigTableClient;
-import io.odpf.depot.bigtable.model.BigTableRecord;
-import io.odpf.depot.bigtable.parser.BigTableRecordParser;
-import io.odpf.depot.bigtable.parser.BigTableResponseParser;
-import io.odpf.depot.bigtable.response.BigTableResponse;
-import io.odpf.depot.error.ErrorInfo;
-import io.odpf.depot.message.OdpfMessage;
-import io.odpf.depot.metrics.BigTableMetrics;
-import io.odpf.depot.metrics.Instrumentation;
+package org.raystack.depot.bigtable;
+
+import org.raystack.depot.bigtable.client.BigTableClient;
+import org.raystack.depot.bigtable.model.BigTableRecord;
+import org.raystack.depot.bigtable.parser.BigTableRecordParser;
+import org.raystack.depot.bigtable.parser.BigTableResponseParser;
+import org.raystack.depot.bigtable.response.BigTableResponse;
+import org.raystack.depot.Sink;
+import org.raystack.depot.SinkResponse;
+import org.raystack.depot.error.ErrorInfo;
+import org.raystack.depot.message.Message;
+import org.raystack.depot.metrics.BigTableMetrics;
+import org.raystack.depot.metrics.Instrumentation;

 import java.io.IOException;
 import java.util.List;
 import java.util.Map;
 import java.util.stream.Collectors;

-public class BigTableSink implements OdpfSink {
+public class BigTableSink implements Sink {
 private final BigTableClient bigTableClient;
 private final BigTableRecordParser bigTableRecordParser;
 private final BigTableMetrics bigtableMetrics;
 private final Instrumentation instrumentation;

- public BigTableSink(BigTableClient bigTableClient, BigTableRecordParser bigTableRecordParser, BigTableMetrics bigtableMetrics, Instrumentation instrumentation) {
+ public BigTableSink(BigTableClient bigTableClient, BigTableRecordParser bigTableRecordParser,
+ BigTableMetrics bigtableMetrics, Instrumentation instrumentation) {
 this.bigTableClient = bigTableClient;
 this.bigTableRecordParser = bigTableRecordParser;
 this.bigtableMetrics = bigtableMetrics;
@@ -31,25 +32,28 @@ public BigTableSink(BigTableClient bigTableClient, BigTableRecordParser bigTable
 }

 @Override
- public OdpfSinkResponse pushToSink(List<OdpfMessage> messages) {
+ public SinkResponse pushToSink(List<Message> messages) {
 List<BigTableRecord> records = bigTableRecordParser.convert(messages);
- Map<Boolean, List<BigTableRecord>> splitterRecords = records.stream().collect(Collectors.partitioningBy(BigTableRecord::isValid));
+ Map<Boolean, List<BigTableRecord>>
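+ // partition by validity: the TRUE bucket is sent to BigTable, the FALSE bucket is reported back as per-index errors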
splitterRecords = records.stream()
+ .collect(Collectors.partitioningBy(BigTableRecord::isValid));
 List<BigTableRecord> invalidRecords = splitterRecords.get(Boolean.FALSE);
 List<BigTableRecord> validRecords = splitterRecords.get(Boolean.TRUE);
- OdpfSinkResponse odpfSinkResponse = new OdpfSinkResponse();
- invalidRecords.forEach(invalidRecord -> odpfSinkResponse.addErrors(invalidRecord.getIndex(), invalidRecord.getErrorInfo()));
+ SinkResponse sinkResponse = new SinkResponse();
+ invalidRecords.forEach(
+ invalidRecord -> sinkResponse.addErrors(invalidRecord.getIndex(), invalidRecord.getErrorInfo()));
 if (validRecords.size() > 0) {
 BigTableResponse bigTableResponse = bigTableClient.send(validRecords);
 if (bigTableResponse != null && bigTableResponse.hasErrors()) {
 instrumentation.logInfo("Found {} Error records in response", bigTableResponse.getErrorCount());
- Map<Long, ErrorInfo> errorInfoMap = BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigTableResponse, bigtableMetrics, instrumentation);
- errorInfoMap.forEach(odpfSinkResponse::addErrors);
+ Map<Long, ErrorInfo> errorInfoMap = BigTableResponseParser.getErrorsFromSinkResponse(validRecords,
+ bigTableResponse, bigtableMetrics, instrumentation);
+ errorInfoMap.forEach(sinkResponse::addErrors);
 }
 }
- return odpfSinkResponse;
+ return sinkResponse;
 }

 @Override
diff --git a/src/main/java/io/odpf/depot/bigtable/BigTableSinkFactory.java b/src/main/java/org/raystack/depot/bigtable/BigTableSinkFactory.java
similarity index 61%
rename from src/main/java/io/odpf/depot/bigtable/BigTableSinkFactory.java
rename to src/main/java/org/raystack/depot/bigtable/BigTableSinkFactory.java
index c1c2cf1e..a9f40c52 100644
--- a/src/main/java/io/odpf/depot/bigtable/BigTableSinkFactory.java
+++ b/src/main/java/org/raystack/depot/bigtable/BigTableSinkFactory.java
@@ -1,24 +1,24 @@
-package io.odpf.depot.bigtable;
+package org.raystack.depot.bigtable;

+import org.raystack.depot.bigtable.client.BigTableClient;
+import org.raystack.depot.bigtable.model.BigTableSchema;
+import org.raystack.depot.bigtable.parser.BigTableRecordParser;
+import org.raystack.depot.bigtable.parser.BigTableRowKeyParser;
+import org.raystack.depot.exception.ConfigurationException;
+import org.raystack.depot.exception.InvalidTemplateException;
 import com.timgroup.statsd.NoOpStatsDClient;
-import io.odpf.depot.OdpfSink;
-import io.odpf.depot.bigtable.client.BigTableClient;
-import io.odpf.depot.bigtable.model.BigTableSchema;
-import io.odpf.depot.bigtable.parser.BigTableRecordParser;
-import io.odpf.depot.bigtable.parser.BigTableRowKeyParser;
-import io.odpf.depot.common.Template;
-import io.odpf.depot.common.Tuple;
-import io.odpf.depot.config.BigTableSinkConfig;
-import io.odpf.depot.exception.ConfigurationException;
-import io.odpf.depot.exception.InvalidTemplateException;
-import io.odpf.depot.message.OdpfMessageParser;
-import io.odpf.depot.message.OdpfMessageParserFactory;
-import io.odpf.depot.message.OdpfMessageSchema;
-import io.odpf.depot.message.SinkConnectorSchemaMessageMode;
-import io.odpf.depot.metrics.BigTableMetrics;
-import io.odpf.depot.metrics.Instrumentation;
-import io.odpf.depot.metrics.StatsDReporter;
-import io.odpf.depot.utils.MessageConfigUtils;
+import org.raystack.depot.Sink;
+import org.raystack.depot.common.Template;
+import org.raystack.depot.common.Tuple;
+import org.raystack.depot.config.BigTableSinkConfig;
+import org.raystack.depot.message.MessageParser;
+import org.raystack.depot.message.MessageParserFactory;
+import org.raystack.depot.message.MessageSchema;
+import
org.raystack.depot.message.SinkConnectorSchemaMessageMode; +import org.raystack.depot.metrics.BigTableMetrics; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.metrics.StatsDReporter; +import org.raystack.depot.utils.MessageConfigUtils; import java.io.IOException; @@ -38,11 +38,11 @@ public BigTableSinkFactory(BigTableSinkConfig sinkConfig) { this(sinkConfig, new StatsDReporter(new NoOpStatsDClient())); } - public void init() { try { Instrumentation instrumentation = new Instrumentation(statsDReporter, BigTableSinkFactory.class); - String bigtableConfig = String.format("\n\tbigtable.gcloud.project = %s\n\tbigtable.instance = %s\n\tbigtable.table = %s" + String bigtableConfig = String.format( + "\n\tbigtable.gcloud.project = %s\n\tbigtable.instance = %s\n\tbigtable.table = %s" + "\n\tbigtable.credential.path = %s\n\tbigtable.row.key.template = %s\n\tbigtable.column.family.mapping = %s\n\t", sinkConfig.getGCloudProjectID(), sinkConfig.getInstanceId(), @@ -54,17 +54,19 @@ public void init() { instrumentation.logInfo(bigtableConfig); BigTableSchema bigtableSchema = new BigTableSchema(sinkConfig.getColumnFamilyMapping()); bigtableMetrics = new BigTableMetrics(sinkConfig); - bigTableClient = new BigTableClient(sinkConfig, bigtableSchema, bigtableMetrics, new Instrumentation(statsDReporter, BigTableClient.class)); + bigTableClient = new BigTableClient(sinkConfig, bigtableSchema, bigtableMetrics, + new Instrumentation(statsDReporter, BigTableClient.class)); bigTableClient.validateBigTableSchema(); - Tuple modeAndSchema = MessageConfigUtils.getModeAndSchema(sinkConfig); - OdpfMessageParser odpfMessageParser = OdpfMessageParserFactory.getParser(sinkConfig, statsDReporter); - OdpfMessageSchema schema = odpfMessageParser.getSchema(modeAndSchema.getSecond()); + Tuple modeAndSchema = MessageConfigUtils + .getModeAndSchema(sinkConfig); + MessageParser messageParser = MessageParserFactory.getParser(sinkConfig, statsDReporter); + MessageSchema schema = messageParser.getSchema(modeAndSchema.getSecond()); Template keyTemplate = new Template(sinkConfig.getRowKeyTemplate()); BigTableRowKeyParser bigTableRowKeyParser = new BigTableRowKeyParser(keyTemplate, schema); bigTableRecordParser = new BigTableRecordParser( - odpfMessageParser, + messageParser, bigTableRowKeyParser, modeAndSchema, schema, @@ -75,7 +77,7 @@ public void init() { } } - public OdpfSink create() { + public Sink create() { return new BigTableSink( bigTableClient, bigTableRecordParser, diff --git a/src/main/java/io/odpf/depot/bigtable/client/BigTableClient.java b/src/main/java/org/raystack/depot/bigtable/client/BigTableClient.java similarity index 78% rename from src/main/java/io/odpf/depot/bigtable/client/BigTableClient.java rename to src/main/java/org/raystack/depot/bigtable/client/BigTableClient.java index 4da3c350..71abda26 100644 --- a/src/main/java/io/odpf/depot/bigtable/client/BigTableClient.java +++ b/src/main/java/org/raystack/depot/bigtable/client/BigTableClient.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigtable.client; +package org.raystack.depot.bigtable.client; import com.google.api.gax.core.FixedCredentialsProvider; import com.google.auth.oauth2.GoogleCredentials; @@ -9,13 +9,13 @@ import com.google.cloud.bigtable.data.v2.BigtableDataSettings; import com.google.cloud.bigtable.data.v2.models.BulkMutation; import com.google.cloud.bigtable.data.v2.models.MutateRowsException; -import io.odpf.depot.bigtable.exception.BigTableInvalidSchemaException; -import io.odpf.depot.bigtable.model.BigTableRecord; 
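For readers wiring this up downstream, here is a minimal, hypothetical usage sketch of the factory API refactored above; the environment keys, the no-op stats client, and the call order are illustrative assumptions, not part of this changeset:

```java
import com.timgroup.statsd.NoOpStatsDClient;
import org.aeonbits.owner.ConfigFactory;
import org.raystack.depot.Sink;
import org.raystack.depot.bigtable.BigTableSinkFactory;
import org.raystack.depot.config.BigTableSinkConfig;
import org.raystack.depot.metrics.StatsDReporter;

import java.util.Map;

public class BigTableSinkUsageExample {
    public static void main(String[] args) throws Exception {
        // Assumed: SINK_BIGTABLE_* and schema-related keys are present in the environment.
        Map<String, String> env = System.getenv();
        BigTableSinkConfig config = ConfigFactory.create(BigTableSinkConfig.class, env);
        BigTableSinkFactory factory = new BigTableSinkFactory(config,
                new StatsDReporter(new NoOpStatsDClient()));
        factory.init(); // validates the BigTable schema and wires parsers and clients
        try (Sink sink = factory.create()) {
            // sink.pushToSink(messages) returns a SinkResponse with per-index ErrorInfo
        }
    }
}
```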
-import io.odpf.depot.bigtable.model.BigTableSchema; -import io.odpf.depot.bigtable.response.BigTableResponse; -import io.odpf.depot.config.BigTableSinkConfig; -import io.odpf.depot.metrics.BigTableMetrics; -import io.odpf.depot.metrics.Instrumentation; +import org.raystack.depot.bigtable.exception.BigTableInvalidSchemaException; +import org.raystack.depot.bigtable.model.BigTableRecord; +import org.raystack.depot.bigtable.model.BigTableSchema; +import org.raystack.depot.bigtable.response.BigTableResponse; +import org.raystack.depot.config.BigTableSinkConfig; +import org.raystack.depot.metrics.BigTableMetrics; +import org.raystack.depot.metrics.Instrumentation; import java.io.FileInputStream; import java.io.IOException; @@ -32,11 +32,15 @@ public class BigTableClient { private final BigTableMetrics bigtableMetrics; private final Instrumentation instrumentation; - public BigTableClient(BigTableSinkConfig sinkConfig, BigTableSchema bigtableSchema, BigTableMetrics bigtableMetrics, Instrumentation instrumentation) throws IOException { - this(sinkConfig, getBigTableDataClient(sinkConfig), getBigTableAdminClient(sinkConfig), bigtableSchema, bigtableMetrics, instrumentation); + public BigTableClient(BigTableSinkConfig sinkConfig, BigTableSchema bigtableSchema, BigTableMetrics bigtableMetrics, + Instrumentation instrumentation) throws IOException { + this(sinkConfig, getBigTableDataClient(sinkConfig), getBigTableAdminClient(sinkConfig), bigtableSchema, + bigtableMetrics, instrumentation); } - public BigTableClient(BigTableSinkConfig sinkConfig, BigtableDataClient bigtableDataClient, BigtableTableAdminClient bigtableTableAdminClient, BigTableSchema bigtableSchema, BigTableMetrics bigtableMetrics, Instrumentation instrumentation) { + public BigTableClient(BigTableSinkConfig sinkConfig, BigtableDataClient bigtableDataClient, + BigtableTableAdminClient bigtableTableAdminClient, BigTableSchema bigtableSchema, + BigTableMetrics bigtableMetrics, Instrumentation instrumentation) { this.sinkConfig = sinkConfig; this.bigtableDataClient = bigtableDataClient; this.bigtableTableAdminClient = bigtableTableAdminClient; @@ -49,7 +53,8 @@ private static BigtableDataClient getBigTableDataClient(BigTableSinkConfig sinkC BigtableDataSettings settings = BigtableDataSettings.newBuilder() .setProjectId(sinkConfig.getGCloudProjectID()) .setInstanceId(sinkConfig.getInstanceId()) - .setCredentialsProvider(FixedCredentialsProvider.create(GoogleCredentials.fromStream(new FileInputStream(sinkConfig.getCredentialPath())))) + .setCredentialsProvider(FixedCredentialsProvider + .create(GoogleCredentials.fromStream(new FileInputStream(sinkConfig.getCredentialPath())))) .build(); return BigtableDataClient.create(settings); } @@ -58,7 +63,8 @@ private static BigtableTableAdminClient getBigTableAdminClient(BigTableSinkConfi BigtableTableAdminSettings settings = BigtableTableAdminSettings.newBuilder() .setProjectId(sinkConfig.getGCloudProjectID()) .setInstanceId(sinkConfig.getInstanceId()) - .setCredentialsProvider(FixedCredentialsProvider.create(GoogleCredentials.fromStream(new FileInputStream(sinkConfig.getCredentialPath())))) + .setCredentialsProvider(FixedCredentialsProvider + .create(GoogleCredentials.fromStream(new FileInputStream(sinkConfig.getCredentialPath())))) .build(); return BigtableTableAdminClient.create(settings); } @@ -101,9 +107,10 @@ public void validateBigTableSchema() throws BigTableInvalidSchemaException { private void checkIfTableExists(String tableId) throws BigTableInvalidSchemaException { if 
(!bigtableTableAdminClient.exists(tableId)) { - throw new BigTableInvalidSchemaException(String.format("Table not found on the path: projects/%s/instances/%s/tables/%s", - bigtableTableAdminClient.getProjectId(), bigtableTableAdminClient.getInstanceId(), tableId)); - } + throw new BigTableInvalidSchemaException(String.format( + "Table not found on the path: projects/%s/instances/%s/tables/%s", + bigtableTableAdminClient.getProjectId(), bigtableTableAdminClient.getInstanceId(), tableId)); + } } private void checkIfColumnFamiliesExist(String tableId) throws BigTableInvalidSchemaException { diff --git a/src/main/java/io/odpf/depot/bigtable/exception/BigTableInvalidSchemaException.java b/src/main/java/org/raystack/depot/bigtable/exception/BigTableInvalidSchemaException.java similarity index 85% rename from src/main/java/io/odpf/depot/bigtable/exception/BigTableInvalidSchemaException.java rename to src/main/java/org/raystack/depot/bigtable/exception/BigTableInvalidSchemaException.java index fba1a8b7..480705c4 100644 --- a/src/main/java/io/odpf/depot/bigtable/exception/BigTableInvalidSchemaException.java +++ b/src/main/java/org/raystack/depot/bigtable/exception/BigTableInvalidSchemaException.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigtable.exception; +package org.raystack.depot.bigtable.exception; public class BigTableInvalidSchemaException extends RuntimeException { public BigTableInvalidSchemaException(String message, Throwable cause) { diff --git a/src/main/java/io/odpf/depot/bigtable/model/BigTableRecord.java b/src/main/java/org/raystack/depot/bigtable/model/BigTableRecord.java similarity index 83% rename from src/main/java/io/odpf/depot/bigtable/model/BigTableRecord.java rename to src/main/java/org/raystack/depot/bigtable/model/BigTableRecord.java index 5cea4b51..8c434c27 100644 --- a/src/main/java/io/odpf/depot/bigtable/model/BigTableRecord.java +++ b/src/main/java/org/raystack/depot/bigtable/model/BigTableRecord.java @@ -1,7 +1,7 @@ -package io.odpf.depot.bigtable.model; +package org.raystack.depot.bigtable.model; import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; -import io.odpf.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorInfo; import lombok.AllArgsConstructor; import lombok.Getter; diff --git a/src/main/java/io/odpf/depot/bigtable/model/BigTableSchema.java b/src/main/java/org/raystack/depot/bigtable/model/BigTableSchema.java similarity index 92% rename from src/main/java/io/odpf/depot/bigtable/model/BigTableSchema.java rename to src/main/java/org/raystack/depot/bigtable/model/BigTableSchema.java index 3311eb2d..1b1b408f 100644 --- a/src/main/java/io/odpf/depot/bigtable/model/BigTableSchema.java +++ b/src/main/java/org/raystack/depot/bigtable/model/BigTableSchema.java @@ -1,6 +1,6 @@ -package io.odpf.depot.bigtable.model; +package org.raystack.depot.bigtable.model; -import io.odpf.depot.exception.ConfigurationException; +import org.raystack.depot.exception.ConfigurationException; import org.json.JSONObject; import java.util.HashSet; diff --git a/src/main/java/io/odpf/depot/bigtable/parser/BigTableRecordParser.java b/src/main/java/org/raystack/depot/bigtable/parser/BigTableRecordParser.java similarity index 59% rename from src/main/java/io/odpf/depot/bigtable/parser/BigTableRecordParser.java rename to src/main/java/org/raystack/depot/bigtable/parser/BigTableRecordParser.java index 37ec31b8..878b58ea 100644 --- a/src/main/java/io/odpf/depot/bigtable/parser/BigTableRecordParser.java +++ 
b/src/main/java/org/raystack/depot/bigtable/parser/BigTableRecordParser.java @@ -1,20 +1,20 @@ -package io.odpf.depot.bigtable.parser; +package org.raystack.depot.bigtable.parser; import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; -import io.odpf.depot.bigtable.model.BigTableRecord; -import io.odpf.depot.bigtable.model.BigTableSchema; -import io.odpf.depot.common.Tuple; -import io.odpf.depot.error.ErrorInfo; -import io.odpf.depot.error.ErrorType; -import io.odpf.depot.exception.ConfigurationException; -import io.odpf.depot.exception.DeserializerException; -import io.odpf.depot.exception.EmptyMessageException; -import io.odpf.depot.message.OdpfMessage; -import io.odpf.depot.message.OdpfMessageParser; -import io.odpf.depot.message.OdpfMessageSchema; -import io.odpf.depot.message.ParsedOdpfMessage; -import io.odpf.depot.message.SinkConnectorSchemaMessageMode; -import io.odpf.depot.message.field.GenericFieldFactory; +import org.raystack.depot.bigtable.model.BigTableRecord; +import org.raystack.depot.bigtable.model.BigTableSchema; +import org.raystack.depot.common.Tuple; +import org.raystack.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorType; +import org.raystack.depot.exception.ConfigurationException; +import org.raystack.depot.exception.DeserializerException; +import org.raystack.depot.exception.EmptyMessageException; +import org.raystack.depot.message.field.GenericFieldFactory; +import org.raystack.depot.message.Message; +import org.raystack.depot.message.MessageParser; +import org.raystack.depot.message.MessageSchema; +import org.raystack.depot.message.ParsedMessage; +import org.raystack.depot.message.SinkConnectorSchemaMessageMode; import lombok.extern.slf4j.Slf4j; import java.io.IOException; @@ -24,45 +24,47 @@ @Slf4j public class BigTableRecordParser { - private final OdpfMessageParser odpfMessageParser; + private final MessageParser messageParser; private final BigTableRowKeyParser bigTableRowKeyParser; private final BigTableSchema bigTableSchema; - private final OdpfMessageSchema schema; + private final MessageSchema schema; private final Tuple modeAndSchema; - public BigTableRecordParser(OdpfMessageParser odpfMessageParser, - BigTableRowKeyParser bigTableRowKeyParser, - Tuple modeAndSchema, - OdpfMessageSchema schema, - BigTableSchema bigTableSchema) { - this.odpfMessageParser = odpfMessageParser; + public BigTableRecordParser(MessageParser messageParser, + BigTableRowKeyParser bigTableRowKeyParser, + Tuple modeAndSchema, + MessageSchema schema, + BigTableSchema bigTableSchema) { + this.messageParser = messageParser; this.bigTableRowKeyParser = bigTableRowKeyParser; this.modeAndSchema = modeAndSchema; this.schema = schema; this.bigTableSchema = bigTableSchema; } - public List convert(List messages) { + public List convert(List messages) { ArrayList records = new ArrayList<>(); for (int index = 0; index < messages.size(); index++) { - OdpfMessage message = messages.get(index); + Message message = messages.get(index); BigTableRecord record = createRecord(message, index); records.add(record); } return records; } - private BigTableRecord createRecord(OdpfMessage message, long index) { + private BigTableRecord createRecord(Message message, long index) { try { - ParsedOdpfMessage parsedOdpfMessage = odpfMessageParser.parse(message, modeAndSchema.getFirst(), modeAndSchema.getSecond()); - String rowKey = bigTableRowKeyParser.parse(parsedOdpfMessage); + ParsedMessage parsedMessage = messageParser.parse(message, modeAndSchema.getFirst(), + 
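+ // getFirst() = LOG_KEY/LOG_MESSAGE mode, getSecond() = the schema class to parse against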
modeAndSchema.getSecond()); + String rowKey = bigTableRowKeyParser.parse(parsedMessage); RowMutationEntry rowMutationEntry = RowMutationEntry.create(rowKey); bigTableSchema.getColumnFamilies().forEach( columnFamily -> bigTableSchema .getColumns(columnFamily) .forEach(column -> { String fieldName = bigTableSchema.getField(columnFamily, column); - String value = GenericFieldFactory.getField(parsedOdpfMessage.getFieldByName(fieldName, schema)).getString(); + String value = GenericFieldFactory + .getField(parsedMessage.getFieldByName(fieldName, schema)).getString(); rowMutationEntry.setCell(columnFamily, column, value); })); BigTableRecord bigTableRecord = new BigTableRecord(rowMutationEntry, index, null, message.getMetadata()); diff --git a/src/main/java/io/odpf/depot/bigtable/parser/BigTableResponseParser.java b/src/main/java/org/raystack/depot/bigtable/parser/BigTableResponseParser.java similarity index 60% rename from src/main/java/io/odpf/depot/bigtable/parser/BigTableResponseParser.java rename to src/main/java/org/raystack/depot/bigtable/parser/BigTableResponseParser.java index 4bf8bb7c..3cbe5c7c 100644 --- a/src/main/java/io/odpf/depot/bigtable/parser/BigTableResponseParser.java +++ b/src/main/java/org/raystack/depot/bigtable/parser/BigTableResponseParser.java @@ -1,19 +1,20 @@ -package io.odpf.depot.bigtable.parser; +package org.raystack.depot.bigtable.parser; import com.google.cloud.bigtable.data.v2.models.MutateRowsException; -import io.odpf.depot.bigtable.model.BigTableRecord; -import io.odpf.depot.bigtable.response.BigTableResponse; -import io.odpf.depot.error.ErrorInfo; -import io.odpf.depot.error.ErrorType; -import io.odpf.depot.metrics.BigTableMetrics; -import io.odpf.depot.metrics.Instrumentation; +import org.raystack.depot.bigtable.model.BigTableRecord; +import org.raystack.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorType; +import org.raystack.depot.metrics.BigTableMetrics; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.bigtable.response.BigTableResponse; import java.util.HashMap; import java.util.List; import java.util.Map; public class BigTableResponseParser { - public static Map getErrorsFromSinkResponse(List validRecords, BigTableResponse bigTableResponse, BigTableMetrics bigtableMetrics, Instrumentation instrumentation) { + public static Map getErrorsFromSinkResponse(List validRecords, + BigTableResponse bigTableResponse, BigTableMetrics bigtableMetrics, Instrumentation instrumentation) { HashMap errorInfoMap = new HashMap<>(); for (MutateRowsException.FailedMutation fm : bigTableResponse.getFailedMutations()) { BigTableRecord record = validRecords.get(fm.getIndex()); @@ -30,7 +31,8 @@ public static Map getErrorsFromSinkResponse(List getErrorsFromSinkResponse(List { + R apply(T t, U u, V v); +} diff --git a/src/main/java/io/odpf/depot/common/Template.java b/src/main/java/org/raystack/depot/common/Template.java similarity index 67% rename from src/main/java/io/odpf/depot/common/Template.java rename to src/main/java/org/raystack/depot/common/Template.java index f882af6e..14f5fcd6 100644 --- a/src/main/java/io/odpf/depot/common/Template.java +++ b/src/main/java/org/raystack/depot/common/Template.java @@ -1,12 +1,12 @@ -package io.odpf.depot.common; +package org.raystack.depot.common; import com.google.common.base.Splitter; -import io.odpf.depot.exception.InvalidTemplateException; -import io.odpf.depot.message.OdpfMessageSchema; -import io.odpf.depot.message.ParsedOdpfMessage; -import 
io.odpf.depot.message.field.GenericFieldFactory; -import io.odpf.depot.message.proto.converter.fields.ProtoField; -import io.odpf.depot.utils.StringUtils; +import org.raystack.depot.exception.InvalidTemplateException; +import org.raystack.depot.message.MessageSchema; +import org.raystack.depot.message.ParsedMessage; +import org.raystack.depot.message.field.GenericFieldFactory; +import org.raystack.depot.message.proto.converter.fields.ProtoField; +import org.raystack.depot.utils.StringUtils; import java.util.ArrayList; import java.util.List; @@ -31,14 +31,15 @@ private void validate() throws InvalidTemplateException { int values = patternVariableFieldNames.size(); int variables = StringUtils.count(templatePattern, '%'); if (validArgs != values || variables != values) { - throw new InvalidTemplateException(String.format("Template is not valid, variables=%d, validArgs=%d, values=%d", variables, validArgs, values)); + throw new InvalidTemplateException(String.format( + "Template is not valid, variables=%d, validArgs=%d, values=%d", variables, validArgs, values)); } } - public String parse(ParsedOdpfMessage parsedOdpfMessage, OdpfMessageSchema schema) { + public String parse(ParsedMessage parsedMessage, MessageSchema schema) { Object[] patternVariableData = patternVariableFieldNames .stream() - .map(fieldName -> fetchInternalValue(parsedOdpfMessage.getFieldByName(fieldName, schema))) + .map(fieldName -> fetchInternalValue(parsedMessage.getFieldByName(fieldName, schema))) .toArray(); return String.format(templatePattern, patternVariableData); } diff --git a/src/main/java/io/odpf/depot/common/Tuple.java b/src/main/java/org/raystack/depot/common/Tuple.java similarity index 81% rename from src/main/java/io/odpf/depot/common/Tuple.java rename to src/main/java/org/raystack/depot/common/Tuple.java index 8f068e3e..81d5cec1 100644 --- a/src/main/java/io/odpf/depot/common/Tuple.java +++ b/src/main/java/org/raystack/depot/common/Tuple.java @@ -1,4 +1,4 @@ -package io.odpf.depot.common; +package org.raystack.depot.common; import lombok.AllArgsConstructor; import lombok.Data; diff --git a/src/main/java/io/odpf/depot/common/TupleString.java b/src/main/java/org/raystack/depot/common/TupleString.java similarity index 82% rename from src/main/java/io/odpf/depot/common/TupleString.java rename to src/main/java/org/raystack/depot/common/TupleString.java index 166a9bff..f899e3c4 100644 --- a/src/main/java/io/odpf/depot/common/TupleString.java +++ b/src/main/java/org/raystack/depot/common/TupleString.java @@ -1,4 +1,4 @@ -package io.odpf.depot.common; +package org.raystack.depot.common; import lombok.AllArgsConstructor; import lombok.Data; diff --git a/src/main/java/io/odpf/depot/config/BigQuerySinkConfig.java b/src/main/java/org/raystack/depot/config/BigQuerySinkConfig.java similarity index 86% rename from src/main/java/io/odpf/depot/config/BigQuerySinkConfig.java rename to src/main/java/org/raystack/depot/config/BigQuerySinkConfig.java index 6bdda646..3d6eedb2 100644 --- a/src/main/java/io/odpf/depot/config/BigQuerySinkConfig.java +++ b/src/main/java/org/raystack/depot/config/BigQuerySinkConfig.java @@ -1,14 +1,14 @@ -package io.odpf.depot.config; +package org.raystack.depot.config; -import io.odpf.depot.common.TupleString; -import io.odpf.depot.config.converter.ConfToListConverter; -import io.odpf.depot.config.converter.ConverterUtils; -import io.odpf.depot.config.converter.LabelMapConverter; +import org.raystack.depot.config.converter.ConfToListConverter; +import 
org.raystack.depot.config.converter.ConverterUtils; +import org.raystack.depot.config.converter.LabelMapConverter; +import org.raystack.depot.common.TupleString; import java.util.List; import java.util.Map; -public interface BigQuerySinkConfig extends OdpfSinkConfig { +public interface BigQuerySinkConfig extends SinkConfig { @Key("SINK_BIGQUERY_GOOGLE_CLOUD_PROJECT_ID") String getGCloudProjectID(); @@ -99,5 +99,8 @@ public interface BigQuerySinkConfig extends OdpfSinkConfig { @DefaultValue("true") @Key("SINK_BIGQUERY_DYNAMIC_SCHEMA_ENABLE") boolean getSinkBigqueryDynamicSchemaEnable(); -} + @DefaultValue("false") + @Key("SINK_BIGQUERY_STORAGE_API_ENABLE") + boolean getSinkBigqueryStorageAPIEnable(); +} diff --git a/src/main/java/io/odpf/depot/config/BigTableSinkConfig.java b/src/main/java/org/raystack/depot/config/BigTableSinkConfig.java similarity index 85% rename from src/main/java/io/odpf/depot/config/BigTableSinkConfig.java rename to src/main/java/org/raystack/depot/config/BigTableSinkConfig.java index 675c9207..fff81f45 100644 --- a/src/main/java/io/odpf/depot/config/BigTableSinkConfig.java +++ b/src/main/java/org/raystack/depot/config/BigTableSinkConfig.java @@ -1,9 +1,9 @@ -package io.odpf.depot.config; +package org.raystack.depot.config; import org.aeonbits.owner.Config; @Config.DisableFeature(Config.DisableableFeature.PARAMETER_FORMATTING) -public interface BigTableSinkConfig extends OdpfSinkConfig { +public interface BigTableSinkConfig extends SinkConfig { @Key("SINK_BIGTABLE_GOOGLE_CLOUD_PROJECT_ID") String getGCloudProjectID(); diff --git a/src/main/java/io/odpf/depot/config/MetricsConfig.java b/src/main/java/org/raystack/depot/config/MetricsConfig.java similarity index 91% rename from src/main/java/io/odpf/depot/config/MetricsConfig.java rename to src/main/java/org/raystack/depot/config/MetricsConfig.java index 2804fa34..449d5caf 100644 --- a/src/main/java/io/odpf/depot/config/MetricsConfig.java +++ b/src/main/java/org/raystack/depot/config/MetricsConfig.java @@ -1,4 +1,4 @@ -package io.odpf.depot.config; +package org.raystack.depot.config; import org.aeonbits.owner.Config; diff --git a/src/main/java/io/odpf/depot/config/RedisSinkConfig.java b/src/main/java/org/raystack/depot/config/RedisSinkConfig.java similarity index 69% rename from src/main/java/io/odpf/depot/config/RedisSinkConfig.java rename to src/main/java/org/raystack/depot/config/RedisSinkConfig.java index dfe0eb57..d4104cea 100644 --- a/src/main/java/io/odpf/depot/config/RedisSinkConfig.java +++ b/src/main/java/org/raystack/depot/config/RedisSinkConfig.java @@ -1,19 +1,18 @@ -package io.odpf.depot.config; - -import io.odpf.depot.config.converter.JsonToPropertiesConverter; -import io.odpf.depot.config.converter.RedisSinkDataTypeConverter; -import io.odpf.depot.config.converter.RedisSinkDeploymentTypeConverter; -import io.odpf.depot.config.converter.RedisSinkTtlTypeConverter; -import io.odpf.depot.redis.enums.RedisSinkDataType; -import io.odpf.depot.redis.enums.RedisSinkDeploymentType; -import io.odpf.depot.redis.enums.RedisSinkTtlType; +package org.raystack.depot.config; + +import org.raystack.depot.config.converter.JsonToPropertiesConverter; +import org.raystack.depot.config.converter.RedisSinkDataTypeConverter; +import org.raystack.depot.config.converter.RedisSinkDeploymentTypeConverter; +import org.raystack.depot.config.converter.RedisSinkTtlTypeConverter; +import org.raystack.depot.redis.enums.RedisSinkDataType; +import org.raystack.depot.redis.enums.RedisSinkDeploymentType; +import 
org.raystack.depot.redis.enums.RedisSinkTtlType; import org.aeonbits.owner.Config; import java.util.Properties; - @Config.DisableFeature(Config.DisableableFeature.PARAMETER_FORMATTING) -public interface RedisSinkConfig extends OdpfSinkConfig { +public interface RedisSinkConfig extends SinkConfig { @Key("SINK_REDIS_URLS") String getSinkRedisUrls(); diff --git a/src/main/java/io/odpf/depot/config/OdpfSinkConfig.java b/src/main/java/org/raystack/depot/config/SinkConfig.java similarity index 81% rename from src/main/java/io/odpf/depot/config/OdpfSinkConfig.java rename to src/main/java/org/raystack/depot/config/SinkConfig.java index 0d36e1d5..48f84bff 100644 --- a/src/main/java/io/odpf/depot/config/OdpfSinkConfig.java +++ b/src/main/java/org/raystack/depot/config/SinkConfig.java @@ -1,18 +1,18 @@ -package io.odpf.depot.config; - -import io.odpf.depot.config.converter.SchemaRegistryHeadersConverter; -import io.odpf.depot.config.converter.SchemaRegistryRefreshConverter; -import io.odpf.depot.config.converter.SinkConnectorSchemaDataTypeConverter; -import io.odpf.depot.config.converter.SinkConnectorSchemaMessageModeConverter; -import io.odpf.depot.config.enums.SinkConnectorSchemaDataType; -import io.odpf.depot.message.SinkConnectorSchemaMessageMode; -import io.odpf.stencil.cache.SchemaRefreshStrategy; +package org.raystack.depot.config; + +import org.raystack.depot.config.converter.SchemaRegistryHeadersConverter; +import org.raystack.depot.config.converter.SchemaRegistryRefreshConverter; +import org.raystack.depot.config.converter.SinkConnectorSchemaDataTypeConverter; +import org.raystack.depot.config.converter.SinkConnectorSchemaMessageModeConverter; +import org.raystack.depot.config.enums.SinkConnectorSchemaDataType; +import org.raystack.depot.message.SinkConnectorSchemaMessageMode; +import org.raystack.stencil.cache.SchemaRefreshStrategy; import org.aeonbits.owner.Config; import org.apache.http.Header; import java.util.List; -public interface OdpfSinkConfig extends Config { +public interface SinkConfig extends Config { @Key("SCHEMA_REGISTRY_STENCIL_ENABLE") @DefaultValue("false") diff --git a/src/main/java/io/odpf/depot/config/converter/ConfToListConverter.java b/src/main/java/org/raystack/depot/config/converter/ConfToListConverter.java similarity index 84% rename from src/main/java/io/odpf/depot/config/converter/ConfToListConverter.java rename to src/main/java/org/raystack/depot/config/converter/ConfToListConverter.java index c80600ce..c6ec6065 100644 --- a/src/main/java/io/odpf/depot/config/converter/ConfToListConverter.java +++ b/src/main/java/org/raystack/depot/config/converter/ConfToListConverter.java @@ -1,6 +1,6 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; -import io.odpf.depot.common.TupleString; +import org.raystack.depot.common.TupleString; import org.aeonbits.owner.Converter; import java.lang.reflect.Method; diff --git a/src/main/java/io/odpf/depot/config/converter/ConverterUtils.java b/src/main/java/org/raystack/depot/config/converter/ConverterUtils.java similarity index 91% rename from src/main/java/io/odpf/depot/config/converter/ConverterUtils.java rename to src/main/java/org/raystack/depot/config/converter/ConverterUtils.java index eafd5d8d..f545b6bc 100644 --- a/src/main/java/io/odpf/depot/config/converter/ConverterUtils.java +++ b/src/main/java/org/raystack/depot/config/converter/ConverterUtils.java @@ -1,6 +1,6 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; -import 
io.odpf.depot.common.Tuple; +import org.raystack.depot.common.Tuple; import java.util.ArrayList; import java.util.List; diff --git a/src/main/java/io/odpf/depot/config/converter/JsonToPropertiesConverter.java b/src/main/java/org/raystack/depot/config/converter/JsonToPropertiesConverter.java similarity index 95% rename from src/main/java/io/odpf/depot/config/converter/JsonToPropertiesConverter.java rename to src/main/java/org/raystack/depot/config/converter/JsonToPropertiesConverter.java index 8c7d26ad..6cd6bd56 100644 --- a/src/main/java/io/odpf/depot/config/converter/JsonToPropertiesConverter.java +++ b/src/main/java/org/raystack/depot/config/converter/JsonToPropertiesConverter.java @@ -1,4 +1,4 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; import com.google.common.base.Strings; import com.google.gson.Gson; @@ -15,7 +15,6 @@ import java.util.function.Consumer; import java.util.stream.Stream; - public class JsonToPropertiesConverter implements org.aeonbits.owner.Converter { private static final Gson GSON = new Gson(); @@ -49,7 +48,8 @@ private void validate(Properties properties) { DuplicateFinder duplicateFinder = flattenValues(properties) .collect(DuplicateFinder::new, DuplicateFinder::accept, DuplicateFinder::combine); if (duplicateFinder.duplicates.size() > 0) { - throw new IllegalArgumentException("duplicates found in SINK_REDIS_HASHSET_FIELD_TO_COLUMN_MAPPING for : " + duplicateFinder.duplicates); + throw new IllegalArgumentException("duplicates found in SINK_REDIS_HASHSET_FIELD_TO_COLUMN_MAPPING for : " + + duplicateFinder.duplicates); } } diff --git a/src/main/java/io/odpf/depot/config/converter/LabelMapConverter.java b/src/main/java/org/raystack/depot/config/converter/LabelMapConverter.java similarity index 85% rename from src/main/java/io/odpf/depot/config/converter/LabelMapConverter.java rename to src/main/java/org/raystack/depot/config/converter/LabelMapConverter.java index 8be5c848..68b95da9 100644 --- a/src/main/java/io/odpf/depot/config/converter/LabelMapConverter.java +++ b/src/main/java/org/raystack/depot/config/converter/LabelMapConverter.java @@ -1,6 +1,6 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; -import io.odpf.depot.common.Tuple; +import org.raystack.depot.common.Tuple; import org.aeonbits.owner.Converter; import java.lang.reflect.Method; @@ -15,4 +15,3 @@ public Map convert(Method method, String input) { return listResult.stream().collect(Collectors.toMap(Tuple::getFirst, Tuple::getSecond)); } } - diff --git a/src/main/java/io/odpf/depot/config/converter/RedisSinkDataTypeConverter.java b/src/main/java/org/raystack/depot/config/converter/RedisSinkDataTypeConverter.java similarity index 75% rename from src/main/java/io/odpf/depot/config/converter/RedisSinkDataTypeConverter.java rename to src/main/java/org/raystack/depot/config/converter/RedisSinkDataTypeConverter.java index b16bdd89..ce570bce 100644 --- a/src/main/java/io/odpf/depot/config/converter/RedisSinkDataTypeConverter.java +++ b/src/main/java/org/raystack/depot/config/converter/RedisSinkDataTypeConverter.java @@ -1,6 +1,6 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; -import io.odpf.depot.redis.enums.RedisSinkDataType; +import org.raystack.depot.redis.enums.RedisSinkDataType; import org.aeonbits.owner.Converter; import java.lang.reflect.Method; diff --git a/src/main/java/io/odpf/depot/config/converter/RedisSinkDeploymentTypeConverter.java 
b/src/main/java/org/raystack/depot/config/converter/RedisSinkDeploymentTypeConverter.java similarity index 75% rename from src/main/java/io/odpf/depot/config/converter/RedisSinkDeploymentTypeConverter.java rename to src/main/java/org/raystack/depot/config/converter/RedisSinkDeploymentTypeConverter.java index d8dfff02..342b69de 100644 --- a/src/main/java/io/odpf/depot/config/converter/RedisSinkDeploymentTypeConverter.java +++ b/src/main/java/org/raystack/depot/config/converter/RedisSinkDeploymentTypeConverter.java @@ -1,6 +1,6 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; -import io.odpf.depot.redis.enums.RedisSinkDeploymentType; +import org.raystack.depot.redis.enums.RedisSinkDeploymentType; import org.aeonbits.owner.Converter; import java.lang.reflect.Method; diff --git a/src/main/java/io/odpf/depot/config/converter/RedisSinkTtlTypeConverter.java b/src/main/java/org/raystack/depot/config/converter/RedisSinkTtlTypeConverter.java similarity index 75% rename from src/main/java/io/odpf/depot/config/converter/RedisSinkTtlTypeConverter.java rename to src/main/java/org/raystack/depot/config/converter/RedisSinkTtlTypeConverter.java index 88071043..a3776831 100644 --- a/src/main/java/io/odpf/depot/config/converter/RedisSinkTtlTypeConverter.java +++ b/src/main/java/org/raystack/depot/config/converter/RedisSinkTtlTypeConverter.java @@ -1,6 +1,6 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; -import io.odpf.depot.redis.enums.RedisSinkTtlType; +import org.raystack.depot.redis.enums.RedisSinkTtlType; import org.aeonbits.owner.Converter; import java.lang.reflect.Method; diff --git a/src/main/java/io/odpf/depot/config/converter/SchemaRegistryHeadersConverter.java b/src/main/java/org/raystack/depot/config/converter/SchemaRegistryHeadersConverter.java similarity index 96% rename from src/main/java/io/odpf/depot/config/converter/SchemaRegistryHeadersConverter.java rename to src/main/java/org/raystack/depot/config/converter/SchemaRegistryHeadersConverter.java index 4d065aaf..1cba14d8 100644 --- a/src/main/java/io/odpf/depot/config/converter/SchemaRegistryHeadersConverter.java +++ b/src/main/java/org/raystack/depot/config/converter/SchemaRegistryHeadersConverter.java @@ -1,4 +1,4 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; import org.aeonbits.owner.Converter; import org.aeonbits.owner.Tokenizer; diff --git a/src/main/java/io/odpf/depot/config/converter/SchemaRegistryRefreshConverter.java b/src/main/java/org/raystack/depot/config/converter/SchemaRegistryRefreshConverter.java similarity index 81% rename from src/main/java/io/odpf/depot/config/converter/SchemaRegistryRefreshConverter.java rename to src/main/java/org/raystack/depot/config/converter/SchemaRegistryRefreshConverter.java index 89fb976f..6b9095f7 100644 --- a/src/main/java/io/odpf/depot/config/converter/SchemaRegistryRefreshConverter.java +++ b/src/main/java/org/raystack/depot/config/converter/SchemaRegistryRefreshConverter.java @@ -1,6 +1,6 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; -import io.odpf.stencil.cache.SchemaRefreshStrategy; +import org.raystack.stencil.cache.SchemaRefreshStrategy; import org.aeonbits.owner.Converter; import java.lang.reflect.Method; diff --git a/src/main/java/io/odpf/depot/config/converter/SinkConnectorSchemaDataTypeConverter.java b/src/main/java/org/raystack/depot/config/converter/SinkConnectorSchemaDataTypeConverter.java similarity index 75% 
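For context, the converters renamed in this stretch of the diff share a one-method org.aeonbits.owner Converter shape; the body below is an assumed sketch of that pattern (only the renames are visible in the diff itself):

```java
import org.aeonbits.owner.Converter;
import org.raystack.depot.config.enums.SinkConnectorSchemaDataType;

import java.lang.reflect.Method;

// Assumed body: owner passes the raw config string to convert().
public class SinkConnectorSchemaDataTypeConverter implements Converter<SinkConnectorSchemaDataType> {
    @Override
    public SinkConnectorSchemaDataType convert(Method method, String input) {
        return SinkConnectorSchemaDataType.valueOf(input.toUpperCase());
    }
}
```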
rename from src/main/java/io/odpf/depot/config/converter/SinkConnectorSchemaDataTypeConverter.java rename to src/main/java/org/raystack/depot/config/converter/SinkConnectorSchemaDataTypeConverter.java index c7ac9b82..db850253 100644 --- a/src/main/java/io/odpf/depot/config/converter/SinkConnectorSchemaDataTypeConverter.java +++ b/src/main/java/org/raystack/depot/config/converter/SinkConnectorSchemaDataTypeConverter.java @@ -1,7 +1,6 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; - -import io.odpf.depot.config.enums.SinkConnectorSchemaDataType; +import org.raystack.depot.config.enums.SinkConnectorSchemaDataType; import org.aeonbits.owner.Converter; import java.lang.reflect.Method; diff --git a/src/main/java/io/odpf/depot/config/converter/SinkConnectorSchemaMessageModeConverter.java b/src/main/java/org/raystack/depot/config/converter/SinkConnectorSchemaMessageModeConverter.java similarity index 76% rename from src/main/java/io/odpf/depot/config/converter/SinkConnectorSchemaMessageModeConverter.java rename to src/main/java/org/raystack/depot/config/converter/SinkConnectorSchemaMessageModeConverter.java index 36e2d87a..daf5f473 100644 --- a/src/main/java/io/odpf/depot/config/converter/SinkConnectorSchemaMessageModeConverter.java +++ b/src/main/java/org/raystack/depot/config/converter/SinkConnectorSchemaMessageModeConverter.java @@ -1,6 +1,6 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; -import io.odpf.depot.message.SinkConnectorSchemaMessageMode; +import org.raystack.depot.message.SinkConnectorSchemaMessageMode; import org.aeonbits.owner.Converter; import java.lang.reflect.Method; diff --git a/src/main/java/io/odpf/depot/config/enums/SinkConnectorSchemaDataType.java b/src/main/java/org/raystack/depot/config/enums/SinkConnectorSchemaDataType.java similarity index 62% rename from src/main/java/io/odpf/depot/config/enums/SinkConnectorSchemaDataType.java rename to src/main/java/org/raystack/depot/config/enums/SinkConnectorSchemaDataType.java index 4e666a16..64d57630 100644 --- a/src/main/java/io/odpf/depot/config/enums/SinkConnectorSchemaDataType.java +++ b/src/main/java/org/raystack/depot/config/enums/SinkConnectorSchemaDataType.java @@ -1,4 +1,4 @@ -package io.odpf.depot.config.enums; +package org.raystack.depot.config.enums; public enum SinkConnectorSchemaDataType { PROTOBUF, diff --git a/src/main/java/io/odpf/depot/error/ErrorInfo.java b/src/main/java/org/raystack/depot/error/ErrorInfo.java similarity index 74% rename from src/main/java/io/odpf/depot/error/ErrorInfo.java rename to src/main/java/org/raystack/depot/error/ErrorInfo.java index 7d899d3c..ac7840a0 100644 --- a/src/main/java/io/odpf/depot/error/ErrorInfo.java +++ b/src/main/java/org/raystack/depot/error/ErrorInfo.java @@ -1,4 +1,4 @@ -package io.odpf.depot.error; +package org.raystack.depot.error; import lombok.AllArgsConstructor; import lombok.Data; @@ -13,6 +13,7 @@ public class ErrorInfo { private ErrorType errorType; public String toString() { - return String.format("Exception %s, ErrorType: %s", exception != null ? exception.getMessage() : "NULL", errorType.name()); + return String.format("Exception %s, ErrorType: %s", exception != null ? 
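+ // null-safe: fall back to "NULL" when no exception is attached to the error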
exception.getMessage() : "NULL",
+ errorType.name());
 }
 }
diff --git a/src/main/java/io/odpf/depot/error/ErrorType.java b/src/main/java/org/raystack/depot/error/ErrorType.java
similarity index 74%
rename from src/main/java/io/odpf/depot/error/ErrorType.java
rename to src/main/java/org/raystack/depot/error/ErrorType.java
index e11f1c15..dbfca7fa 100644
--- a/src/main/java/io/odpf/depot/error/ErrorType.java
+++ b/src/main/java/org/raystack/depot/error/ErrorType.java
@@ -1,4 +1,4 @@
-package io.odpf.depot.error;
+package org.raystack.depot.error;

 public enum ErrorType {
 DESERIALIZATION_ERROR,
@@ -8,5 +8,5 @@ public enum ErrorType {
 SINK_5XX_ERROR,
 SINK_RETRYABLE_ERROR,
 SINK_UNKNOWN_ERROR,
- DEFAULT_ERROR //Deprecated
+ DEFAULT_ERROR // Deprecated
 }
diff --git a/src/main/java/io/odpf/depot/exception/ConfigurationException.java b/src/main/java/org/raystack/depot/exception/ConfigurationException.java
similarity index 87%
rename from src/main/java/io/odpf/depot/exception/ConfigurationException.java
rename to src/main/java/org/raystack/depot/exception/ConfigurationException.java
index 9fb93bca..6236c793 100644
--- a/src/main/java/io/odpf/depot/exception/ConfigurationException.java
+++ b/src/main/java/org/raystack/depot/exception/ConfigurationException.java
@@ -1,4 +1,4 @@
-package io.odpf.depot.exception;
+package org.raystack.depot.exception;

 public class ConfigurationException extends RuntimeException {
 public ConfigurationException(String message) {
diff --git a/src/main/java/io/odpf/depot/exception/DeserializerException.java b/src/main/java/org/raystack/depot/exception/DeserializerException.java
similarity index 79%
rename from src/main/java/io/odpf/depot/exception/DeserializerException.java
rename to src/main/java/org/raystack/depot/exception/DeserializerException.java
index 67d2693a..44fae1f7 100644
--- a/src/main/java/io/odpf/depot/exception/DeserializerException.java
+++ b/src/main/java/org/raystack/depot/exception/DeserializerException.java
@@ -1,7 +1,8 @@
-package io.odpf.depot.exception;
+package org.raystack.depot.exception;

 /**
- * Deserializer exception is thrown when message from proto is not deserializable into the Java object.
+ * Deserializer exception is thrown when message from proto is not
+ * deserializable into the Java object.
 */
 public class DeserializerException extends RuntimeException {
diff --git a/src/main/java/io/odpf/depot/exception/EmptyMessageException.java b/src/main/java/org/raystack/depot/exception/EmptyMessageException.java
similarity index 85%
rename from src/main/java/io/odpf/depot/exception/EmptyMessageException.java
rename to src/main/java/org/raystack/depot/exception/EmptyMessageException.java
index 51e6c9aa..18e04069 100644
--- a/src/main/java/io/odpf/depot/exception/EmptyMessageException.java
+++ b/src/main/java/org/raystack/depot/exception/EmptyMessageException.java
@@ -1,4 +1,4 @@
-package io.odpf.depot.exception;
+package org.raystack.depot.exception;

 /**
 * EmptyMessageException is thrown when the message contains zero bytes.
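To make the exception-to-error contract concrete, a small illustrative sketch; the addErrors signature is inferred from the call sites elsewhere in this diff, and the mapping choices are assumptions, not an API defined here:

```java
import org.raystack.depot.SinkResponse;
import org.raystack.depot.error.ErrorInfo;
import org.raystack.depot.error.ErrorType;
import org.raystack.depot.exception.DeserializerException;

public class ErrorMappingExample {
    // Attach a failure to the batch index it came from.
    static void report(SinkResponse response, long index, Exception cause) {
        ErrorType type = (cause instanceof DeserializerException)
                ? ErrorType.DESERIALIZATION_ERROR
                : ErrorType.SINK_UNKNOWN_ERROR;
        response.addErrors(index, new ErrorInfo(cause, type));
    }
}
```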
@@ -8,4 +8,3 @@ public EmptyMessageException() { super("log message is empty"); } } - diff --git a/src/main/java/io/odpf/depot/exception/InvalidTemplateException.java b/src/main/java/org/raystack/depot/exception/InvalidTemplateException.java similarity index 79% rename from src/main/java/io/odpf/depot/exception/InvalidTemplateException.java rename to src/main/java/org/raystack/depot/exception/InvalidTemplateException.java index db618f4a..6b9b2183 100644 --- a/src/main/java/io/odpf/depot/exception/InvalidTemplateException.java +++ b/src/main/java/org/raystack/depot/exception/InvalidTemplateException.java @@ -1,4 +1,4 @@ -package io.odpf.depot.exception; +package org.raystack.depot.exception; public class InvalidTemplateException extends Exception { public InvalidTemplateException(String message) { diff --git a/src/main/java/io/odpf/depot/exception/ProtoNotFoundException.java b/src/main/java/org/raystack/depot/exception/ProtoNotFoundException.java similarity index 79% rename from src/main/java/io/odpf/depot/exception/ProtoNotFoundException.java rename to src/main/java/org/raystack/depot/exception/ProtoNotFoundException.java index 79753028..2da75e54 100644 --- a/src/main/java/io/odpf/depot/exception/ProtoNotFoundException.java +++ b/src/main/java/org/raystack/depot/exception/ProtoNotFoundException.java @@ -1,4 +1,4 @@ -package io.odpf.depot.exception; +package org.raystack.depot.exception; public class ProtoNotFoundException extends RuntimeException { public ProtoNotFoundException(String message) { diff --git a/src/main/java/org/raystack/depot/exception/SinkException.java b/src/main/java/org/raystack/depot/exception/SinkException.java new file mode 100644 index 00000000..5fadc799 --- /dev/null +++ b/src/main/java/org/raystack/depot/exception/SinkException.java @@ -0,0 +1,9 @@ +package org.raystack.depot.exception; + +import java.io.IOException; + +public class SinkException extends IOException { + public SinkException(String message, Throwable th) { + super(message, th); + } +} diff --git a/src/main/java/io/odpf/depot/exception/UnknownFieldsException.java b/src/main/java/org/raystack/depot/exception/UnknownFieldsException.java similarity index 55% rename from src/main/java/io/odpf/depot/exception/UnknownFieldsException.java rename to src/main/java/org/raystack/depot/exception/UnknownFieldsException.java index c6698b39..71a53422 100644 --- a/src/main/java/io/odpf/depot/exception/UnknownFieldsException.java +++ b/src/main/java/org/raystack/depot/exception/UnknownFieldsException.java @@ -1,14 +1,20 @@ -package io.odpf.depot.exception; - +package org.raystack.depot.exception; import com.google.protobuf.DynamicMessage; /** - * UnknownFieldsException is thrown when unknown fields is detected on the log message although the proto message was succesfuly parsed. - * Unknown fields error can happen because multiple causes, and can be handled differently depends on the use case. - * Unknown fields error by default should be handled by retry the processing because there is a probability that, message deserializer is not updated to the latest schema - * When consumer is deliberately process message using different schema and intentionally ignore extra fields that missing from descriptor the error handling can be disabled. - * On some use case that need zero data loss, for example data warehousing unknown fields error should be handled properly to prevent missing fields. 
+ * UnknownFieldsException is thrown when unknown fields are detected in the log + * message even though the proto message was successfully parsed. + * An unknown-fields error can have multiple causes and can be handled + * differently depending on the use case. + * By default, it should be handled by retrying the processing, because the + * message deserializer may not have been updated to the latest schema yet. + * When a consumer deliberately processes messages using a different schema and + * intentionally ignores extra fields missing from the descriptor, this error + * handling can be disabled. + * For use cases that need zero data loss, for example data warehousing, the + * unknown-fields error should be handled properly to prevent missing fields. */ public class UnknownFieldsException extends DeserializerException { diff --git a/src/main/java/org/raystack/depot/log/LogSink.java b/src/main/java/org/raystack/depot/log/LogSink.java new file mode 100644 index 00000000..fa17f494 --- /dev/null +++ b/src/main/java/org/raystack/depot/log/LogSink.java @@ -0,0 +1,57 @@ +package org.raystack.depot.log; + +import org.raystack.depot.Sink; +import org.raystack.depot.config.SinkConfig; +import org.raystack.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorType; +import org.raystack.depot.exception.SinkException; +import org.raystack.depot.message.Message; +import org.raystack.depot.message.MessageParser; +import org.raystack.depot.message.ParsedMessage; +import org.raystack.depot.message.SinkConnectorSchemaMessageMode; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.SinkResponse; + +import java.io.IOException; +import java.util.List; + +public class LogSink implements Sink { + private final MessageParser messageParser; + private final Instrumentation instrumentation; + private final SinkConfig config; + + public LogSink(SinkConfig config, MessageParser messageParser, Instrumentation instrumentation) { + this.messageParser = messageParser; + this.instrumentation = instrumentation; + this.config = config; + } + + @Override + public SinkResponse pushToSink(List messages) throws SinkException { + SinkResponse response = new SinkResponse(); + SinkConnectorSchemaMessageMode mode = config.getSinkConnectorSchemaMessageMode(); + String schemaClass = mode == SinkConnectorSchemaMessageMode.LOG_MESSAGE + ?
config.getSinkConnectorSchemaProtoMessageClass() + : config.getSinkConnectorSchemaProtoKeyClass(); + for (int ii = 0; ii < messages.size(); ii++) { + Message message = messages.get(ii); + try { + ParsedMessage parsedMessage = messageParser.parse( + message, + mode, + schemaClass); + instrumentation.logInfo("\n================= DATA =======================\n{}" + + "\n================= METADATA =======================\n{}\n", + parsedMessage.toString(), message.getMetadataString()); + } catch (IOException e) { + response.addErrors(ii, new ErrorInfo(e, ErrorType.DESERIALIZATION_ERROR)); + } + } + return response; + } + + @Override + public void close() throws IOException { + + } +} diff --git a/src/main/java/org/raystack/depot/log/LogSinkFactory.java b/src/main/java/org/raystack/depot/log/LogSinkFactory.java new file mode 100644 index 00000000..f8a2bdde --- /dev/null +++ b/src/main/java/org/raystack/depot/log/LogSinkFactory.java @@ -0,0 +1,40 @@ +package org.raystack.depot.log; + +import org.raystack.depot.config.SinkConfig; +import org.raystack.depot.message.MessageParser; +import org.raystack.depot.message.MessageParserFactory; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.metrics.StatsDReporter; +import com.timgroup.statsd.NoOpStatsDClient; +import org.raystack.depot.Sink; +import org.aeonbits.owner.ConfigFactory; + +import java.util.Map; + +public class LogSinkFactory { + + private final StatsDReporter statsDReporter; + private MessageParser messageParser; + private final SinkConfig sinkConfig; + + public LogSinkFactory(Map env, StatsDReporter statsDReporter) { + this(ConfigFactory.create(SinkConfig.class, env), statsDReporter); + } + + public LogSinkFactory(SinkConfig sinkConfig, StatsDReporter statsDReporter) { + this.sinkConfig = sinkConfig; + this.statsDReporter = statsDReporter; + } + + public LogSinkFactory(SinkConfig sinkConfig) { + this(sinkConfig, new StatsDReporter(new NoOpStatsDClient())); + } + + public void init() { + this.messageParser = MessageParserFactory.getParser(sinkConfig, statsDReporter); + } + + public Sink create() { + return new LogSink(sinkConfig, messageParser, new Instrumentation(statsDReporter, LogSink.class)); + } +} diff --git a/src/main/java/io/odpf/depot/message/OdpfMessage.java b/src/main/java/org/raystack/depot/message/Message.java similarity index 81% rename from src/main/java/io/odpf/depot/message/OdpfMessage.java rename to src/main/java/org/raystack/depot/message/Message.java index aa1c6ba6..c0ca96a0 100644 --- a/src/main/java/io/odpf/depot/message/OdpfMessage.java +++ b/src/main/java/org/raystack/depot/message/Message.java @@ -1,7 +1,7 @@ -package io.odpf.depot.message; +package org.raystack.depot.message; -import io.odpf.depot.common.Tuple; -import io.odpf.depot.common.TupleString; +import org.raystack.depot.common.Tuple; +import org.raystack.depot.common.TupleString; import lombok.EqualsAndHashCode; import lombok.Getter; @@ -11,10 +11,9 @@ import java.util.Map; import java.util.stream.Collectors; - @Getter @EqualsAndHashCode -public class OdpfMessage { +public class Message { private final Object logKey; private final Object logMessage; private final Map metadata = new HashMap<>(); @@ -26,7 +25,7 @@ public String getMetadataString() { } @SafeVarargs - public OdpfMessage(Object logKey, Object logMessage, Tuple... tuples) { + public Message(Object logKey, Object logMessage, Tuple... 
tuples) { this.logKey = logKey; this.logMessage = logMessage; Arrays.stream(tuples).forEach(t -> metadata.put(t.getFirst(), t.getSecond())); diff --git a/src/main/java/org/raystack/depot/message/MessageParser.java b/src/main/java/org/raystack/depot/message/MessageParser.java new file mode 100644 index 00000000..e53a646e --- /dev/null +++ b/src/main/java/org/raystack/depot/message/MessageParser.java @@ -0,0 +1,9 @@ +package org.raystack.depot.message; + +import java.io.IOException; + +public interface MessageParser { + ParsedMessage parse(Message message, SinkConnectorSchemaMessageMode type, String schemaClass) throws IOException; + + MessageSchema getSchema(String schemaClass) throws IOException; +} diff --git a/src/main/java/org/raystack/depot/message/MessageParserFactory.java b/src/main/java/org/raystack/depot/message/MessageParserFactory.java new file mode 100644 index 00000000..c67a9ba9 --- /dev/null +++ b/src/main/java/org/raystack/depot/message/MessageParserFactory.java @@ -0,0 +1,29 @@ +package org.raystack.depot.message; + +import org.raystack.depot.message.json.JsonMessageParser; +import org.raystack.depot.message.proto.ProtoMessageParser; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.metrics.JsonParserMetrics; +import org.raystack.depot.metrics.StatsDReporter; +import org.raystack.depot.stencil.DepotStencilUpdateListener; +import org.raystack.depot.config.SinkConfig; + +public class MessageParserFactory { + public static MessageParser getParser(SinkConfig config, StatsDReporter statsDReporter, + DepotStencilUpdateListener depotStencilUpdateListener) { + switch (config.getSinkConnectorSchemaDataType()) { + case JSON: + return new JsonMessageParser(config, + new Instrumentation(statsDReporter, JsonMessageParser.class), + new JsonParserMetrics(config)); + case PROTOBUF: + return new ProtoMessageParser(config, statsDReporter, depotStencilUpdateListener); + default: + throw new IllegalArgumentException("Schema Type is not supported"); + } + } + + public static MessageParser getParser(SinkConfig config, StatsDReporter statsDReporter) { + return getParser(config, statsDReporter, null); + } +} diff --git a/src/main/java/org/raystack/depot/message/MessageSchema.java b/src/main/java/org/raystack/depot/message/MessageSchema.java new file mode 100644 index 00000000..c1fbc1bc --- /dev/null +++ b/src/main/java/org/raystack/depot/message/MessageSchema.java @@ -0,0 +1,6 @@ +package org.raystack.depot.message; + +public interface MessageSchema { + + Object getSchema(); +} diff --git a/src/main/java/io/odpf/depot/message/MessageUtils.java b/src/main/java/org/raystack/depot/message/MessageUtils.java similarity index 91% rename from src/main/java/io/odpf/depot/message/MessageUtils.java rename to src/main/java/org/raystack/depot/message/MessageUtils.java index 6bfc5b50..b4386da0 100644 --- a/src/main/java/io/odpf/depot/message/MessageUtils.java +++ b/src/main/java/org/raystack/depot/message/MessageUtils.java @@ -1,4 +1,4 @@ -package io.odpf.depot.message; +package org.raystack.depot.message; import com.jayway.jsonpath.Configuration; import com.jayway.jsonpath.JsonPath; @@ -19,7 +19,7 @@ public static Object getFieldFromJsonObject(String name, JSONObject jsonObject, } } - public static void validate(OdpfMessage message, Class validClass) throws IOException { + public static void validate(Message message, Class validClass) throws IOException { if ((message.getLogKey() != null && !(validClass.isInstance(message.getLogKey()))) || (message.getLogMessage() != null && 
!(validClass.isInstance(message.getLogMessage())))) { throw new IOException( diff --git a/src/main/java/org/raystack/depot/message/ParsedMessage.java b/src/main/java/org/raystack/depot/message/ParsedMessage.java new file mode 100644 index 00000000..8db8c7b2 --- /dev/null +++ b/src/main/java/org/raystack/depot/message/ParsedMessage.java @@ -0,0 +1,16 @@ +package org.raystack.depot.message; + +import org.raystack.depot.config.SinkConfig; + +import java.io.IOException; +import java.util.Map; + +public interface ParsedMessage { + Object getRaw(); + + void validate(SinkConfig config); + + Map getMapping(MessageSchema schema) throws IOException; + + Object getFieldByName(String name, MessageSchema messageSchema); +} diff --git a/src/main/java/io/odpf/depot/message/SinkConnectorSchemaMessageMode.java b/src/main/java/org/raystack/depot/message/SinkConnectorSchemaMessageMode.java similarity index 68% rename from src/main/java/io/odpf/depot/message/SinkConnectorSchemaMessageMode.java rename to src/main/java/org/raystack/depot/message/SinkConnectorSchemaMessageMode.java index f4efd4b5..5b4c06db 100644 --- a/src/main/java/io/odpf/depot/message/SinkConnectorSchemaMessageMode.java +++ b/src/main/java/org/raystack/depot/message/SinkConnectorSchemaMessageMode.java @@ -1,4 +1,4 @@ -package io.odpf.depot.message; +package org.raystack.depot.message; public enum SinkConnectorSchemaMessageMode { LOG_KEY, diff --git a/src/main/java/io/odpf/depot/message/field/FieldUtils.java b/src/main/java/org/raystack/depot/message/field/FieldUtils.java similarity index 94% rename from src/main/java/io/odpf/depot/message/field/FieldUtils.java rename to src/main/java/org/raystack/depot/message/field/FieldUtils.java index 0d7f8b73..b3aab154 100644 --- a/src/main/java/io/odpf/depot/message/field/FieldUtils.java +++ b/src/main/java/org/raystack/depot/message/field/FieldUtils.java @@ -1,4 +1,4 @@ -package io.odpf.depot.message.field; +package org.raystack.depot.message.field; import com.google.gson.Gson; import com.google.gson.GsonBuilder; @@ -21,7 +21,8 @@ public static String convertToStringForMessageTypes(Object value, Function getMapping(OdpfMessageSchema schema) { + public Map getMapping(MessageSchema schema) { if (jsonObject == null || jsonObject.isEmpty()) { return Collections.emptyMap(); } return jsonObject.toMap(); } - public Object getFieldByName(String name, OdpfMessageSchema odpfMessageSchema) { + public Object getFieldByName(String name, MessageSchema messageSchema) { if (name == null || name.isEmpty()) { throw new IllegalArgumentException("Invalid field config : name can not be empty"); } diff --git a/src/main/java/io/odpf/depot/message/proto/Constants.java b/src/main/java/org/raystack/depot/message/proto/Constants.java similarity index 91% rename from src/main/java/io/odpf/depot/message/proto/Constants.java rename to src/main/java/org/raystack/depot/message/proto/Constants.java index 7c645fac..db7162e8 100644 --- a/src/main/java/io/odpf/depot/message/proto/Constants.java +++ b/src/main/java/org/raystack/depot/message/proto/Constants.java @@ -1,5 +1,4 @@ -package io.odpf.depot.message.proto; - +package org.raystack.depot.message.proto; public class Constants { public static class Config { diff --git a/src/main/java/io/odpf/depot/message/proto/DescriptorCache.java b/src/main/java/org/raystack/depot/message/proto/DescriptorCache.java similarity index 76% rename from src/main/java/io/odpf/depot/message/proto/DescriptorCache.java rename to src/main/java/org/raystack/depot/message/proto/DescriptorCache.java index 
9047416c..0c5429a9 100644 --- a/src/main/java/io/odpf/depot/message/proto/DescriptorCache.java +++ b/src/main/java/org/raystack/depot/message/proto/DescriptorCache.java @@ -1,11 +1,12 @@ -package io.odpf.depot.message.proto; +package org.raystack.depot.message.proto; import com.google.protobuf.Descriptors; import java.util.Map; public class DescriptorCache { - public Descriptors.Descriptor fetch(Map allDescriptors, Map typeNameToPackageNameMap, String protoName) { + public Descriptors.Descriptor fetch(Map allDescriptors, + Map typeNameToPackageNameMap, String protoName) { if (allDescriptors.get(protoName) != null) { return allDescriptors.get(protoName); } diff --git a/src/main/java/io/odpf/depot/message/proto/ProtoField.java b/src/main/java/org/raystack/depot/message/proto/ProtoField.java similarity index 92% rename from src/main/java/io/odpf/depot/message/proto/ProtoField.java rename to src/main/java/org/raystack/depot/message/proto/ProtoField.java index 965568cd..1c2d5423 100644 --- a/src/main/java/io/odpf/depot/message/proto/ProtoField.java +++ b/src/main/java/org/raystack/depot/message/proto/ProtoField.java @@ -1,4 +1,4 @@ -package io.odpf.depot.message.proto; +package org.raystack.depot.message.proto; import com.google.protobuf.DescriptorProtos; import lombok.EqualsAndHashCode; @@ -20,7 +20,8 @@ public ProtoField() { this.fields = new ArrayList<>(); } - public ProtoField(String name, String typeName, DescriptorProtos.FieldDescriptorProto.Type type, DescriptorProtos.FieldDescriptorProto.Label label, List fields, int index) { + public ProtoField(String name, String typeName, DescriptorProtos.FieldDescriptorProto.Type type, + DescriptorProtos.FieldDescriptorProto.Label label, List fields, int index) { this.name = name; this.typeName = typeName; this.type = type; diff --git a/src/main/java/io/odpf/depot/message/proto/ProtoFieldParser.java b/src/main/java/org/raystack/depot/message/proto/ProtoFieldParser.java similarity index 71% rename from src/main/java/io/odpf/depot/message/proto/ProtoFieldParser.java rename to src/main/java/org/raystack/depot/message/proto/ProtoFieldParser.java index fa570c46..3a0f65c2 100644 --- a/src/main/java/io/odpf/depot/message/proto/ProtoFieldParser.java +++ b/src/main/java/org/raystack/depot/message/proto/ProtoFieldParser.java @@ -1,7 +1,7 @@ -package io.odpf.depot.message.proto; +package org.raystack.depot.message.proto; import com.google.protobuf.Descriptors; -import io.odpf.depot.exception.ProtoNotFoundException; +import org.raystack.depot.exception.ProtoNotFoundException; import java.util.Map; @@ -13,15 +13,18 @@ public class ProtoFieldParser { private static final int MAX_NESTED_SCHEMA_LEVEL = 15; private final DescriptorCache descriptorCache = new DescriptorCache(); - public ProtoField parseFields(ProtoField protoField, String protoSchema, Map allDescriptors, - Map typeNameToPackageNameMap) { + public ProtoField parseFields(ProtoField protoField, String protoSchema, + Map allDescriptors, + Map typeNameToPackageNameMap) { return parseFields(protoField, protoSchema, allDescriptors, typeNameToPackageNameMap, 1); } - private ProtoField parseFields(ProtoField protoField, String protoSchema, Map allDescriptors, - Map typeNameToPackageNameMap, int level) { + private ProtoField parseFields(ProtoField protoField, String protoSchema, + Map allDescriptors, + Map typeNameToPackageNameMap, int level) { - Descriptors.Descriptor currentProto = descriptorCache.fetch(allDescriptors, typeNameToPackageNameMap, protoSchema); + Descriptors.Descriptor currentProto = 
descriptorCache.fetch(allDescriptors, typeNameToPackageNameMap, + protoSchema); if (currentProto == null) { throw new ProtoNotFoundException("No Proto found for class " + protoSchema); } @@ -33,7 +36,8 @@ private ProtoField parseFields(ProtoField protoField, String protoSchema, Map getDescriptorMap() { @@ -75,11 +78,11 @@ public Map getDescriptorMap() { } @Override - public OdpfMessageSchema getSchema(String schemaClass) throws IOException { + public MessageSchema getSchema(String schemaClass) throws IOException { ProtoField protoField = new ProtoField(); protoField = protoMappingParser.parseFields(protoField, schemaClass, getDescriptorMap(), getTypeNameToPackageNameMap(getDescriptorMap())); - return new ProtoOdpfMessageSchema(protoField); + return new ProtoMessageSchema(protoField); } private Map getTypeNameToPackageNameMap(Map descriptors) { @@ -95,10 +98,11 @@ private Predicate distinctByFullName(Function keyExtra return t -> objects.add(keyExtractor.apply(t)); } - public OdpfMessageSchema getSchema(String schemaClass, Map newDescriptors) throws IOException { + public MessageSchema getSchema(String schemaClass, Map newDescriptors) + throws IOException { ProtoField protoField = new ProtoField(); protoField = protoMappingParser.parseFields(protoField, schemaClass, newDescriptors, getTypeNameToPackageNameMap(newDescriptors)); - return new ProtoOdpfMessageSchema(protoField); + return new ProtoMessageSchema(protoField); } } diff --git a/src/main/java/io/odpf/depot/message/proto/ProtoOdpfMessageSchema.java b/src/main/java/org/raystack/depot/message/proto/ProtoMessageSchema.java similarity index 82% rename from src/main/java/io/odpf/depot/message/proto/ProtoOdpfMessageSchema.java rename to src/main/java/org/raystack/depot/message/proto/ProtoMessageSchema.java index e97ca805..9cdb3ead 100644 --- a/src/main/java/io/odpf/depot/message/proto/ProtoOdpfMessageSchema.java +++ b/src/main/java/org/raystack/depot/message/proto/ProtoMessageSchema.java @@ -1,8 +1,8 @@ -package io.odpf.depot.message.proto; +package org.raystack.depot.message.proto; import com.google.gson.Gson; import com.google.gson.reflect.TypeToken; -import io.odpf.depot.message.OdpfMessageSchema; +import org.raystack.depot.message.MessageSchema; import lombok.EqualsAndHashCode; import lombok.Getter; @@ -12,18 +12,18 @@ import java.util.Properties; @EqualsAndHashCode -public class ProtoOdpfMessageSchema implements OdpfMessageSchema { +public class ProtoMessageSchema implements MessageSchema { @Getter private final ProtoField protoField; private static final Gson GSON = new Gson(); private final Properties properties; - public ProtoOdpfMessageSchema(ProtoField protoField) throws IOException { + public ProtoMessageSchema(ProtoField protoField) throws IOException { this(protoField, createProperties(protoField)); } - public ProtoOdpfMessageSchema(ProtoField protoField, Properties properties) { + public ProtoMessageSchema(ProtoField protoField, Properties properties) { this.protoField = protoField; this.properties = properties; } diff --git a/src/main/java/io/odpf/depot/message/proto/ProtoOdpfParsedMessage.java b/src/main/java/org/raystack/depot/message/proto/ProtoParsedMessage.java similarity index 76% rename from src/main/java/io/odpf/depot/message/proto/ProtoOdpfParsedMessage.java rename to src/main/java/org/raystack/depot/message/proto/ProtoParsedMessage.java index 7081699a..d324bd4e 100644 --- a/src/main/java/io/odpf/depot/message/proto/ProtoOdpfParsedMessage.java +++ 
b/src/main/java/org/raystack/depot/message/proto/ProtoParsedMessage.java @@ -1,36 +1,37 @@ -package io.odpf.depot.message.proto; +package org.raystack.depot.message.proto; import com.google.api.client.util.DateTime; -import com.google.api.client.util.Preconditions; +import com.google.protobuf.ByteString; import com.google.protobuf.Descriptors; import com.google.protobuf.DynamicMessage; -import io.odpf.depot.common.Tuple; -import io.odpf.depot.config.OdpfSinkConfig; -import io.odpf.depot.exception.ConfigurationException; -import io.odpf.depot.exception.UnknownFieldsException; -import io.odpf.depot.message.OdpfMessageSchema; -import io.odpf.depot.message.ParsedOdpfMessage; -import io.odpf.depot.message.proto.converter.fields.DurationProtoField; -import io.odpf.depot.message.proto.converter.fields.MessageProtoField; -import io.odpf.depot.message.proto.converter.fields.ProtoField; -import io.odpf.depot.message.proto.converter.fields.ProtoFieldFactory; -import io.odpf.depot.utils.ProtoUtils; +import org.raystack.depot.common.Tuple; +import org.raystack.depot.config.SinkConfig; +import org.raystack.depot.message.MessageSchema; +import org.raystack.depot.message.ParsedMessage; +import org.raystack.depot.message.proto.converter.fields.DurationProtoField; +import org.raystack.depot.message.proto.converter.fields.MessageProtoField; +import org.raystack.depot.message.proto.converter.fields.ProtoField; +import org.raystack.depot.message.proto.converter.fields.ProtoFieldFactory; +import org.raystack.depot.utils.ProtoUtils; +import org.raystack.depot.exception.ConfigurationException; +import org.raystack.depot.exception.UnknownFieldsException; import lombok.extern.slf4j.Slf4j; import java.time.Instant; import java.util.ArrayList; +import java.util.Base64; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Properties; @Slf4j -public class ProtoOdpfParsedMessage implements ParsedOdpfMessage { +public class ProtoParsedMessage implements ParsedMessage { private final DynamicMessage dynamicMessage; - private final Map> cachedMapping = new HashMap<>(); + private final Map> cachedMapping = new HashMap<>(); - public ProtoOdpfParsedMessage(DynamicMessage dynamicMessage) { + public ProtoParsedMessage(DynamicMessage dynamicMessage) { this.dynamicMessage = dynamicMessage; } @@ -44,15 +45,16 @@ public Object getRaw() { } @Override - public void validate(OdpfSinkConfig config) { - if (!config.getSinkConnectorSchemaProtoAllowUnknownFieldsEnable() && ProtoUtils.hasUnknownField(dynamicMessage)) { + public void validate(SinkConfig config) { + if (!config.getSinkConnectorSchemaProtoAllowUnknownFieldsEnable() + && ProtoUtils.hasUnknownField(dynamicMessage)) { log.error("Unknown fields {}", UnknownProtoFields.toString(dynamicMessage.toByteArray())); throw new UnknownFieldsException(dynamicMessage); } } @Override - public Map getMapping(OdpfMessageSchema schema) { + public Map getMapping(MessageSchema schema) { if (schema.getSchema() == null) { throw new ConfigurationException("Schema is not configured"); } @@ -90,7 +92,7 @@ private Map getMappings(DynamicMessage message, Properties colum Tuple nestedColumns = getNestedColumnName(field, value); row.put(nestedColumns.getFirst(), nestedColumns.getSecond()); } else { - floatCheck(fieldValue); + fieldValue = bytesCheck(fieldValue); row.put(columnName, fieldValue); } } @@ -98,13 +100,13 @@ private Map getMappings(DynamicMessage message, Properties colum return row; } - private void floatCheck(Object fieldValue) { - if (fieldValue instanceof 
Float) { - float floatValue = ((Number) fieldValue).floatValue(); - Preconditions.checkArgument(!Float.isInfinite(floatValue) && !Float.isNaN(floatValue)); - } else if (fieldValue instanceof Double) { - double doubleValue = ((Number) fieldValue).doubleValue(); - Preconditions.checkArgument(!Double.isInfinite(doubleValue) && !Double.isNaN(doubleValue)); + private Object bytesCheck(Object fieldValue) { + if (fieldValue instanceof ByteString) { + ByteString byteString = (ByteString) fieldValue; + byte[] bytes = byteString.toStringUtf8().getBytes(); + return new String(Base64.getEncoder().encode(bytes)); + } else { + return fieldValue; } } @@ -139,7 +141,7 @@ private void addRepeatedFields(Map row, Object value, List row, Object value, List) { + return ((Collection) fieldValue).stream().map(this::getByteString).collect(Collectors.toList()); + } + return getByteString(fieldValue); + } + + private Object getByteString(Object field) { + ByteString byteString = (ByteString) field; byte[] bytes = byteString.toStringUtf8().getBytes(); return base64Encode(bytes); } diff --git a/src/main/java/io/odpf/depot/message/proto/converter/fields/DefaultProtoField.java b/src/main/java/org/raystack/depot/message/proto/converter/fields/DefaultProtoField.java similarity index 83% rename from src/main/java/io/odpf/depot/message/proto/converter/fields/DefaultProtoField.java rename to src/main/java/org/raystack/depot/message/proto/converter/fields/DefaultProtoField.java index 435adb3a..0c6e2384 100644 --- a/src/main/java/io/odpf/depot/message/proto/converter/fields/DefaultProtoField.java +++ b/src/main/java/org/raystack/depot/message/proto/converter/fields/DefaultProtoField.java @@ -1,4 +1,4 @@ -package io.odpf.depot.message.proto.converter.fields; +package org.raystack.depot.message.proto.converter.fields; import lombok.AllArgsConstructor; diff --git a/src/main/java/io/odpf/depot/message/proto/converter/fields/DurationProtoField.java b/src/main/java/org/raystack/depot/message/proto/converter/fields/DurationProtoField.java similarity index 73% rename from src/main/java/io/odpf/depot/message/proto/converter/fields/DurationProtoField.java rename to src/main/java/org/raystack/depot/message/proto/converter/fields/DurationProtoField.java index 81257441..61753103 100644 --- a/src/main/java/io/odpf/depot/message/proto/converter/fields/DurationProtoField.java +++ b/src/main/java/org/raystack/depot/message/proto/converter/fields/DurationProtoField.java @@ -1,4 +1,4 @@ -package io.odpf.depot.message.proto.converter.fields; +package org.raystack.depot.message.proto.converter.fields; import com.google.protobuf.Descriptors; @@ -19,6 +19,7 @@ public Object getValue() { @Override public boolean matches() { return descriptor.getType() == Descriptors.FieldDescriptor.Type.MESSAGE - && descriptor.getMessageType().getFullName().equals(com.google.protobuf.Duration.getDescriptor().getFullName()); + && descriptor.getMessageType().getFullName() + .equals(com.google.protobuf.Duration.getDescriptor().getFullName()); } } diff --git a/src/main/java/io/odpf/depot/message/proto/converter/fields/EnumProtoField.java b/src/main/java/org/raystack/depot/message/proto/converter/fields/EnumProtoField.java similarity index 93% rename from src/main/java/io/odpf/depot/message/proto/converter/fields/EnumProtoField.java rename to src/main/java/org/raystack/depot/message/proto/converter/fields/EnumProtoField.java index a9d8400d..3c55516f 100644 --- a/src/main/java/io/odpf/depot/message/proto/converter/fields/EnumProtoField.java +++ 
b/src/main/java/org/raystack/depot/message/proto/converter/fields/EnumProtoField.java @@ -1,4 +1,4 @@ -package io.odpf.depot.message.proto.converter.fields; +package org.raystack.depot.message.proto.converter.fields; import com.google.protobuf.Descriptors; import lombok.AllArgsConstructor; diff --git a/src/main/java/org/raystack/depot/message/proto/converter/fields/FloatProtoField.java b/src/main/java/org/raystack/depot/message/proto/converter/fields/FloatProtoField.java new file mode 100644 index 00000000..6a445011 --- /dev/null +++ b/src/main/java/org/raystack/depot/message/proto/converter/fields/FloatProtoField.java @@ -0,0 +1,39 @@ +package org.raystack.depot.message.proto.converter.fields; + +import com.google.protobuf.Descriptors; + +import java.util.Collection; +import java.util.stream.Collectors; + +public class FloatProtoField implements ProtoField { + private final Object fieldValue; + private final Descriptors.FieldDescriptor descriptor; + + public FloatProtoField(Descriptors.FieldDescriptor descriptor, Object fieldValue) { + this.descriptor = descriptor; + this.fieldValue = fieldValue; + } + + @Override + public Object getValue() { + if (fieldValue instanceof Collection) { + return ((Collection) fieldValue).stream().map(this::getValue).collect(Collectors.toList()); + } + return getValue(fieldValue); + } + + public Double getValue(Object field) { + double val = Double.parseDouble(field.toString()); + boolean valid = !Double.isInfinite(val) && !Double.isNaN(val); + if (!valid) { + throw new IllegalArgumentException("Float/double value is not valid"); + } + return val; + } + + @Override + public boolean matches() { + return descriptor.getType() == Descriptors.FieldDescriptor.Type.FLOAT + || descriptor.getType() == Descriptors.FieldDescriptor.Type.DOUBLE; + } +} diff --git a/src/main/java/org/raystack/depot/message/proto/converter/fields/IntegerProtoField.java b/src/main/java/org/raystack/depot/message/proto/converter/fields/IntegerProtoField.java new file mode 100644 index 00000000..7237d33f --- /dev/null +++ b/src/main/java/org/raystack/depot/message/proto/converter/fields/IntegerProtoField.java @@ -0,0 +1,42 @@ +package org.raystack.depot.message.proto.converter.fields; + +import com.google.protobuf.Descriptors; + +import java.util.Collection; +import java.util.stream.Collectors; + +public class IntegerProtoField implements ProtoField { + private final Descriptors.FieldDescriptor descriptor; + private final Object fieldValue; + + public IntegerProtoField(Descriptors.FieldDescriptor descriptor, Object fieldValue) { + this.descriptor = descriptor; + this.fieldValue = fieldValue; + } + + @Override + public Object getValue() { + if (fieldValue instanceof Collection) { + return ((Collection) fieldValue).stream().map(this::getValue).collect(Collectors.toList()); + } + return getValue(fieldValue); + } + + public Long getValue(Object field) { + return Long.valueOf(field.toString()); + } + + @Override + public boolean matches() { + return descriptor.getType() == Descriptors.FieldDescriptor.Type.INT64 + || descriptor.getType() == Descriptors.FieldDescriptor.Type.UINT64 + || descriptor.getType() == Descriptors.FieldDescriptor.Type.FIXED64 + || descriptor.getType() == Descriptors.FieldDescriptor.Type.SFIXED64 + || descriptor.getType() == Descriptors.FieldDescriptor.Type.SINT64 + || descriptor.getType() == Descriptors.FieldDescriptor.Type.INT32 + || descriptor.getType() == Descriptors.FieldDescriptor.Type.UINT32 + || descriptor.getType() == Descriptors.FieldDescriptor.Type.FIXED32 + || 
descriptor.getType() == Descriptors.FieldDescriptor.Type.SFIXED32 + || descriptor.getType() == Descriptors.FieldDescriptor.Type.SINT32; + } +} diff --git a/src/main/java/io/odpf/depot/message/proto/converter/fields/MapProtoField.java b/src/main/java/org/raystack/depot/message/proto/converter/fields/MapProtoField.java similarity index 90% rename from src/main/java/io/odpf/depot/message/proto/converter/fields/MapProtoField.java rename to src/main/java/org/raystack/depot/message/proto/converter/fields/MapProtoField.java index 781e1947..66d4641f 100644 --- a/src/main/java/io/odpf/depot/message/proto/converter/fields/MapProtoField.java +++ b/src/main/java/org/raystack/depot/message/proto/converter/fields/MapProtoField.java @@ -1,4 +1,4 @@ -package io.odpf.depot.message.proto.converter.fields; +package org.raystack.depot.message.proto.converter.fields; import com.google.protobuf.Descriptors; diff --git a/src/main/java/io/odpf/depot/message/proto/converter/fields/MessageProtoField.java b/src/main/java/org/raystack/depot/message/proto/converter/fields/MessageProtoField.java similarity index 88% rename from src/main/java/io/odpf/depot/message/proto/converter/fields/MessageProtoField.java rename to src/main/java/org/raystack/depot/message/proto/converter/fields/MessageProtoField.java index d43a471e..5397d682 100644 --- a/src/main/java/io/odpf/depot/message/proto/converter/fields/MessageProtoField.java +++ b/src/main/java/org/raystack/depot/message/proto/converter/fields/MessageProtoField.java @@ -1,4 +1,4 @@ -package io.odpf.depot.message.proto.converter.fields; +package org.raystack.depot.message.proto.converter.fields; import com.google.protobuf.Descriptors; import lombok.AllArgsConstructor; diff --git a/src/main/java/io/odpf/depot/message/proto/converter/fields/ProtoField.java b/src/main/java/org/raystack/depot/message/proto/converter/fields/ProtoField.java similarity index 57% rename from src/main/java/io/odpf/depot/message/proto/converter/fields/ProtoField.java rename to src/main/java/org/raystack/depot/message/proto/converter/fields/ProtoField.java index ef90cb23..afc71296 100644 --- a/src/main/java/io/odpf/depot/message/proto/converter/fields/ProtoField.java +++ b/src/main/java/org/raystack/depot/message/proto/converter/fields/ProtoField.java @@ -1,4 +1,4 @@ -package io.odpf.depot.message.proto.converter.fields; +package org.raystack.depot.message.proto.converter.fields; public interface ProtoField { diff --git a/src/main/java/io/odpf/depot/message/proto/converter/fields/ProtoFieldFactory.java b/src/main/java/org/raystack/depot/message/proto/converter/fields/ProtoFieldFactory.java similarity index 76% rename from src/main/java/io/odpf/depot/message/proto/converter/fields/ProtoFieldFactory.java rename to src/main/java/org/raystack/depot/message/proto/converter/fields/ProtoFieldFactory.java index ecc552d9..db7ddc98 100644 --- a/src/main/java/io/odpf/depot/message/proto/converter/fields/ProtoFieldFactory.java +++ b/src/main/java/org/raystack/depot/message/proto/converter/fields/ProtoFieldFactory.java @@ -1,4 +1,4 @@ -package io.odpf.depot.message.proto.converter.fields; +package org.raystack.depot.message.proto.converter.fields; import com.google.protobuf.Descriptors; @@ -12,12 +12,11 @@ public static ProtoField getField(Descriptors.FieldDescriptor descriptor, Object List protoFields = Arrays.asList( new DurationProtoField(descriptor, fieldValue), new TimestampProtoField(descriptor, fieldValue), - new MapProtoField(descriptor, fieldValue), new EnumProtoField(descriptor, fieldValue), - new 
ByteProtoField(descriptor, fieldValue), new StructProtoField(descriptor, fieldValue), - new MessageProtoField(descriptor, fieldValue) - ); + new FloatProtoField(descriptor, fieldValue), + new IntegerProtoField(descriptor, fieldValue), + new MessageProtoField(descriptor, fieldValue)); Optional first = protoFields .stream() .filter(ProtoField::matches) diff --git a/src/main/java/io/odpf/depot/message/proto/converter/fields/StructProtoField.java b/src/main/java/org/raystack/depot/message/proto/converter/fields/StructProtoField.java similarity index 86% rename from src/main/java/io/odpf/depot/message/proto/converter/fields/StructProtoField.java rename to src/main/java/org/raystack/depot/message/proto/converter/fields/StructProtoField.java index 34d71419..630a3520 100644 --- a/src/main/java/io/odpf/depot/message/proto/converter/fields/StructProtoField.java +++ b/src/main/java/org/raystack/depot/message/proto/converter/fields/StructProtoField.java @@ -1,4 +1,4 @@ -package io.odpf.depot.message.proto.converter.fields; +package org.raystack.depot.message.proto.converter.fields; import com.google.protobuf.Descriptors; import com.google.protobuf.InvalidProtocolBufferException; @@ -41,6 +41,7 @@ private String getString(Object field) throws InvalidProtocolBufferException { @Override public boolean matches() { return descriptor.getType() == Descriptors.FieldDescriptor.Type.MESSAGE - && descriptor.getMessageType().getFullName().equals(com.google.protobuf.Struct.getDescriptor().getFullName()); + && descriptor.getMessageType().getFullName() + .equals(com.google.protobuf.Struct.getDescriptor().getFullName()); } } diff --git a/src/main/java/io/odpf/depot/message/proto/converter/fields/TimestampProtoField.java b/src/main/java/org/raystack/depot/message/proto/converter/fields/TimestampProtoField.java similarity index 74% rename from src/main/java/io/odpf/depot/message/proto/converter/fields/TimestampProtoField.java rename to src/main/java/org/raystack/depot/message/proto/converter/fields/TimestampProtoField.java index 08de6472..541f840b 100644 --- a/src/main/java/io/odpf/depot/message/proto/converter/fields/TimestampProtoField.java +++ b/src/main/java/org/raystack/depot/message/proto/converter/fields/TimestampProtoField.java @@ -1,4 +1,4 @@ -package io.odpf.depot.message.proto.converter.fields; +package org.raystack.depot.message.proto.converter.fields; import com.google.protobuf.Descriptors; import com.google.protobuf.DynamicMessage; @@ -8,6 +8,7 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.stream.Collectors; @AllArgsConstructor public class TimestampProtoField implements ProtoField { @@ -17,13 +18,8 @@ public class TimestampProtoField implements ProtoField { @Override public Object getValue() { if (fieldValue instanceof Collection) { - List tsValues = new ArrayList<>(); - for (Object field : (Collection) fieldValue) { - tsValues.add(getTime(field)); - } - return tsValues; + return ((Collection) fieldValue).stream().map(this::getTime).collect(Collectors.toList()); } - return getTime(fieldValue); } @@ -38,6 +34,7 @@ private Instant getTime(Object field) { @Override public boolean matches() { return descriptor.getType() == Descriptors.FieldDescriptor.Type.MESSAGE - && descriptor.getMessageType().getFullName().equals(com.google.protobuf.Timestamp.getDescriptor().getFullName()); + && descriptor.getMessageType().getFullName() + .equals(com.google.protobuf.Timestamp.getDescriptor().getFullName()); } } diff --git 
a/src/main/java/io/odpf/depot/metrics/BigQueryMetrics.java b/src/main/java/org/raystack/depot/metrics/BigQueryMetrics.java similarity index 67% rename from src/main/java/io/odpf/depot/metrics/BigQueryMetrics.java rename to src/main/java/org/raystack/depot/metrics/BigQueryMetrics.java index 19b209e2..307eec1f 100644 --- a/src/main/java/io/odpf/depot/metrics/BigQueryMetrics.java +++ b/src/main/java/org/raystack/depot/metrics/BigQueryMetrics.java @@ -1,10 +1,10 @@ -package io.odpf.depot.metrics; +package org.raystack.depot.metrics; -import io.odpf.depot.config.OdpfSinkConfig; +import org.raystack.depot.config.SinkConfig; public class BigQueryMetrics extends SinkMetrics { - public BigQueryMetrics(OdpfSinkConfig config) { + public BigQueryMetrics(SinkConfig config) { super(config); } @@ -16,6 +16,16 @@ public enum BigQueryAPIType { TABLE_INSERT_ALL, } + public enum BigQueryStorageAPIType { + STREAM_WRITER_CREATED, + STREAM_WRITER_CLOSED, + STREAM_WRITER_APPEND + } + + public enum BigQueryStorageAPIError { + ROW_APPEND_ERROR + } + public enum BigQueryErrorType { UNKNOWN_ERROR, INVALID_SCHEMA_ERROR, @@ -26,6 +36,7 @@ public enum BigQueryErrorType { public static final String BIGQUERY_SINK_PREFIX = "bigquery_"; public static final String BIGQUERY_TABLE_TAG = "table=%s"; public static final String BIGQUERY_DATASET_TAG = "dataset=%s"; + public static final String BIGQUERY_PROJECT_TAG = "project=%s"; public static final String BIGQUERY_API_TAG = "api=%s"; public static final String BIGQUERY_ERROR_TAG = "error=%s"; @@ -40,4 +51,8 @@ public String getBigqueryOperationLatencyMetric() { public String getBigqueryTotalErrorsMetrics() { return getApplicationPrefix() + SINK_PREFIX + BIGQUERY_SINK_PREFIX + "errors_total"; } + + public String getBigqueryPayloadSizeMetrics() { + return getApplicationPrefix() + SINK_PREFIX + BIGQUERY_SINK_PREFIX + "payload_size_bytes"; + } } diff --git a/src/main/java/io/odpf/depot/metrics/BigTableMetrics.java b/src/main/java/org/raystack/depot/metrics/BigTableMetrics.java similarity index 89% rename from src/main/java/io/odpf/depot/metrics/BigTableMetrics.java rename to src/main/java/org/raystack/depot/metrics/BigTableMetrics.java index 125a8982..772ec976 100644 --- a/src/main/java/io/odpf/depot/metrics/BigTableMetrics.java +++ b/src/main/java/org/raystack/depot/metrics/BigTableMetrics.java @@ -1,6 +1,6 @@ -package io.odpf.depot.metrics; +package org.raystack.depot.metrics; -import io.odpf.depot.config.OdpfSinkConfig; +import org.raystack.depot.config.SinkConfig; public class BigTableMetrics extends SinkMetrics { @@ -9,11 +9,10 @@ public class BigTableMetrics extends SinkMetrics { public static final String BIGTABLE_TABLE_TAG = "table=%s"; public static final String BIGTABLE_ERROR_TAG = "error=%s"; - public BigTableMetrics(OdpfSinkConfig config) { + public BigTableMetrics(SinkConfig config) { super(config); } - public enum BigTableErrorType { QUOTA_FAILURE, // A quota check failed. PRECONDITION_FAILURE, // Some preconditions have failed. 
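The two metrics classes above only define metric names and tag templates; nothing in them emits a measurement. As a minimal sketch of how the newly added payload-size metric and project tag might be reported, the hypothetical helper below (not part of this patch) composes them with `Instrumentation.captureCount`, whose `(String, Long, String...)` signature appears later in this diff. The `PayloadSizeReportingSketch`/`reportPayloadSize` names, the literal tag values, and the pre-built `SinkConfig`/`StatsDReporter` arguments are all assumptions.

```java
import org.raystack.depot.config.SinkConfig;
import org.raystack.depot.metrics.BigQueryMetrics;
import org.raystack.depot.metrics.Instrumentation;
import org.raystack.depot.metrics.StatsDReporter;

class PayloadSizeReportingSketch {
    // Hypothetical helper, not part of this patch: composes the metric name and
    // tags defined in BigQueryMetrics and reports a count via Instrumentation.
    static void reportPayloadSize(SinkConfig sinkConfig, StatsDReporter statsDReporter, long payloadSize) {
        BigQueryMetrics metrics = new BigQueryMetrics(sinkConfig);
        Instrumentation instrumentation = new Instrumentation(statsDReporter, PayloadSizeReportingSketch.class);
        instrumentation.captureCount(
                metrics.getBigqueryPayloadSizeMetrics(), // "<application prefix><sink prefix>bigquery_payload_size_bytes"
                payloadSize,                             // autoboxed to the Long parameter of captureCount
                String.format(BigQueryMetrics.BIGQUERY_PROJECT_TAG, "my-project"), // -> "project=my-project"
                String.format(BigQueryMetrics.BIGQUERY_DATASET_TAG, "my_dataset"), // -> "dataset=my_dataset"
                String.format(BigQueryMetrics.BIGQUERY_TABLE_TAG, "my_table"));    // -> "table=my_table"
    }
}
```

In a real sink the owning sink class would be passed to `Instrumentation` instead, matching the pattern `new Instrumentation(statsDReporter, LogSink.class)` used by `LogSinkFactory` earlier in this patch.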
diff --git a/src/main/java/io/odpf/depot/metrics/Instrumentation.java b/src/main/java/org/raystack/depot/metrics/Instrumentation.java similarity index 98% rename from src/main/java/io/odpf/depot/metrics/Instrumentation.java rename to src/main/java/org/raystack/depot/metrics/Instrumentation.java index 6f1ee9c6..8ea5415b 100644 --- a/src/main/java/io/odpf/depot/metrics/Instrumentation.java +++ b/src/main/java/org/raystack/depot/metrics/Instrumentation.java @@ -1,4 +1,4 @@ -package io.odpf.depot.metrics; +package org.raystack.depot.metrics; import lombok.Getter; import org.slf4j.Logger; @@ -8,7 +8,6 @@ import java.io.IOException; import java.time.Instant; - /** * Instrumentation. *

@@ -64,7 +63,6 @@ public boolean isDebugEnabled() { return logger.isDebugEnabled(); } - // ===================== CountTelemetry ================= public void captureCount(String metric, Long count, String... tags) { @@ -91,7 +89,6 @@ public void captureDuration(String metric, long duration, String... tags) { statsDReporter.captureDuration(metric, duration, tags); } - // =================== ERROR =================== public void captureNonFatalError(String metric, Throwable e, String template, Object... t) { diff --git a/src/main/java/io/odpf/depot/metrics/JsonParserMetrics.java b/src/main/java/org/raystack/depot/metrics/JsonParserMetrics.java similarity index 70% rename from src/main/java/io/odpf/depot/metrics/JsonParserMetrics.java rename to src/main/java/org/raystack/depot/metrics/JsonParserMetrics.java index 63094e9c..4448db28 100644 --- a/src/main/java/io/odpf/depot/metrics/JsonParserMetrics.java +++ b/src/main/java/org/raystack/depot/metrics/JsonParserMetrics.java @@ -1,9 +1,9 @@ -package io.odpf.depot.metrics; +package org.raystack.depot.metrics; -import io.odpf.depot.config.OdpfSinkConfig; +import org.raystack.depot.config.SinkConfig; public class JsonParserMetrics extends SinkMetrics { - public JsonParserMetrics(OdpfSinkConfig config) { + public JsonParserMetrics(SinkConfig config) { super(config); } diff --git a/src/main/java/io/odpf/depot/metrics/SinkMetrics.java b/src/main/java/org/raystack/depot/metrics/SinkMetrics.java similarity index 84% rename from src/main/java/io/odpf/depot/metrics/SinkMetrics.java rename to src/main/java/org/raystack/depot/metrics/SinkMetrics.java index 17335968..26065b44 100644 --- a/src/main/java/io/odpf/depot/metrics/SinkMetrics.java +++ b/src/main/java/org/raystack/depot/metrics/SinkMetrics.java @@ -1,6 +1,6 @@ -package io.odpf.depot.metrics; +package org.raystack.depot.metrics; -import io.odpf.depot.config.OdpfSinkConfig; +import org.raystack.depot.config.SinkConfig; import lombok.Getter; public class SinkMetrics { @@ -15,7 +15,7 @@ public class SinkMetrics { @Getter private final String applicationPrefix; - public SinkMetrics(OdpfSinkConfig config) { + public SinkMetrics(SinkConfig config) { this.applicationPrefix = config.getMetricsApplicationPrefix(); } diff --git a/src/main/java/io/odpf/depot/metrics/StatsDReporter.java b/src/main/java/org/raystack/depot/metrics/StatsDReporter.java similarity index 98% rename from src/main/java/io/odpf/depot/metrics/StatsDReporter.java rename to src/main/java/org/raystack/depot/metrics/StatsDReporter.java index 57974113..286d2a03 100644 --- a/src/main/java/io/odpf/depot/metrics/StatsDReporter.java +++ b/src/main/java/org/raystack/depot/metrics/StatsDReporter.java @@ -1,4 +1,4 @@ -package io.odpf.depot.metrics; +package org.raystack.depot.metrics; import com.timgroup.statsd.StatsDClient; import org.slf4j.Logger; diff --git a/src/main/java/io/odpf/depot/metrics/StatsDReporterBuilder.java b/src/main/java/org/raystack/depot/metrics/StatsDReporterBuilder.java similarity index 96% rename from src/main/java/io/odpf/depot/metrics/StatsDReporterBuilder.java rename to src/main/java/org/raystack/depot/metrics/StatsDReporterBuilder.java index 9bc2eb5a..90a51fd6 100644 --- a/src/main/java/io/odpf/depot/metrics/StatsDReporterBuilder.java +++ b/src/main/java/org/raystack/depot/metrics/StatsDReporterBuilder.java @@ -1,9 +1,9 @@ -package io.odpf.depot.metrics; +package org.raystack.depot.metrics; import com.timgroup.statsd.NoOpStatsDClient; import com.timgroup.statsd.NonBlockingStatsDClientBuilder; import 
com.timgroup.statsd.StatsDClient; -import io.odpf.depot.config.MetricsConfig; +import org.raystack.depot.config.MetricsConfig; import lombok.extern.slf4j.Slf4j; /** diff --git a/src/main/java/io/odpf/depot/redis/RedisSink.java b/src/main/java/org/raystack/depot/redis/RedisSink.java similarity index 51% rename from src/main/java/io/odpf/depot/redis/RedisSink.java rename to src/main/java/org/raystack/depot/redis/RedisSink.java index 6407142a..c2e93966 100644 --- a/src/main/java/io/odpf/depot/redis/RedisSink.java +++ b/src/main/java/org/raystack/depot/redis/RedisSink.java @@ -1,22 +1,22 @@ -package io.odpf.depot.redis; +package org.raystack.depot.redis; -import io.odpf.depot.OdpfSink; -import io.odpf.depot.OdpfSinkResponse; -import io.odpf.depot.error.ErrorInfo; -import io.odpf.depot.message.OdpfMessage; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.redis.client.RedisClient; -import io.odpf.depot.redis.client.response.RedisResponse; -import io.odpf.depot.redis.parsers.RedisParser; -import io.odpf.depot.redis.util.RedisSinkUtils; -import io.odpf.depot.redis.record.RedisRecord; +import org.raystack.depot.message.Message; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.redis.client.RedisClient; +import org.raystack.depot.redis.client.response.RedisResponse; +import org.raystack.depot.redis.parsers.RedisParser; +import org.raystack.depot.redis.record.RedisRecord; +import org.raystack.depot.redis.util.RedisSinkUtils; +import org.raystack.depot.Sink; +import org.raystack.depot.SinkResponse; +import org.raystack.depot.error.ErrorInfo; import java.io.IOException; import java.util.List; import java.util.Map; import java.util.stream.Collectors; -public class RedisSink implements OdpfSink { +public class RedisSink implements Sink { private final RedisClient redisClient; private final RedisParser redisParser; private final Instrumentation instrumentation; @@ -28,20 +28,23 @@ public RedisSink(RedisClient redisClient, RedisParser redisParser, Instrumentati } @Override - public OdpfSinkResponse pushToSink(List messages) { + public SinkResponse pushToSink(List messages) { List records = redisParser.convert(messages); - Map> splitterRecords = records.stream().collect(Collectors.partitioningBy(RedisRecord::isValid)); + Map> splitterRecords = records.stream() + .collect(Collectors.partitioningBy(RedisRecord::isValid)); List invalidRecords = splitterRecords.get(Boolean.FALSE); List validRecords = splitterRecords.get(Boolean.TRUE); - OdpfSinkResponse odpfSinkResponse = new OdpfSinkResponse(); - invalidRecords.forEach(invalidRecord -> odpfSinkResponse.addErrors(invalidRecord.getIndex(), invalidRecord.getErrorInfo())); + SinkResponse sinkResponse = new SinkResponse(); + invalidRecords.forEach( + invalidRecord -> sinkResponse.addErrors(invalidRecord.getIndex(), invalidRecord.getErrorInfo())); if (validRecords.size() > 0) { List responses = redisClient.send(validRecords); - Map errorInfoMap = RedisSinkUtils.getErrorsFromResponse(validRecords, responses, instrumentation); - errorInfoMap.forEach(odpfSinkResponse::addErrors); + Map errorInfoMap = RedisSinkUtils.getErrorsFromResponse(validRecords, responses, + instrumentation); + errorInfoMap.forEach(sinkResponse::addErrors); instrumentation.logInfo("Pushed a batch of {} records to Redis", validRecords.size()); } - return odpfSinkResponse; + return sinkResponse; } @Override diff --git a/src/main/java/io/odpf/depot/redis/RedisSinkFactory.java b/src/main/java/org/raystack/depot/redis/RedisSinkFactory.java similarity 
index 66% rename from src/main/java/io/odpf/depot/redis/RedisSinkFactory.java rename to src/main/java/org/raystack/depot/redis/RedisSinkFactory.java index db7d41c6..3d4a5804 100644 --- a/src/main/java/io/odpf/depot/redis/RedisSinkFactory.java +++ b/src/main/java/org/raystack/depot/redis/RedisSinkFactory.java @@ -1,21 +1,20 @@ -package io.odpf.depot.redis; - +package org.raystack.depot.redis; +import org.raystack.depot.common.Tuple; +import org.raystack.depot.config.RedisSinkConfig; +import org.raystack.depot.message.MessageParser; +import org.raystack.depot.message.MessageParserFactory; +import org.raystack.depot.message.MessageSchema; +import org.raystack.depot.message.SinkConnectorSchemaMessageMode; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.metrics.StatsDReporter; +import org.raystack.depot.redis.client.RedisClientFactory; +import org.raystack.depot.redis.parsers.RedisEntryParser; +import org.raystack.depot.redis.parsers.RedisEntryParserFactory; +import org.raystack.depot.redis.parsers.RedisParser; +import org.raystack.depot.utils.MessageConfigUtils; import com.timgroup.statsd.NoOpStatsDClient; -import io.odpf.depot.OdpfSink; -import io.odpf.depot.common.Tuple; -import io.odpf.depot.config.RedisSinkConfig; -import io.odpf.depot.message.OdpfMessageParser; -import io.odpf.depot.message.OdpfMessageParserFactory; -import io.odpf.depot.message.OdpfMessageSchema; -import io.odpf.depot.message.SinkConnectorSchemaMessageMode; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.metrics.StatsDReporter; -import io.odpf.depot.redis.client.RedisClientFactory; -import io.odpf.depot.redis.parsers.RedisEntryParser; -import io.odpf.depot.redis.parsers.RedisEntryParserFactory; -import io.odpf.depot.redis.parsers.RedisParser; -import io.odpf.depot.utils.MessageConfigUtils; +import org.raystack.depot.Sink; import java.io.IOException; @@ -37,7 +36,8 @@ public RedisSinkFactory(RedisSinkConfig sinkConfig) { public void init() { try { Instrumentation instrumentation = new Instrumentation(statsDReporter, RedisSinkFactory.class); - String redisConfig = String.format("\n\tredis.urls = %s\n\tredis.key.template = %s\n\tredis.sink.data.type = %s" + String redisConfig = String.format( + "\n\tredis.urls = %s\n\tredis.key.template = %s\n\tredis.sink.data.type = %s" + "\n\tredis.deployment.type = %s\n\tredis.ttl.type = %s\n\tredis.ttl.value = %d\n\t", sinkConfig.getSinkRedisUrls(), sinkConfig.getSinkRedisKeyTemplate(), @@ -53,16 +53,19 @@ public void init() { redisConfig += "redis.keyvalue.data.field.name=" + sinkConfig.getSinkRedisKeyValueDataFieldName(); break; case HASHSET: - redisConfig += "redis.hashset.field.to.column.mapping=" + sinkConfig.getSinkRedisHashsetFieldToColumnMapping().toString(); + redisConfig += "redis.hashset.field.to.column.mapping=" + + sinkConfig.getSinkRedisHashsetFieldToColumnMapping().toString(); break; default: } instrumentation.logInfo(redisConfig); instrumentation.logInfo("Redis server type = {}", sinkConfig.getSinkRedisDeploymentType()); - OdpfMessageParser messageParser = OdpfMessageParserFactory.getParser(sinkConfig, statsDReporter); - Tuple modeAndSchema = MessageConfigUtils.getModeAndSchema(sinkConfig); - OdpfMessageSchema schema = messageParser.getSchema(modeAndSchema.getSecond()); - RedisEntryParser redisEntryParser = RedisEntryParserFactory.getRedisEntryParser(sinkConfig, statsDReporter, schema); + MessageParser messageParser = MessageParserFactory.getParser(sinkConfig, statsDReporter); + Tuple modeAndSchema = MessageConfigUtils 
+ .getModeAndSchema(sinkConfig); + MessageSchema schema = messageParser.getSchema(modeAndSchema.getSecond()); + RedisEntryParser redisEntryParser = RedisEntryParserFactory.getRedisEntryParser(sinkConfig, statsDReporter, + schema); this.redisParser = new RedisParser(messageParser, redisEntryParser, modeAndSchema); instrumentation.logInfo("Connection to redis established successfully"); } catch (IOException e) { @@ -75,7 +78,7 @@ public void init() { * * @return RedisSink */ - public OdpfSink create() { + public Sink create() { return new RedisSink( RedisClientFactory.getClient(sinkConfig, statsDReporter), redisParser, diff --git a/src/main/java/io/odpf/depot/redis/client/RedisClient.java b/src/main/java/org/raystack/depot/redis/client/RedisClient.java similarity index 58% rename from src/main/java/io/odpf/depot/redis/client/RedisClient.java rename to src/main/java/org/raystack/depot/redis/client/RedisClient.java index 16d4894b..af52de1d 100644 --- a/src/main/java/io/odpf/depot/redis/client/RedisClient.java +++ b/src/main/java/org/raystack/depot/redis/client/RedisClient.java @@ -1,7 +1,7 @@ -package io.odpf.depot.redis.client; +package org.raystack.depot.redis.client; -import io.odpf.depot.redis.client.response.RedisResponse; -import io.odpf.depot.redis.record.RedisRecord; +import org.raystack.depot.redis.client.response.RedisResponse; +import org.raystack.depot.redis.record.RedisRecord; import java.io.Closeable; import java.util.List; diff --git a/src/main/java/io/odpf/depot/redis/client/RedisClientFactory.java b/src/main/java/org/raystack/depot/redis/client/RedisClientFactory.java similarity index 62% rename from src/main/java/io/odpf/depot/redis/client/RedisClientFactory.java rename to src/main/java/org/raystack/depot/redis/client/RedisClientFactory.java index 57bc3995..f3f1cd34 100644 --- a/src/main/java/io/odpf/depot/redis/client/RedisClientFactory.java +++ b/src/main/java/org/raystack/depot/redis/client/RedisClientFactory.java @@ -1,13 +1,12 @@ -package io.odpf.depot.redis.client; - - -import io.odpf.depot.config.RedisSinkConfig; -import io.odpf.depot.exception.ConfigurationException; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.metrics.StatsDReporter; -import io.odpf.depot.redis.enums.RedisSinkDeploymentType; -import io.odpf.depot.redis.ttl.RedisTTLFactory; -import io.odpf.depot.redis.ttl.RedisTtl; +package org.raystack.depot.redis.client; + +import org.raystack.depot.config.RedisSinkConfig; +import org.raystack.depot.exception.ConfigurationException; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.metrics.StatsDReporter; +import org.raystack.depot.redis.enums.RedisSinkDeploymentType; +import org.raystack.depot.redis.ttl.RedisTTLFactory; +import org.raystack.depot.redis.ttl.RedisTtl; import org.apache.commons.lang3.StringUtils; import redis.clients.jedis.HostAndPort; import redis.clients.jedis.Jedis; @@ -30,18 +29,22 @@ public static RedisClient getClient(RedisSinkConfig redisSinkConfig, StatsDRepor : getRedisStandaloneClient(redisTTL, redisSinkConfig, statsDReporter); } - private static RedisStandaloneClient getRedisStandaloneClient(RedisTtl redisTTL, RedisSinkConfig redisSinkConfig, StatsDReporter statsDReporter) { + private static RedisStandaloneClient getRedisStandaloneClient(RedisTtl redisTTL, RedisSinkConfig redisSinkConfig, + StatsDReporter statsDReporter) { HostAndPort hostAndPort; try { hostAndPort = HostAndPort.parseString(StringUtils.trim(redisSinkConfig.getSinkRedisUrls())); } catch (IllegalArgumentException e) { - 
throw new ConfigurationException(String.format("Invalid url for redis standalone: %s", redisSinkConfig.getSinkRedisUrls())); + throw new ConfigurationException( + String.format("Invalid url for redis standalone: %s", redisSinkConfig.getSinkRedisUrls())); } Jedis jedis = new Jedis(hostAndPort); - return new RedisStandaloneClient(new Instrumentation(statsDReporter, RedisStandaloneClient.class), redisTTL, jedis); + return new RedisStandaloneClient(new Instrumentation(statsDReporter, RedisStandaloneClient.class), redisTTL, + jedis); } - private static RedisClusterClient getRedisClusterClient(RedisTtl redisTTL, RedisSinkConfig redisSinkConfig, StatsDReporter statsDReporter) { + private static RedisClusterClient getRedisClusterClient(RedisTtl redisTTL, RedisSinkConfig redisSinkConfig, + StatsDReporter statsDReporter) { String[] redisUrls = redisSinkConfig.getSinkRedisUrls().split(DELIMITER); HashSet nodes = new HashSet<>(); try { @@ -49,9 +52,11 @@ private static RedisClusterClient getRedisClusterClient(RedisTtl redisTTL, Redis nodes.add(HostAndPort.parseString(StringUtils.trim(redisUrl))); } } catch (IllegalArgumentException e) { - throw new ConfigurationException(String.format("Invalid url(s) for redis cluster: %s", redisSinkConfig.getSinkRedisUrls())); + throw new ConfigurationException( + String.format("Invalid url(s) for redis cluster: %s", redisSinkConfig.getSinkRedisUrls())); } JedisCluster jedisCluster = new JedisCluster(nodes); - return new RedisClusterClient(new Instrumentation(statsDReporter, RedisClusterClient.class), redisTTL, jedisCluster); + return new RedisClusterClient(new Instrumentation(statsDReporter, RedisClusterClient.class), redisTTL, + jedisCluster); } } diff --git a/src/main/java/io/odpf/depot/redis/client/RedisClusterClient.java b/src/main/java/org/raystack/depot/redis/client/RedisClusterClient.java similarity index 74% rename from src/main/java/io/odpf/depot/redis/client/RedisClusterClient.java rename to src/main/java/org/raystack/depot/redis/client/RedisClusterClient.java index 7dea53fe..44568bed 100644 --- a/src/main/java/io/odpf/depot/redis/client/RedisClusterClient.java +++ b/src/main/java/org/raystack/depot/redis/client/RedisClusterClient.java @@ -1,9 +1,9 @@ -package io.odpf.depot.redis.client; +package org.raystack.depot.redis.client; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.redis.client.response.RedisResponse; -import io.odpf.depot.redis.record.RedisRecord; -import io.odpf.depot.redis.ttl.RedisTtl; +import org.raystack.depot.redis.client.response.RedisResponse; +import org.raystack.depot.redis.record.RedisRecord; +import org.raystack.depot.redis.ttl.RedisTtl; +import org.raystack.depot.metrics.Instrumentation; import lombok.AllArgsConstructor; import redis.clients.jedis.JedisCluster; diff --git a/src/main/java/io/odpf/depot/redis/client/RedisStandaloneClient.java b/src/main/java/org/raystack/depot/redis/client/RedisStandaloneClient.java similarity index 81% rename from src/main/java/io/odpf/depot/redis/client/RedisStandaloneClient.java rename to src/main/java/org/raystack/depot/redis/client/RedisStandaloneClient.java index 12046cab..5ece6753 100644 --- a/src/main/java/io/odpf/depot/redis/client/RedisStandaloneClient.java +++ b/src/main/java/org/raystack/depot/redis/client/RedisStandaloneClient.java @@ -1,10 +1,10 @@ -package io.odpf.depot.redis.client; +package org.raystack.depot.redis.client; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.redis.client.response.RedisResponse; -import 
io.odpf.depot.redis.client.response.RedisStandaloneResponse; -import io.odpf.depot.redis.record.RedisRecord; -import io.odpf.depot.redis.ttl.RedisTtl; +import org.raystack.depot.redis.client.response.RedisResponse; +import org.raystack.depot.redis.client.response.RedisStandaloneResponse; +import org.raystack.depot.redis.record.RedisRecord; +import org.raystack.depot.redis.ttl.RedisTtl; +import org.raystack.depot.metrics.Instrumentation; import lombok.AllArgsConstructor; import redis.clients.jedis.Jedis; import redis.clients.jedis.Pipeline; diff --git a/src/main/java/io/odpf/depot/redis/client/entry/RedisEntry.java b/src/main/java/org/raystack/depot/redis/client/entry/RedisEntry.java similarity index 72% rename from src/main/java/io/odpf/depot/redis/client/entry/RedisEntry.java rename to src/main/java/org/raystack/depot/redis/client/entry/RedisEntry.java index 2ced72ab..3cf93e1b 100644 --- a/src/main/java/io/odpf/depot/redis/client/entry/RedisEntry.java +++ b/src/main/java/org/raystack/depot/redis/client/entry/RedisEntry.java @@ -1,8 +1,8 @@ -package io.odpf.depot.redis.client.entry; +package org.raystack.depot.redis.client.entry; -import io.odpf.depot.redis.client.response.RedisClusterResponse; -import io.odpf.depot.redis.client.response.RedisStandaloneResponse; -import io.odpf.depot.redis.ttl.RedisTtl; +import org.raystack.depot.redis.client.response.RedisStandaloneResponse; +import org.raystack.depot.redis.client.response.RedisClusterResponse; +import org.raystack.depot.redis.ttl.RedisTtl; import redis.clients.jedis.JedisCluster; import redis.clients.jedis.Pipeline; diff --git a/src/main/java/io/odpf/depot/redis/client/entry/RedisHashSetFieldEntry.java b/src/main/java/org/raystack/depot/redis/client/entry/RedisHashSetFieldEntry.java similarity index 86% rename from src/main/java/io/odpf/depot/redis/client/entry/RedisHashSetFieldEntry.java rename to src/main/java/org/raystack/depot/redis/client/entry/RedisHashSetFieldEntry.java index 9160c5c9..fea0e924 100644 --- a/src/main/java/io/odpf/depot/redis/client/entry/RedisHashSetFieldEntry.java +++ b/src/main/java/org/raystack/depot/redis/client/entry/RedisHashSetFieldEntry.java @@ -1,9 +1,9 @@ -package io.odpf.depot.redis.client.entry; +package org.raystack.depot.redis.client.entry; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.redis.client.response.RedisClusterResponse; -import io.odpf.depot.redis.client.response.RedisStandaloneResponse; -import io.odpf.depot.redis.ttl.RedisTtl; +import org.raystack.depot.redis.client.response.RedisClusterResponse; +import org.raystack.depot.redis.client.response.RedisStandaloneResponse; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.redis.ttl.RedisTtl; import lombok.AllArgsConstructor; import lombok.EqualsAndHashCode; import redis.clients.jedis.JedisCluster; diff --git a/src/main/java/io/odpf/depot/redis/client/entry/RedisKeyValueEntry.java b/src/main/java/org/raystack/depot/redis/client/entry/RedisKeyValueEntry.java similarity index 83% rename from src/main/java/io/odpf/depot/redis/client/entry/RedisKeyValueEntry.java rename to src/main/java/org/raystack/depot/redis/client/entry/RedisKeyValueEntry.java index 7d55ffb4..8c4d3a10 100644 --- a/src/main/java/io/odpf/depot/redis/client/entry/RedisKeyValueEntry.java +++ b/src/main/java/org/raystack/depot/redis/client/entry/RedisKeyValueEntry.java @@ -1,9 +1,9 @@ -package io.odpf.depot.redis.client.entry; +package org.raystack.depot.redis.client.entry; -import io.odpf.depot.metrics.Instrumentation; -import 
io.odpf.depot.redis.client.response.RedisClusterResponse; -import io.odpf.depot.redis.client.response.RedisStandaloneResponse; -import io.odpf.depot.redis.ttl.RedisTtl; +import org.raystack.depot.redis.client.response.RedisClusterResponse; +import org.raystack.depot.redis.client.response.RedisStandaloneResponse; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.redis.ttl.RedisTtl; import lombok.AllArgsConstructor; import lombok.EqualsAndHashCode; import redis.clients.jedis.JedisCluster; diff --git a/src/main/java/io/odpf/depot/redis/client/entry/RedisListEntry.java b/src/main/java/org/raystack/depot/redis/client/entry/RedisListEntry.java similarity index 84% rename from src/main/java/io/odpf/depot/redis/client/entry/RedisListEntry.java rename to src/main/java/org/raystack/depot/redis/client/entry/RedisListEntry.java index 23975404..f33ad671 100644 --- a/src/main/java/io/odpf/depot/redis/client/entry/RedisListEntry.java +++ b/src/main/java/org/raystack/depot/redis/client/entry/RedisListEntry.java @@ -1,9 +1,9 @@ -package io.odpf.depot.redis.client.entry; +package org.raystack.depot.redis.client.entry; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.redis.client.response.RedisClusterResponse; -import io.odpf.depot.redis.client.response.RedisStandaloneResponse; -import io.odpf.depot.redis.ttl.RedisTtl; +import org.raystack.depot.redis.client.response.RedisClusterResponse; +import org.raystack.depot.redis.client.response.RedisStandaloneResponse; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.redis.ttl.RedisTtl; import lombok.AllArgsConstructor; import lombok.EqualsAndHashCode; import redis.clients.jedis.JedisCluster; diff --git a/src/main/java/io/odpf/depot/redis/client/response/RedisClusterResponse.java b/src/main/java/org/raystack/depot/redis/client/response/RedisClusterResponse.java similarity index 92% rename from src/main/java/io/odpf/depot/redis/client/response/RedisClusterResponse.java rename to src/main/java/org/raystack/depot/redis/client/response/RedisClusterResponse.java index e245f90a..6a0555c7 100644 --- a/src/main/java/io/odpf/depot/redis/client/response/RedisClusterResponse.java +++ b/src/main/java/org/raystack/depot/redis/client/response/RedisClusterResponse.java @@ -1,4 +1,4 @@ -package io.odpf.depot.redis.client.response; +package org.raystack.depot.redis.client.response; import lombok.Getter; diff --git a/src/main/java/io/odpf/depot/redis/client/response/RedisResponse.java b/src/main/java/org/raystack/depot/redis/client/response/RedisResponse.java similarity index 63% rename from src/main/java/io/odpf/depot/redis/client/response/RedisResponse.java rename to src/main/java/org/raystack/depot/redis/client/response/RedisResponse.java index 4e5c6c28..dad50441 100644 --- a/src/main/java/io/odpf/depot/redis/client/response/RedisResponse.java +++ b/src/main/java/org/raystack/depot/redis/client/response/RedisResponse.java @@ -1,4 +1,4 @@ -package io.odpf.depot.redis.client.response; +package org.raystack.depot.redis.client.response; public interface RedisResponse { String getMessage(); diff --git a/src/main/java/io/odpf/depot/redis/client/response/RedisStandaloneResponse.java b/src/main/java/org/raystack/depot/redis/client/response/RedisStandaloneResponse.java similarity index 95% rename from src/main/java/io/odpf/depot/redis/client/response/RedisStandaloneResponse.java rename to src/main/java/org/raystack/depot/redis/client/response/RedisStandaloneResponse.java index 8791c190..bc168ccc 100644 --- 
a/src/main/java/io/odpf/depot/redis/client/response/RedisStandaloneResponse.java +++ b/src/main/java/org/raystack/depot/redis/client/response/RedisStandaloneResponse.java @@ -1,4 +1,4 @@ -package io.odpf.depot.redis.client.response; +package org.raystack.depot.redis.client.response; import lombok.Getter; import redis.clients.jedis.Response; diff --git a/src/main/java/io/odpf/depot/redis/enums/RedisSinkDataType.java b/src/main/java/org/raystack/depot/redis/enums/RedisSinkDataType.java similarity index 64% rename from src/main/java/io/odpf/depot/redis/enums/RedisSinkDataType.java rename to src/main/java/org/raystack/depot/redis/enums/RedisSinkDataType.java index 23d791a2..bb9688cd 100644 --- a/src/main/java/io/odpf/depot/redis/enums/RedisSinkDataType.java +++ b/src/main/java/org/raystack/depot/redis/enums/RedisSinkDataType.java @@ -1,4 +1,4 @@ -package io.odpf.depot.redis.enums; +package org.raystack.depot.redis.enums; public enum RedisSinkDataType { LIST, diff --git a/src/main/java/io/odpf/depot/redis/enums/RedisSinkDeploymentType.java b/src/main/java/org/raystack/depot/redis/enums/RedisSinkDeploymentType.java similarity index 63% rename from src/main/java/io/odpf/depot/redis/enums/RedisSinkDeploymentType.java rename to src/main/java/org/raystack/depot/redis/enums/RedisSinkDeploymentType.java index 85a70bcd..e4f8968b 100644 --- a/src/main/java/io/odpf/depot/redis/enums/RedisSinkDeploymentType.java +++ b/src/main/java/org/raystack/depot/redis/enums/RedisSinkDeploymentType.java @@ -1,4 +1,4 @@ -package io.odpf.depot.redis.enums; +package org.raystack.depot.redis.enums; public enum RedisSinkDeploymentType { STANDALONE, diff --git a/src/main/java/io/odpf/depot/redis/enums/RedisSinkTtlType.java b/src/main/java/org/raystack/depot/redis/enums/RedisSinkTtlType.java similarity index 65% rename from src/main/java/io/odpf/depot/redis/enums/RedisSinkTtlType.java rename to src/main/java/org/raystack/depot/redis/enums/RedisSinkTtlType.java index 41d76a4f..f06b2178 100644 --- a/src/main/java/io/odpf/depot/redis/enums/RedisSinkTtlType.java +++ b/src/main/java/org/raystack/depot/redis/enums/RedisSinkTtlType.java @@ -1,4 +1,4 @@ -package io.odpf.depot.redis.enums; +package org.raystack.depot.redis.enums; public enum RedisSinkTtlType { EXACT_TIME, diff --git a/src/main/java/org/raystack/depot/redis/parsers/RedisEntryParser.java b/src/main/java/org/raystack/depot/redis/parsers/RedisEntryParser.java new file mode 100644 index 00000000..6c6c493d --- /dev/null +++ b/src/main/java/org/raystack/depot/redis/parsers/RedisEntryParser.java @@ -0,0 +1,11 @@ +package org.raystack.depot.redis.parsers; + +import org.raystack.depot.redis.client.entry.RedisEntry; +import org.raystack.depot.message.ParsedMessage; + +import java.util.List; + +public interface RedisEntryParser { + + List getRedisEntry(ParsedMessage parsedMessage); +} diff --git a/src/main/java/io/odpf/depot/redis/parsers/RedisEntryParserFactory.java b/src/main/java/org/raystack/depot/redis/parsers/RedisEntryParserFactory.java similarity index 86% rename from src/main/java/io/odpf/depot/redis/parsers/RedisEntryParserFactory.java rename to src/main/java/org/raystack/depot/redis/parsers/RedisEntryParserFactory.java index 974522c6..c699f192 100644 --- a/src/main/java/io/odpf/depot/redis/parsers/RedisEntryParserFactory.java +++ b/src/main/java/org/raystack/depot/redis/parsers/RedisEntryParserFactory.java @@ -1,10 +1,10 @@ -package io.odpf.depot.redis.parsers; +package org.raystack.depot.redis.parsers; -import io.odpf.depot.common.Template; -import 
io.odpf.depot.config.RedisSinkConfig; -import io.odpf.depot.exception.InvalidTemplateException; -import io.odpf.depot.message.OdpfMessageSchema; -import io.odpf.depot.metrics.StatsDReporter; +import org.raystack.depot.common.Template; +import org.raystack.depot.config.RedisSinkConfig; +import org.raystack.depot.exception.InvalidTemplateException; +import org.raystack.depot.message.MessageSchema; +import org.raystack.depot.metrics.StatsDReporter; import java.util.Map; import java.util.Properties; @@ -18,7 +18,7 @@ public class RedisEntryParserFactory { public static RedisEntryParser getRedisEntryParser( RedisSinkConfig redisSinkConfig, StatsDReporter statsDReporter, - OdpfMessageSchema schema) { + MessageSchema schema) { Template keyTemplate; try { keyTemplate = new Template(redisSinkConfig.getSinkRedisKeyTemplate()); @@ -51,8 +51,7 @@ public static RedisEntryParser getRedisEntryParser( } catch (InvalidTemplateException e) { throw new IllegalArgumentException(e.getMessage()); } - } - )); + })); return new RedisHashSetEntryParser(statsDReporter, keyTemplate, fieldTemplates, schema); } } diff --git a/src/main/java/org/raystack/depot/redis/parsers/RedisHashSetEntryParser.java b/src/main/java/org/raystack/depot/redis/parsers/RedisHashSetEntryParser.java new file mode 100644 index 00000000..913685bf --- /dev/null +++ b/src/main/java/org/raystack/depot/redis/parsers/RedisHashSetEntryParser.java @@ -0,0 +1,41 @@ +package org.raystack.depot.redis.parsers; + +import org.raystack.depot.message.field.GenericFieldFactory; +import org.raystack.depot.redis.client.entry.RedisEntry; +import org.raystack.depot.redis.client.entry.RedisHashSetFieldEntry; +import org.raystack.depot.common.Template; +import org.raystack.depot.message.MessageSchema; +import org.raystack.depot.message.ParsedMessage; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.metrics.StatsDReporter; +import lombok.AllArgsConstructor; + +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +/** + * Redis hash set parser. 
+ */ +@AllArgsConstructor +public class RedisHashSetEntryParser implements RedisEntryParser { + private final StatsDReporter statsDReporter; + private final Template keyTemplate; + private final Map fieldTemplates; + private final MessageSchema schema; + + @Override + public List getRedisEntry(ParsedMessage parsedMessage) { + String redisKey = keyTemplate.parse(parsedMessage, schema); + return fieldTemplates + .entrySet() + .stream() + .map(fieldTemplate -> { + String field = fieldTemplate.getValue().parse(parsedMessage, schema); + String redisValue = GenericFieldFactory + .getField(parsedMessage.getFieldByName(fieldTemplate.getKey(), schema)).getString(); + return new RedisHashSetFieldEntry(redisKey, field, redisValue, + new Instrumentation(statsDReporter, RedisHashSetFieldEntry.class)); + }).collect(Collectors.toList()); + } +} diff --git a/src/main/java/org/raystack/depot/redis/parsers/RedisKeyValueEntryParser.java b/src/main/java/org/raystack/depot/redis/parsers/RedisKeyValueEntryParser.java new file mode 100644 index 00000000..1ff13b08 --- /dev/null +++ b/src/main/java/org/raystack/depot/redis/parsers/RedisKeyValueEntryParser.java @@ -0,0 +1,31 @@ +package org.raystack.depot.redis.parsers; + +import org.raystack.depot.message.field.GenericFieldFactory; +import org.raystack.depot.redis.client.entry.RedisEntry; +import org.raystack.depot.redis.client.entry.RedisKeyValueEntry; +import org.raystack.depot.common.Template; +import org.raystack.depot.message.MessageSchema; +import org.raystack.depot.message.ParsedMessage; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.metrics.StatsDReporter; +import lombok.AllArgsConstructor; + +import java.util.Collections; +import java.util.List; + +@AllArgsConstructor +public class RedisKeyValueEntryParser implements RedisEntryParser { + private final StatsDReporter statsDReporter; + private final Template keyTemplate; + private final String fieldName; + private final MessageSchema schema; + + @Override + public List getRedisEntry(ParsedMessage parsedMessage) { + String redisKey = keyTemplate.parse(parsedMessage, schema); + String redisValue = GenericFieldFactory.getField(parsedMessage.getFieldByName(fieldName, schema)).getString(); + RedisKeyValueEntry redisKeyValueEntry = new RedisKeyValueEntry(redisKey, redisValue, + new Instrumentation(statsDReporter, RedisKeyValueEntry.class)); + return Collections.singletonList(redisKeyValueEntry); + } +} diff --git a/src/main/java/org/raystack/depot/redis/parsers/RedisListEntryParser.java b/src/main/java/org/raystack/depot/redis/parsers/RedisListEntryParser.java new file mode 100644 index 00000000..d7873c6c --- /dev/null +++ b/src/main/java/org/raystack/depot/redis/parsers/RedisListEntryParser.java @@ -0,0 +1,33 @@ +package org.raystack.depot.redis.parsers; + +import org.raystack.depot.message.field.GenericFieldFactory; +import org.raystack.depot.redis.client.entry.RedisEntry; +import org.raystack.depot.redis.client.entry.RedisListEntry; +import org.raystack.depot.common.Template; +import org.raystack.depot.message.MessageSchema; +import org.raystack.depot.message.ParsedMessage; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.metrics.StatsDReporter; +import lombok.AllArgsConstructor; + +import java.util.Collections; +import java.util.List; + +/** + * Redis list parser. 
+ */ +@AllArgsConstructor +public class RedisListEntryParser implements RedisEntryParser { + private final StatsDReporter statsDReporter; + private final Template keyTemplate; + private final String field; + private final MessageSchema schema; + + @Override + public List getRedisEntry(ParsedMessage parsedMessage) { + String redisKey = keyTemplate.parse(parsedMessage, schema); + String redisValue = GenericFieldFactory.getField(parsedMessage.getFieldByName(field, schema)).getString(); + return Collections.singletonList( + new RedisListEntry(redisKey, redisValue, new Instrumentation(statsDReporter, RedisListEntry.class))); + } +} diff --git a/src/main/java/io/odpf/depot/redis/parsers/RedisParser.java b/src/main/java/org/raystack/depot/redis/parsers/RedisParser.java similarity index 59% rename from src/main/java/io/odpf/depot/redis/parsers/RedisParser.java rename to src/main/java/org/raystack/depot/redis/parsers/RedisParser.java index bed1237d..16372e5e 100644 --- a/src/main/java/io/odpf/depot/redis/parsers/RedisParser.java +++ b/src/main/java/org/raystack/depot/redis/parsers/RedisParser.java @@ -1,16 +1,16 @@ -package io.odpf.depot.redis.parsers; +package org.raystack.depot.redis.parsers; -import io.odpf.depot.common.Tuple; -import io.odpf.depot.error.ErrorInfo; -import io.odpf.depot.error.ErrorType; -import io.odpf.depot.exception.ConfigurationException; -import io.odpf.depot.exception.DeserializerException; -import io.odpf.depot.message.OdpfMessage; -import io.odpf.depot.message.OdpfMessageParser; -import io.odpf.depot.message.ParsedOdpfMessage; -import io.odpf.depot.message.SinkConnectorSchemaMessageMode; -import io.odpf.depot.redis.client.entry.RedisEntry; -import io.odpf.depot.redis.record.RedisRecord; +import org.raystack.depot.redis.client.entry.RedisEntry; +import org.raystack.depot.redis.record.RedisRecord; +import org.raystack.depot.common.Tuple; +import org.raystack.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorType; +import org.raystack.depot.exception.ConfigurationException; +import org.raystack.depot.exception.DeserializerException; +import org.raystack.depot.message.Message; +import org.raystack.depot.message.MessageParser; +import org.raystack.depot.message.ParsedMessage; +import org.raystack.depot.message.SinkConnectorSchemaMessageMode; import lombok.AllArgsConstructor; import lombok.extern.slf4j.Slf4j; @@ -19,26 +19,27 @@ import java.util.List; import java.util.stream.IntStream; - /** - * Convert Odpf messages to RedisRecords. + * Convert Messages to RedisRecords. 
*/ @AllArgsConstructor @Slf4j public class RedisParser { - private final OdpfMessageParser odpfMessageParser; + private final MessageParser messageParser; private final RedisEntryParser redisEntryParser; private final Tuple modeAndSchema; - public List convert(List messages) { + public List convert(List messages) { List records = new ArrayList<>(); IntStream.range(0, messages.size()).forEach(index -> { try { - ParsedOdpfMessage parsedOdpfMessage = odpfMessageParser.parse(messages.get(index), modeAndSchema.getFirst(), modeAndSchema.getSecond()); - List redisDataEntries = redisEntryParser.getRedisEntry(parsedOdpfMessage); + ParsedMessage parsedMessage = messageParser.parse(messages.get(index), modeAndSchema.getFirst(), + modeAndSchema.getSecond()); + List redisDataEntries = redisEntryParser.getRedisEntry(parsedMessage); for (RedisEntry redisEntry : redisDataEntries) { - records.add(new RedisRecord(redisEntry, (long) index, null, messages.get(index).getMetadataString(), true)); + records.add(new RedisRecord(redisEntry, (long) index, null, messages.get(index).getMetadataString(), + true)); } } catch (UnsupportedOperationException e) { records.add(createAndLogErrorRecord(e, ErrorType.INVALID_MESSAGE_ERROR, index, messages)); @@ -53,9 +54,10 @@ public List convert(List messages) { return records; } - private RedisRecord createAndLogErrorRecord(Exception e, ErrorType type, int index, List messages) { + private RedisRecord createAndLogErrorRecord(Exception e, ErrorType type, int index, List messages) { ErrorInfo errorInfo = new ErrorInfo(e, type); - RedisRecord record = new RedisRecord(null, (long) index, errorInfo, messages.get(index).getMetadataString(), false); + RedisRecord record = new RedisRecord(null, (long) index, errorInfo, messages.get(index).getMetadataString(), + false); log.error("Error while parsing record for message. 
Record: {}, Error: {}", record, errorInfo); return record; } diff --git a/src/main/java/io/odpf/depot/redis/record/RedisRecord.java b/src/main/java/org/raystack/depot/redis/record/RedisRecord.java similarity index 72% rename from src/main/java/io/odpf/depot/redis/record/RedisRecord.java rename to src/main/java/org/raystack/depot/redis/record/RedisRecord.java index d526306d..cf471450 100644 --- a/src/main/java/io/odpf/depot/redis/record/RedisRecord.java +++ b/src/main/java/org/raystack/depot/redis/record/RedisRecord.java @@ -1,16 +1,15 @@ -package io.odpf.depot.redis.record; +package org.raystack.depot.redis.record; -import io.odpf.depot.error.ErrorInfo; -import io.odpf.depot.redis.client.entry.RedisEntry; -import io.odpf.depot.redis.client.response.RedisClusterResponse; -import io.odpf.depot.redis.client.response.RedisStandaloneResponse; -import io.odpf.depot.redis.ttl.RedisTtl; +import org.raystack.depot.redis.client.entry.RedisEntry; +import org.raystack.depot.redis.client.response.RedisClusterResponse; +import org.raystack.depot.redis.client.response.RedisStandaloneResponse; +import org.raystack.depot.error.ErrorInfo; +import org.raystack.depot.redis.ttl.RedisTtl; import lombok.AllArgsConstructor; import lombok.Getter; import redis.clients.jedis.JedisCluster; import redis.clients.jedis.Pipeline; - @AllArgsConstructor public class RedisRecord { private RedisEntry redisEntry; diff --git a/src/main/java/io/odpf/depot/redis/ttl/DurationTtl.java b/src/main/java/org/raystack/depot/redis/ttl/DurationTtl.java similarity index 93% rename from src/main/java/io/odpf/depot/redis/ttl/DurationTtl.java rename to src/main/java/org/raystack/depot/redis/ttl/DurationTtl.java index 557056a5..e6bbb299 100644 --- a/src/main/java/io/odpf/depot/redis/ttl/DurationTtl.java +++ b/src/main/java/org/raystack/depot/redis/ttl/DurationTtl.java @@ -1,11 +1,10 @@ -package io.odpf.depot.redis.ttl; +package org.raystack.depot.redis.ttl; import lombok.AllArgsConstructor; import redis.clients.jedis.JedisCluster; import redis.clients.jedis.Pipeline; import redis.clients.jedis.Response; - @AllArgsConstructor public class DurationTtl implements RedisTtl { private int ttlInSeconds; diff --git a/src/main/java/io/odpf/depot/redis/ttl/ExactTimeTtl.java b/src/main/java/org/raystack/depot/redis/ttl/ExactTimeTtl.java similarity index 93% rename from src/main/java/io/odpf/depot/redis/ttl/ExactTimeTtl.java rename to src/main/java/org/raystack/depot/redis/ttl/ExactTimeTtl.java index e678a754..971b3c31 100644 --- a/src/main/java/io/odpf/depot/redis/ttl/ExactTimeTtl.java +++ b/src/main/java/org/raystack/depot/redis/ttl/ExactTimeTtl.java @@ -1,11 +1,10 @@ -package io.odpf.depot.redis.ttl; +package org.raystack.depot.redis.ttl; import lombok.AllArgsConstructor; import redis.clients.jedis.JedisCluster; import redis.clients.jedis.Pipeline; import redis.clients.jedis.Response; - @AllArgsConstructor public class ExactTimeTtl implements RedisTtl { private long unixTime; diff --git a/src/main/java/io/odpf/depot/redis/ttl/NoRedisTtl.java b/src/main/java/org/raystack/depot/redis/ttl/NoRedisTtl.java similarity index 90% rename from src/main/java/io/odpf/depot/redis/ttl/NoRedisTtl.java rename to src/main/java/org/raystack/depot/redis/ttl/NoRedisTtl.java index 076f45cd..8b22434b 100644 --- a/src/main/java/io/odpf/depot/redis/ttl/NoRedisTtl.java +++ b/src/main/java/org/raystack/depot/redis/ttl/NoRedisTtl.java @@ -1,4 +1,4 @@ -package io.odpf.depot.redis.ttl; +package org.raystack.depot.redis.ttl; import redis.clients.jedis.JedisCluster; import 
redis.clients.jedis.Pipeline; diff --git a/src/main/java/io/odpf/depot/redis/ttl/RedisTTLFactory.java b/src/main/java/org/raystack/depot/redis/ttl/RedisTTLFactory.java similarity index 78% rename from src/main/java/io/odpf/depot/redis/ttl/RedisTTLFactory.java rename to src/main/java/org/raystack/depot/redis/ttl/RedisTTLFactory.java index f6fb0642..5f6c2f80 100644 --- a/src/main/java/io/odpf/depot/redis/ttl/RedisTTLFactory.java +++ b/src/main/java/org/raystack/depot/redis/ttl/RedisTTLFactory.java @@ -1,9 +1,8 @@ -package io.odpf.depot.redis.ttl; +package org.raystack.depot.redis.ttl; - -import io.odpf.depot.config.RedisSinkConfig; -import io.odpf.depot.exception.ConfigurationException; -import io.odpf.depot.redis.enums.RedisSinkTtlType; +import org.raystack.depot.redis.enums.RedisSinkTtlType; +import org.raystack.depot.config.RedisSinkConfig; +import org.raystack.depot.exception.ConfigurationException; public class RedisTTLFactory { diff --git a/src/main/java/io/odpf/depot/redis/ttl/RedisTtl.java b/src/main/java/org/raystack/depot/redis/ttl/RedisTtl.java similarity index 88% rename from src/main/java/io/odpf/depot/redis/ttl/RedisTtl.java rename to src/main/java/org/raystack/depot/redis/ttl/RedisTtl.java index 2ebc8292..255340cc 100644 --- a/src/main/java/io/odpf/depot/redis/ttl/RedisTtl.java +++ b/src/main/java/org/raystack/depot/redis/ttl/RedisTtl.java @@ -1,4 +1,4 @@ -package io.odpf.depot.redis.ttl; +package org.raystack.depot.redis.ttl; import redis.clients.jedis.JedisCluster; import redis.clients.jedis.Pipeline; diff --git a/src/main/java/io/odpf/depot/redis/util/RedisSinkUtils.java b/src/main/java/org/raystack/depot/redis/util/RedisSinkUtils.java similarity index 56% rename from src/main/java/io/odpf/depot/redis/util/RedisSinkUtils.java rename to src/main/java/org/raystack/depot/redis/util/RedisSinkUtils.java index 466d60ab..ba9c7b17 100644 --- a/src/main/java/io/odpf/depot/redis/util/RedisSinkUtils.java +++ b/src/main/java/org/raystack/depot/redis/util/RedisSinkUtils.java @@ -1,10 +1,10 @@ -package io.odpf.depot.redis.util; +package org.raystack.depot.redis.util; -import io.odpf.depot.error.ErrorInfo; -import io.odpf.depot.error.ErrorType; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.redis.client.response.RedisResponse; -import io.odpf.depot.redis.record.RedisRecord; +import org.raystack.depot.redis.client.response.RedisResponse; +import org.raystack.depot.redis.record.RedisRecord; +import org.raystack.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorType; +import org.raystack.depot.metrics.Instrumentation; import java.util.HashMap; import java.util.List; @@ -12,7 +12,8 @@ import java.util.stream.IntStream; public class RedisSinkUtils { - public static Map getErrorsFromResponse(List redisRecords, List responses, Instrumentation instrumentation) { + public static Map getErrorsFromResponse(List redisRecords, + List responses, Instrumentation instrumentation) { Map errors = new HashMap<>(); IntStream.range(0, responses.size()).forEach( index -> { @@ -21,10 +22,10 @@ public static Map getErrorsFromResponse(List redis RedisRecord record = redisRecords.get(index); instrumentation.logError("Error while inserting to redis for message. 
Record: {}, Error: {}", record.toString(), response.getMessage()); - errors.put(record.getIndex(), new ErrorInfo(new Exception(response.getMessage()), ErrorType.DEFAULT_ERROR)); + errors.put(record.getIndex(), + new ErrorInfo(new Exception(response.getMessage()), ErrorType.DEFAULT_ERROR)); } - } - ); + }); return errors; } } diff --git a/src/main/java/io/odpf/depot/stencil/OdpfStencilUpdateListener.java b/src/main/java/org/raystack/depot/stencil/DepotStencilUpdateListener.java similarity index 55% rename from src/main/java/io/odpf/depot/stencil/OdpfStencilUpdateListener.java rename to src/main/java/org/raystack/depot/stencil/DepotStencilUpdateListener.java index d5ff22a6..f795535f 100644 --- a/src/main/java/io/odpf/depot/stencil/OdpfStencilUpdateListener.java +++ b/src/main/java/org/raystack/depot/stencil/DepotStencilUpdateListener.java @@ -1,17 +1,17 @@ -package io.odpf.depot.stencil; +package org.raystack.depot.stencil; import com.google.protobuf.Descriptors; -import io.odpf.depot.message.OdpfMessageParser; -import io.odpf.stencil.SchemaUpdateListener; +import org.raystack.depot.message.MessageParser; +import org.raystack.stencil.SchemaUpdateListener; import lombok.Getter; import lombok.Setter; import java.util.Map; -public abstract class OdpfStencilUpdateListener implements SchemaUpdateListener { +public abstract class DepotStencilUpdateListener implements SchemaUpdateListener { @Getter @Setter - private OdpfMessageParser odpfMessageParser; + private MessageParser messageParser; public void onSchemaUpdate(final Map newDescriptor) { // default implementation is empty diff --git a/src/main/java/io/odpf/depot/utils/DateUtils.java b/src/main/java/org/raystack/depot/utils/DateUtils.java similarity index 94% rename from src/main/java/io/odpf/depot/utils/DateUtils.java rename to src/main/java/org/raystack/depot/utils/DateUtils.java index 5b57d93d..b244e1c3 100644 --- a/src/main/java/io/odpf/depot/utils/DateUtils.java +++ b/src/main/java/org/raystack/depot/utils/DateUtils.java @@ -1,4 +1,4 @@ -package io.odpf.depot.utils; +package org.raystack.depot.utils; import java.text.DateFormat; import java.text.SimpleDateFormat; diff --git a/src/main/java/io/odpf/depot/utils/JsonUtils.java b/src/main/java/org/raystack/depot/utils/JsonUtils.java similarity index 87% rename from src/main/java/io/odpf/depot/utils/JsonUtils.java rename to src/main/java/org/raystack/depot/utils/JsonUtils.java index 37c3e3c2..89e77e01 100644 --- a/src/main/java/io/odpf/depot/utils/JsonUtils.java +++ b/src/main/java/org/raystack/depot/utils/JsonUtils.java @@ -1,6 +1,6 @@ -package io.odpf.depot.utils; +package org.raystack.depot.utils; -import io.odpf.depot.config.OdpfSinkConfig; +import org.raystack.depot.config.SinkConfig; import org.json.JSONObject; public class JsonUtils { @@ -12,7 +12,7 @@ public class JsonUtils { * @param payload Json Payload in bytes * @return Json object */ - public static JSONObject getJsonObject(OdpfSinkConfig config, byte[] payload) { + public static JSONObject getJsonObject(SinkConfig config, byte[] payload) { JSONObject jsonObject = new JSONObject(new String(payload)); if (!config.getSinkConnectorSchemaJsonParserStringModeEnabled()) { return jsonObject; diff --git a/src/main/java/io/odpf/depot/utils/MessageConfigUtils.java b/src/main/java/org/raystack/depot/utils/MessageConfigUtils.java similarity index 56% rename from src/main/java/io/odpf/depot/utils/MessageConfigUtils.java rename to src/main/java/org/raystack/depot/utils/MessageConfigUtils.java index fcc3f7eb..c884e935 100644 ---
a/src/main/java/io/odpf/depot/utils/MessageConfigUtils.java +++ b/src/main/java/org/raystack/depot/utils/MessageConfigUtils.java @@ -1,15 +1,16 @@ -package io.odpf.depot.utils; +package org.raystack.depot.utils; -import io.odpf.depot.common.Tuple; -import io.odpf.depot.config.OdpfSinkConfig; -import io.odpf.depot.message.SinkConnectorSchemaMessageMode; +import org.raystack.depot.config.SinkConfig; +import org.raystack.depot.message.SinkConnectorSchemaMessageMode; +import org.raystack.depot.common.Tuple; public class MessageConfigUtils { - public static Tuple getModeAndSchema(OdpfSinkConfig sinkConfig) { + public static Tuple getModeAndSchema(SinkConfig sinkConfig) { SinkConnectorSchemaMessageMode mode = sinkConfig.getSinkConnectorSchemaMessageMode(); String schemaClass = mode == SinkConnectorSchemaMessageMode.LOG_MESSAGE - ? sinkConfig.getSinkConnectorSchemaProtoMessageClass() : sinkConfig.getSinkConnectorSchemaProtoKeyClass(); + ? sinkConfig.getSinkConnectorSchemaProtoMessageClass() + : sinkConfig.getSinkConnectorSchemaProtoKeyClass(); return new Tuple<>(mode, schemaClass); } } diff --git a/src/main/java/io/odpf/depot/utils/ProtoUtils.java b/src/main/java/org/raystack/depot/utils/ProtoUtils.java similarity index 92% rename from src/main/java/io/odpf/depot/utils/ProtoUtils.java rename to src/main/java/org/raystack/depot/utils/ProtoUtils.java index 48bacb87..d47a1b1f 100644 --- a/src/main/java/io/odpf/depot/utils/ProtoUtils.java +++ b/src/main/java/org/raystack/depot/utils/ProtoUtils.java @@ -1,4 +1,4 @@ -package io.odpf.depot.utils; +package org.raystack.depot.utils; import com.google.protobuf.DynamicMessage; @@ -37,6 +37,7 @@ private static List collectNestedFields(DynamicMessage node) { } private static List getMessageWithUnknownFields(List messages) { - return messages.stream().filter(message -> message.getUnknownFields().asMap().size() > 0).collect(Collectors.toList()); + return messages.stream().filter(message -> message.getUnknownFields().asMap().size() > 0) + .collect(Collectors.toList()); } } diff --git a/src/main/java/io/odpf/depot/utils/StencilUtils.java b/src/main/java/org/raystack/depot/utils/StencilUtils.java similarity index 77% rename from src/main/java/io/odpf/depot/utils/StencilUtils.java rename to src/main/java/org/raystack/depot/utils/StencilUtils.java index d7194880..71e310e6 100644 --- a/src/main/java/io/odpf/depot/utils/StencilUtils.java +++ b/src/main/java/org/raystack/depot/utils/StencilUtils.java @@ -1,13 +1,13 @@ -package io.odpf.depot.utils; +package org.raystack.depot.utils; +import org.raystack.depot.config.SinkConfig; +import org.raystack.stencil.SchemaUpdateListener; +import org.raystack.stencil.config.StencilConfig; import com.timgroup.statsd.StatsDClient; -import io.odpf.depot.config.OdpfSinkConfig; -import io.odpf.stencil.SchemaUpdateListener; -import io.odpf.stencil.config.StencilConfig; public class StencilUtils { public static StencilConfig getStencilConfig( - OdpfSinkConfig sinkConfig, + SinkConfig sinkConfig, StatsDClient statsDClient, SchemaUpdateListener schemaUpdateListener) { return StencilConfig.builder() @@ -23,7 +23,7 @@ public static StencilConfig getStencilConfig( .build(); } - public static StencilConfig getStencilConfig(OdpfSinkConfig config, StatsDClient statsDClient) { + public static StencilConfig getStencilConfig(SinkConfig config, StatsDClient statsDClient) { return getStencilConfig(config, statsDClient, null); } } diff --git a/src/main/java/io/odpf/depot/utils/StringUtils.java b/src/main/java/org/raystack/depot/utils/StringUtils.java 
similarity index 86% rename from src/main/java/io/odpf/depot/utils/StringUtils.java rename to src/main/java/org/raystack/depot/utils/StringUtils.java index 9e6a8a6d..74c1dbd4 100644 --- a/src/main/java/io/odpf/depot/utils/StringUtils.java +++ b/src/main/java/org/raystack/depot/utils/StringUtils.java @@ -1,4 +1,4 @@ -package io.odpf.depot.utils; +package org.raystack.depot.utils; import java.util.regex.Matcher; import java.util.regex.Pattern; @@ -31,7 +31,6 @@ public static int countVariables(String fmt) { } public static int count(String in, char c) { - return IntStream.range(0, in.length()). - reduce(0, (x, y) -> x + (in.charAt(y) == c ? 1 : 0)); + return IntStream.range(0, in.length()).reduce(0, (x, y) -> x + (in.charAt(y) == c ? 1 : 0)); } } diff --git a/src/test/java/io/odpf/depot/bigquery/client/BigQueryResponseParserTest.java b/src/test/java/io/odpf/depot/bigquery/client/BigQueryResponseParserTest.java deleted file mode 100644 index ab964d0a..00000000 --- a/src/test/java/io/odpf/depot/bigquery/client/BigQueryResponseParserTest.java +++ /dev/null @@ -1,83 +0,0 @@ -package io.odpf.depot.bigquery.client; - -import com.google.cloud.bigquery.BigQueryError; -import com.google.cloud.bigquery.InsertAllResponse; -import io.odpf.depot.bigquery.TestMetadata; -import io.odpf.depot.bigquery.TestOdpfMessageBuilder; -import io.odpf.depot.metrics.BigQueryMetrics; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.bigquery.exception.BigQuerySinkException; -import io.odpf.depot.bigquery.models.Record; -import io.odpf.depot.error.ErrorInfo; -import io.odpf.depot.error.ErrorType; -import org.aeonbits.owner.util.Collections; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.MockitoAnnotations; - -import java.time.Instant; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -public class BigQueryResponseParserTest { - - @Mock - private InsertAllResponse response; - - @Mock - private Instrumentation instrumentation; - - @Mock - private BigQueryMetrics metrics; - - @Before - public void setup() { - MockitoAnnotations.initMocks(this); - } - - @Test - public void shouldParseResponse() { - TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), Instant.now().toEpochMilli()); - TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(), Instant.now().toEpochMilli()); - TestMetadata record3Offset = new TestMetadata("topic1", 3, 103, Instant.now().toEpochMilli(), Instant.now().toEpochMilli()); - TestMetadata record4Offset = new TestMetadata("topic1", 4, 104, Instant.now().toEpochMilli(), Instant.now().toEpochMilli()); - TestMetadata record5Offset = new TestMetadata("topic1", 5, 104, Instant.now().toEpochMilli(), Instant.now().toEpochMilli()); - TestMetadata record6Offset = new TestMetadata("topic1", 6, 104, Instant.now().toEpochMilli(), Instant.now().toEpochMilli()); - Record record1 = new Record(TestOdpfMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1").getMetadata(), new HashMap<>(), 0, null); - Record record2 = new Record(TestOdpfMessageBuilder.withMetadata(record2Offset).createConsumerRecord("order-2", "order-url-2", "order-details-2").getMetadata(), new HashMap<>(), 1, null); - Record record3 = new Record(TestOdpfMessageBuilder.withMetadata(record3Offset).createConsumerRecord("order-3", "order-url-3", "order-details-3").getMetadata(), new 
HashMap<>(), 2, null); - Record record4 = new Record(TestOdpfMessageBuilder.withMetadata(record4Offset).createConsumerRecord("order-4", "order-url-4", "order-details-4").getMetadata(), new HashMap<>(), 3, null); - Record record5 = new Record(TestOdpfMessageBuilder.withMetadata(record5Offset).createConsumerRecord("order-5", "order-url-5", "order-details-5").getMetadata(), new HashMap<>(), 4, null); - Record record6 = new Record(TestOdpfMessageBuilder.withMetadata(record6Offset).createConsumerRecord("order-6", "order-url-6", "order-details-6").getMetadata(), new HashMap<>(), 5, null); - List records = Collections.list(record1, record2, record3, record4, record5, record6); - BigQueryError error1 = new BigQueryError("", "US", ""); - BigQueryError error2 = new BigQueryError("invalid", "US", "no such field"); - BigQueryError error3 = new BigQueryError("invalid", "", "The destination table's partition tmp$20160101 is outside the allowed bounds. You can only stream to partitions within 1825 days in the past and 366 days in the future relative to the current date"); - BigQueryError error4 = new BigQueryError("stopped", "", ""); - - Map> insertErrorsMap = new HashMap>() {{ - put(0L, Collections.list(error1)); - put(1L, Collections.list(error2)); - put(2L, Collections.list(error3)); - put(3L, Collections.list(error4)); - }}; - Mockito.when(response.hasErrors()).thenReturn(true); - Mockito.when(response.getInsertErrors()).thenReturn(insertErrorsMap); - Mockito.when(metrics.getBigqueryTotalErrorsMetrics()).thenReturn("test"); - Map errorInfoMap = BigQueryResponseParser.getErrorsFromBQResponse(records, response, metrics, instrumentation); - - Assert.assertEquals(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_UNKNOWN_ERROR), errorInfoMap.get(0L)); - Assert.assertEquals(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_4XX_ERROR), errorInfoMap.get(1L)); - Assert.assertEquals(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_4XX_ERROR), errorInfoMap.get(2L)); - Assert.assertEquals(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_5XX_ERROR), errorInfoMap.get(3L)); - - Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter("test", String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.UNKNOWN_ERROR)); - Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter("test", String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.INVALID_SCHEMA_ERROR)); - Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter("test", String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.OOB_ERROR)); - Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter("test", String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.STOPPED_ERROR)); - } -} diff --git a/src/test/java/io/odpf/depot/bigquery/converter/MessageRecordConverterForJsonTest.java b/src/test/java/io/odpf/depot/bigquery/converter/MessageRecordConverterForJsonTest.java deleted file mode 100644 index e86e3840..00000000 --- a/src/test/java/io/odpf/depot/bigquery/converter/MessageRecordConverterForJsonTest.java +++ /dev/null @@ -1,269 +0,0 @@ -package io.odpf.depot.bigquery.converter; - -import com.google.common.collect.ImmutableMap; -import io.odpf.depot.bigquery.models.Record; -import io.odpf.depot.bigquery.models.Records; -import io.odpf.depot.config.BigQuerySinkConfig; -import io.odpf.depot.config.OdpfSinkConfig; -import io.odpf.depot.error.ErrorInfo; -import 
io.odpf.depot.error.ErrorType; -import io.odpf.depot.message.OdpfMessage; -import io.odpf.depot.message.OdpfMessageParser; -import io.odpf.depot.message.OdpfMessageSchema; -import io.odpf.depot.message.json.JsonOdpfMessageParser; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.metrics.JsonParserMetrics; -import org.aeonbits.owner.ConfigFactory; -import org.junit.Test; - -import java.text.DateFormat; -import java.text.ParseException; -import java.text.SimpleDateFormat; -import java.util.ArrayList; -import java.util.Collections; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.TimeZone; -import java.util.stream.Collectors; - -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.collection.IsEmptyCollection.empty; -import static org.hamcrest.collection.IsMapContaining.hasEntry; -import static org.junit.Assert.*; -import static org.mockito.Mockito.mock; - -public class MessageRecordConverterForJsonTest { - - private final OdpfSinkConfig defaultConfig = ConfigFactory.create(OdpfSinkConfig.class, Collections.emptyMap()); - private final Record.RecordBuilder recordBuilder = Record.builder(); - private final Map emptyMetadata = Collections.emptyMap(); - private final Map emptyColumnsMap = Collections.emptyMap(); - private final ErrorInfo noError = null; - private final Instrumentation instrumentation = mock(Instrumentation.class); - private final JsonParserMetrics jsonParserMetrics = new JsonParserMetrics(defaultConfig); - private static final TimeZone TZ = TimeZone.getTimeZone("UTC"); - private static final DateFormat DF = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm'Z'"); - static { - DF.setTimeZone(TZ); - } - - @Test - public void shouldReturnEmptyRecordsforEmptyList() { - OdpfMessageParser parser = new JsonOdpfMessageParser(defaultConfig, instrumentation, jsonParserMetrics); - OdpfMessageSchema schema = null; - BigQuerySinkConfig bigQuerySinkConfig = null; - MessageRecordConverter converter = new MessageRecordConverter(parser, bigQuerySinkConfig, schema); - List emptyOdpfMessageList = Collections.emptyList(); - - Records records = converter.convert(emptyOdpfMessageList); - List emptyRecordList = Collections.emptyList(); - Records expectedRecords = new Records(emptyRecordList, emptyRecordList); - assertEquals(expectedRecords, records); - } - - @Test - public void shouldConvertJsonMessagesToRecordForLogMessage() { - OdpfMessageParser parser = new JsonOdpfMessageParser(defaultConfig, instrumentation, jsonParserMetrics); - OdpfMessageSchema schema = null; - HashMap configMap = new HashMap<>(); - configMap.put("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", "LOG_MESSAGE"); - BigQuerySinkConfig bigQuerySinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, configMap); - MessageRecordConverter converter = new MessageRecordConverter(parser, bigQuerySinkConfig, schema); - List messages = new ArrayList<>(); - messages.add(getOdpfMessageForString("{ \"first_name\": \"john doe\"}")); - messages.add(getOdpfMessageForString("{ \"last_name\": \"walker\"}")); - - Records records = converter.convert(messages); - - List expectedValidRecords = new ArrayList<>(); - - Record validRecord1 = recordBuilder - .metadata(emptyMetadata) - .columns(ImmutableMap.of("first_name", "john doe")) - .index(0L) - .errorInfo(noError) - .build(); - - Record validRecord2 = recordBuilder - .metadata(emptyMetadata) - .columns(ImmutableMap.of("last_name", "walker")) - .index(1L) - .errorInfo(noError) - .build(); - expectedValidRecords.add(validRecord1); 
- expectedValidRecords.add(validRecord2); - List invalidRecords = Collections.emptyList(); - Records expectedRecords = new Records(expectedValidRecords, invalidRecords); - assertEquals(expectedRecords, records); - - - } - - - @Test - public void shouldConvertJsonMessagesToRecordForLogKey() { - OdpfMessageParser parser = new JsonOdpfMessageParser(defaultConfig, instrumentation, jsonParserMetrics); - OdpfMessageSchema schema = null; - HashMap configMap = new HashMap<>(); - configMap.put("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", "LOG_KEY"); - BigQuerySinkConfig bigQuerySinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, configMap); - MessageRecordConverter converter = new MessageRecordConverter(parser, bigQuerySinkConfig, schema); - List messages = new ArrayList<>(); - messages.add(new OdpfMessage("{ \"first_name\": \"john doe\"}".getBytes(), null)); - messages.add(new OdpfMessage("{ \"last_name\": \"walker\"}".getBytes(), null)); - - Records records = converter.convert(messages); - - List expectedValidRecords = new ArrayList<>(); - - Record validRecord1 = recordBuilder - .metadata(emptyMetadata) - .columns(ImmutableMap.of("first_name", "john doe")) - .index(0L) - .errorInfo(noError) - .build(); - - Record validRecord2 = recordBuilder - .metadata(emptyMetadata) - .columns(ImmutableMap.of("last_name", "walker")) - .index(1L) - .errorInfo(noError) - .build(); - expectedValidRecords.add(validRecord1); - expectedValidRecords.add(validRecord2); - List invalidRecords = Collections.emptyList(); - Records expectedRecords = new Records(expectedValidRecords, invalidRecords); - assertEquals(expectedRecords, records); - - - } - - - @Test - public void shouldHandleBothInvalidAndValidJsonMessages() { - OdpfMessageParser parser = new JsonOdpfMessageParser(defaultConfig, instrumentation, jsonParserMetrics); - OdpfMessageSchema schema = null; - HashMap configMap = new HashMap<>(); - configMap.put("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", "LOG_MESSAGE"); - BigQuerySinkConfig bigQuerySinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, configMap); - MessageRecordConverter converter = new MessageRecordConverter(parser, bigQuerySinkConfig, schema); - List messages = new ArrayList<>(); - messages.add(getOdpfMessageForString("{ \"first_name\": \"john doe\"}")); - messages.add(getOdpfMessageForString("{ invalid json str")); - messages.add(getOdpfMessageForString("{ \"last_name\": \"walker\"}")); - messages.add(getOdpfMessageForString("another invalid message")); - String nestedJsonStr = "{\n" - + " \"event_value\": {\n" - + " \"CustomerLatitude\": \"-6.166895595817224\",\n" - + " \"fb_content_type\": \"product\"\n" - + " },\n" - + " \"ip\": \"210.210.175.250\",\n" - + " \"oaid\": null,\n" - + " \"event_time\": \"2022-05-06 08:03:43.561\",\n" - + " \"is_receipt_validated\": null,\n" - + " \"contributor_1_campaign\": null\n" - + "}"; - - messages.add(getOdpfMessageForString(nestedJsonStr)); - - Records records = converter.convert(messages); - - - List expectedValidRecords = new ArrayList<>(); - Record validRecord1 = recordBuilder - .metadata(emptyMetadata) - .columns(ImmutableMap.of("first_name", "john doe")) - .index(0L) - .errorInfo(MessageRecordConverterForJsonTest.this.noError) - .build(); - - Record validRecord2 = recordBuilder - .metadata(emptyMetadata) - .columns(ImmutableMap.of("last_name", "walker")) - .index(2L) - .errorInfo(MessageRecordConverterForJsonTest.this.noError) - .build(); - - expectedValidRecords.add(validRecord1); - expectedValidRecords.add(validRecord2); - - ErrorInfo errorInfo = new 
ErrorInfo(null, ErrorType.DESERIALIZATION_ERROR); - ErrorInfo invalidMessageError = new ErrorInfo(null, ErrorType.INVALID_MESSAGE_ERROR); - List expectedInvalidRecords = new ArrayList<>(); - Record.RecordBuilder invalidRecordBuilder = recordBuilder.metadata(emptyMetadata).columns(emptyColumnsMap); - - Record invalidRecord1 = invalidRecordBuilder - .index(1L) - .errorInfo(errorInfo) - .build(); - - Record invalidRecord3 = invalidRecordBuilder - .index(3L) - .errorInfo(errorInfo) - .build(); - - Record invalidRecord4 = invalidRecordBuilder - .index(4L) - .errorInfo(invalidMessageError) - .build(); - - expectedInvalidRecords.add(invalidRecord1); - expectedInvalidRecords.add(invalidRecord3); - expectedInvalidRecords.add(invalidRecord4); - - assertEquals(expectedValidRecords, records.getValidRecords()); - - assertEquals(expectedInvalidRecords, records.getInvalidRecords()); - - } - - @Test - public void shouldInjectEventTimestamp() throws ParseException { - OdpfMessageParser parser = new JsonOdpfMessageParser(defaultConfig, instrumentation, jsonParserMetrics); - OdpfMessageSchema schema = null; - Map configMap = ImmutableMap.of( - "SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", "LOG_MESSAGE", - "SINK_CONNECTOR_SCHEMA_DATA_TYPE", "json", - "SINK_BIGQUERY_ADD_EVENT_TIMESTAMP_ENABLE", "true"); - - BigQuerySinkConfig bigQuerySinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, configMap); - MessageRecordConverter converter = new MessageRecordConverter(parser, bigQuerySinkConfig, schema); - List messages = new ArrayList<>(); - messages.add(getOdpfMessageForString("{ \"first_name\": \"john doe\"}")); - messages.add(getOdpfMessageForString("{ \"last_name\": \"walker\"}")); - - Records actualRecords = converter.convert(messages); - - /* - cant do assert equals because of timestamp value - assertEquals(expectedRecords, records); - */ - assertThat(actualRecords.getInvalidRecords(), empty()); - assertEquals(2, actualRecords.getValidRecords().size()); - Record validRecord1 = actualRecords.getValidRecords().get(0); - assertNull(validRecord1.getErrorInfo()); - assertThat(validRecord1.getColumns(), hasEntry("first_name", "john doe")); - Record validRecord2 = actualRecords.getValidRecords().get(1); - assertNull(validRecord2.getErrorInfo()); - assertThat(validRecord2.getColumns(), hasEntry("last_name", "walker")); - - List dateTimeList = actualRecords - .getValidRecords() - .stream() - .map(k -> (String) k.getColumns().get("event_timestamp")) - .collect(Collectors.toList()); - long currentTimeMillis = System.currentTimeMillis(); - //assert that time stamp injected is recent by checking the difference to be less than 10 seconds - boolean timedifferenceForFirstDate = (currentTimeMillis - DF.parse(dateTimeList.get(0)).getTime()) < 60000; - long timeDifferenceForSecondDate = currentTimeMillis - DF.parse(dateTimeList.get(1)).getTime(); - assertTrue("the difference is " + timedifferenceForFirstDate, timedifferenceForFirstDate); - assertTrue("the difference is " + timeDifferenceForSecondDate, timeDifferenceForSecondDate < 60000); - } - - - private OdpfMessage getOdpfMessageForString(String jsonStr) { - byte[] logMessage = jsonStr.getBytes(); - return new OdpfMessage(null, logMessage); - } -} diff --git a/src/test/java/io/odpf/depot/bigquery/converter/MessageRecordConverterUtilsTest.java b/src/test/java/io/odpf/depot/bigquery/converter/MessageRecordConverterUtilsTest.java deleted file mode 100644 index 80628f6c..00000000 --- a/src/test/java/io/odpf/depot/bigquery/converter/MessageRecordConverterUtilsTest.java +++ /dev/null 
@@ -1,52 +0,0 @@ -package io.odpf.depot.bigquery.converter; - -import io.odpf.depot.config.BigQuerySinkConfig; -import io.odpf.depot.message.OdpfMessage; -import org.aeonbits.owner.ConfigFactory; -import org.junit.Assert; -import org.junit.Test; -import org.mockito.Mockito; - -import java.util.HashMap; -import java.util.Map; - -public class MessageRecordConverterUtilsTest { - - @Test - public void shouldAddMetaData() { - Map columns = new HashMap() {{ - put("test", 123); - }}; - OdpfMessage message = Mockito.mock(OdpfMessage.class); - Mockito.when(message.getMetadata(Mockito.any())).thenReturn(new HashMap() {{ - put("test2", "value2"); - put("something", 99L); - put("nvm", "nvm"); - }}); - BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, new HashMap() {{ - put("SINK_BIGQUERY_ADD_METADATA_ENABLED", "true"); - put("SINK_BIGQUERY_METADATA_COLUMNS_TYPES", "test2=string,something=long,nvm=string"); - }}); - MessageRecordConverterUtils.addMetadata(columns, message, config); - Assert.assertEquals(new HashMap() {{ - put("test", 123); - put("test2", "value2"); - put("something", 99L); - put("nvm", "nvm"); - }}, columns); - } - - @Test - public void shouldAddTimeStampForJson() { - Map columns = new HashMap() {{ - put("test", 123); - }}; - BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, new HashMap() {{ - put("SINK_CONNECTOR_SCHEMA_DATA_TYPE", "json"); - put("SINK_BIGQUERY_ADD_EVENT_TIMESTAMP_ENABLE", "true"); - }}); - MessageRecordConverterUtils.addTimeStampColumnForJson(columns, config); - Assert.assertEquals(2, columns.size()); - Assert.assertNotNull(columns.get("event_timestamp")); - } -} diff --git a/src/test/java/io/odpf/depot/bigquery/handler/JsonErrorHandlerTest.java b/src/test/java/io/odpf/depot/bigquery/handler/JsonErrorHandlerTest.java deleted file mode 100644 index b946569b..00000000 --- a/src/test/java/io/odpf/depot/bigquery/handler/JsonErrorHandlerTest.java +++ /dev/null @@ -1,562 +0,0 @@ -package io.odpf.depot.bigquery.handler; - -import com.google.api.client.util.DateTime; -import com.google.cloud.bigquery.BigQueryError; -import com.google.cloud.bigquery.Field; -import com.google.cloud.bigquery.LegacySQLTypeName; -import com.google.cloud.bigquery.Schema; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import io.odpf.depot.bigquery.client.BigQueryClient; -import io.odpf.depot.bigquery.models.Record; -import io.odpf.depot.config.BigQuerySinkConfig; -import io.odpf.depot.metrics.Instrumentation; -import org.aeonbits.owner.ConfigFactory; -import org.junit.Before; -import org.junit.Test; -import org.mockito.ArgumentCaptor; -import org.mockito.Captor; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -import java.util.Collections; -import java.util.List; -import java.util.Map; - -import static java.util.Arrays.asList; -import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.containsInAnyOrder; -import static org.junit.Assert.assertThrows; -import static org.mockito.ArgumentMatchers.any; -import static org.mockito.Mockito.*; - -public class JsonErrorHandlerTest { - - private final Schema emptyTableSchema = Schema.of(); - - private final BigQuerySinkConfig bigQuerySinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, Collections.emptyMap()); - - @Mock - private BigQueryClient bigQueryClient; - - @Captor - private ArgumentCaptor> fieldsArgumentCaptor; - - @Mock - private Instrumentation instrumentation; - - @Before - public void 
diff --git a/src/test/java/io/odpf/depot/bigquery/handler/JsonErrorHandlerTest.java b/src/test/java/io/odpf/depot/bigquery/handler/JsonErrorHandlerTest.java
deleted file mode 100644
index b946569b..00000000
--- a/src/test/java/io/odpf/depot/bigquery/handler/JsonErrorHandlerTest.java
+++ /dev/null
@@ -1,562 +0,0 @@
-package io.odpf.depot.bigquery.handler;
-
-import com.google.api.client.util.DateTime;
-import com.google.cloud.bigquery.BigQueryError;
-import com.google.cloud.bigquery.Field;
-import com.google.cloud.bigquery.LegacySQLTypeName;
-import com.google.cloud.bigquery.Schema;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import io.odpf.depot.bigquery.client.BigQueryClient;
-import io.odpf.depot.bigquery.models.Record;
-import io.odpf.depot.config.BigQuerySinkConfig;
-import io.odpf.depot.metrics.Instrumentation;
-import org.aeonbits.owner.ConfigFactory;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-import org.mockito.Captor;
-import org.mockito.Mock;
-import org.mockito.MockitoAnnotations;
-
-import java.util.Collections;
-import java.util.List;
-import java.util.Map;
-
-import static java.util.Arrays.asList;
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.Matchers.containsInAnyOrder;
-import static org.junit.Assert.assertThrows;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.*;
-
-public class JsonErrorHandlerTest {
-
-    private final Schema emptyTableSchema = Schema.of();
-
-    private final BigQuerySinkConfig bigQuerySinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, Collections.emptyMap());
-
-    @Mock
-    private BigQueryClient bigQueryClient;
-
-    @Captor
-    private ArgumentCaptor<List<Field>> fieldsArgumentCaptor;
-
-    @Mock
-    private Instrumentation instrumentation;
-
-    @Before
-    public void setUp() throws Exception {
-        MockitoAnnotations.initMocks(this);
-    }
-
-    private Field getField(String name, LegacySQLTypeName type) {
-        return Field.newBuilder(name, type).setMode(Field.Mode.NULLABLE).build();
-    }
-
-    @Test
-    public void shouldUpdateTableFieldsOnSchemaError() {
-        when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema);
-
-        BigQueryError bigQueryError = new BigQueryError("invalid", "first_name", "no such field: first_name");
-        Map<Long, List<BigQueryError>> insertErrors = ImmutableMap.of(0L, Collections.singletonList(bigQueryError));
-
-        Record validRecord = Record.builder()
-                .columns(ImmutableMap.of("first_name", "john doe"))
-                .build();
-
-        List<Record> records = ImmutableList.of(validRecord);
-
-        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, bigQuerySinkConfig, instrumentation);
-
-        jsonErrorHandler.handle(insertErrors, records);
-        verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture());
-
-        Field firstName = getField("first_name", LegacySQLTypeName.STRING);
-        List<Field> actualFields = fieldsArgumentCaptor.getValue();
-        assertThat(actualFields, containsInAnyOrder(firstName));
-    }
-
-    @Test
-    public void shouldNotUpdateTableWhenNoSchemaError() {
-        when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema);
-
-        BigQueryError serverError = new BigQueryError("otherresons", "planet eart", "server error");
-        BigQueryError anotherError = new BigQueryError("otherresons", "planet eart", "server error");
-        Map<Long, List<BigQueryError>> insertErrors = ImmutableMap.of(0L, asList(serverError, anotherError));
-
-        Record validRecord = Record.builder()
-                .columns(ImmutableMap.of("first_name", "john doe"))
-                .build();
-
-        List<Record> records = ImmutableList.of(validRecord);
-
-        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, bigQuerySinkConfig, instrumentation);
-        jsonErrorHandler.handle(insertErrors, records);
-
-        verify(bigQueryClient, never()).upsertTable(any());
-
-    }
-
-    @Test
-    public void shouldUpdateTableFieldsForMultipleRecords() {
-        when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema);
-
-        BigQueryError firstNameNotFoundError = new BigQueryError("invalid", "first_name", "no such field: first_name");
-        BigQueryError anotherError = new BigQueryError("otherresons", "planet eart", "some error");
-        BigQueryError lastNameNotFoundError = new BigQueryError("invalid", "first_name", "no such field: last_name");
-        Map<Long, List<BigQueryError>> errorInfoMap = ImmutableMap.of(
-                0L, asList(firstNameNotFoundError, anotherError),
-                1L, Collections.singletonList(lastNameNotFoundError));
-
-        Record validRecordWithFirstName = Record.builder()
-                .columns(ImmutableMap.of("first_name", "john doe"))
-                .build();
-
-        Map<String, Object> columnsMapWithLastName = ImmutableMap.of("last_name", "john carmack");
-        Record validRecordWithLastName = Record.builder()
-                .columns(columnsMapWithLastName)
-                .build();
-
-        List<Record> validRecords = ImmutableList.of(validRecordWithFirstName, validRecordWithLastName);
-
-        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, bigQuerySinkConfig, instrumentation);
-        jsonErrorHandler.handle(errorInfoMap, validRecords);
-
-        verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture());
-
-        Field firstName = getField("first_name", LegacySQLTypeName.STRING);
-        Field lastName = getField("last_name", LegacySQLTypeName.STRING);
-        List<Field> actualFields = fieldsArgumentCaptor.getValue();
-        assertThat(actualFields, containsInAnyOrder(firstName, lastName));
-    }
-
-    @Test
-    public void shouldIngoreRecordsWhichHaveOtherErrors() {
-        when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema);
-
-        BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", "no such field: first_name");
-        BigQueryError otherError = new BigQueryError("otherresons", "planet eart", "server error");
-        Map<Long, List<BigQueryError>> errorInfoMap = ImmutableMap.of(
-                1L, asList(noSuchFieldError, otherError),
-                0L, Collections.singletonList(otherError));
-
-        Record validRecordWithFirstName = Record.builder()
-                .columns(ImmutableMap.of("first_name", "john doe"))
-                .build();
-
-        Record validRecordWithLastName = Record.builder()
-                .columns(ImmutableMap.of("last_name", "john carmack"))
-                .build();
-
-        List<Record> validRecords = asList(validRecordWithFirstName, validRecordWithLastName);
-
-        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, bigQuerySinkConfig, instrumentation);
-        jsonErrorHandler.handle(errorInfoMap, validRecords);
-
-        verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture());
-
-        Field lastName = getField("last_name", LegacySQLTypeName.STRING);
-        List<Field> actualFields = fieldsArgumentCaptor.getValue();
-        assertThat(actualFields, containsInAnyOrder(lastName));
-    }
-
-    @Test
-    public void shouldIngoreRecordsWithNoErrors() {
-        when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema);
-
-        BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", "no such field: first_name");
-        Map<Long, List<BigQueryError>> errorInfoMap = ImmutableMap.of(1L, Collections.singletonList(noSuchFieldError));
-
-        Record validRecordWithFirstName = Record.builder()
-                .columns(ImmutableMap.of("first_name", "john doe"))
-                .build();
-
-        Record validRecordWithLastName = Record.builder()
-                .columns(ImmutableMap.of("last_name", "john carmack"))
-                .build();
-
-        List<Record> validRecords = asList(validRecordWithFirstName, validRecordWithLastName);
-
-        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, bigQuerySinkConfig, instrumentation);
-        jsonErrorHandler.handle(errorInfoMap, validRecords);
-
-        verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture());
-
-        Field lastName = getField("last_name", LegacySQLTypeName.STRING);
-        List<Field> actualFields = fieldsArgumentCaptor.getValue();
-        assertThat(actualFields, containsInAnyOrder(lastName));
-    }
-
-    @Test
-    public void shouldUpdateOnlyUniqueFields() {
-        when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema);
-
-        BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", "no such field: first_name");
-        Map<Long, List<BigQueryError>> errorInfoMap = ImmutableMap.of(
-                0L, Collections.singletonList(noSuchFieldError),
-                1L, Collections.singletonList(noSuchFieldError),
-                2L, Collections.singletonList(noSuchFieldError));
-
-        Record validRecordWithFirstName = Record.builder()
-                .columns(ImmutableMap.of("first_name", "john doe"))
-                .build();
-
-        Map<String, Object> columnsMapWithLastName = ImmutableMap.of("last_name", "john carmack");
-        Record validRecordWithLastName = Record.builder()
-                .columns(columnsMapWithLastName)
-                .build();
-        Record anotheRecordWithLastName = Record.builder()
-                .columns(columnsMapWithLastName)
-                .build();
-
-        List<Record> validRecords = ImmutableList.of(validRecordWithFirstName, validRecordWithLastName, anotheRecordWithLastName);
-
-        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, bigQuerySinkConfig, instrumentation);
-        jsonErrorHandler.handle(errorInfoMap, validRecords);
-
-        verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture());
-
-        Field lastName = getField("last_name", LegacySQLTypeName.STRING);
-        Field firstName = getField("first_name", LegacySQLTypeName.STRING);
-        List<Field> actualFields = fieldsArgumentCaptor.getValue();
-        assertThat(actualFields, containsInAnyOrder(firstName, lastName));
-    }
-
-    @Test
-    public void shouldUpdatWithBothMissingFieldsAndExistingTableFields() {
-        //existing table fields
-        Field lastName = getField("last_name", LegacySQLTypeName.STRING);
-        Field firstName = getField("first_name", LegacySQLTypeName.STRING);
-
-        Schema nonEmptyTableSchema = Schema.of(firstName, lastName);
-        when(bigQueryClient.getSchema()).thenReturn(nonEmptyTableSchema);
-
-        BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", "no such field: first_name");
-        Map<Long, List<BigQueryError>> errorInfoMap = ImmutableMap.of(
-                0L, Collections.singletonList(noSuchFieldError),
-                1L, Collections.singletonList(noSuchFieldError),
-                2L, Collections.singletonList(noSuchFieldError));
-
-        Map<String, Object> columnsMapWithFistName = ImmutableMap.of(
-                "first_name", "john doe",
-                "newFieldAddress", "planet earth");
-        Record validRecordWithFirstName = Record.builder()
-                .columns(columnsMapWithFistName)
-                .build();
-
-        Record validRecordWithLastName = Record.builder()
-                .columns(ImmutableMap.of("newFieldDog", "golden retriever"))
-                .build();
-        Record anotheRecordWithLastName = Record.builder()
-                .columns(ImmutableMap.of("newFieldDog", "golden retriever"))
-                .build();
-
-        List<Record> validRecords = ImmutableList.of(validRecordWithFirstName, validRecordWithLastName, anotheRecordWithLastName);
-
-        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, bigQuerySinkConfig, instrumentation);
-        jsonErrorHandler.handle(errorInfoMap, validRecords);
-
-        verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture());
-
-        //missing fields
-        Field newFieldDog = getField("newFieldDog", LegacySQLTypeName.STRING);
-        Field newFieldAddress = getField("newFieldAddress", LegacySQLTypeName.STRING);
-
-        List<Field> actualFields = fieldsArgumentCaptor.getValue();
-        assertThat(actualFields, containsInAnyOrder(firstName, lastName, newFieldDog, newFieldAddress));
-    }
-
-    @Test
-    public void shouldUpsertTableWithPartitionKey() {
-        when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema);
-
-        BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", "no such field: first_name");
-        Map<Long, List<BigQueryError>> errorInfoMap = ImmutableMap.of(
-                0L, Collections.singletonList(noSuchFieldError),
-                1L, Collections.singletonList(noSuchFieldError));
-
-        Record validRecordWithFirstName = Record.builder()
-                .columns(ImmutableMap.of("first_name", "john doe"))
-                .build();
-
-        Map<String, Object> columnsMapWithTimestamp = ImmutableMap.of(
-                "last_name", "john carmack",
-                "event_timestamp_partition", "today's date");
-        Record validRecordWithLastName = Record.builder().columns(columnsMapWithTimestamp).build();
-
-        List<Record> validRecords = ImmutableList.of(validRecordWithFirstName, validRecordWithLastName);
-
-        Map<String, String> envMap = ImmutableMap.of(
-                "SINK_BIGQUERY_TABLE_PARTITIONING_ENABLE", "true",
-                "SINK_BIGQUERY_TABLE_PARTITION_KEY", "event_timestamp_partition",
-                "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp_partition=timestamp");
-        BigQuerySinkConfig partitionKeyConfig = ConfigFactory.create(BigQuerySinkConfig.class, envMap);
-        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, partitionKeyConfig, instrumentation);
-        jsonErrorHandler.handle(errorInfoMap, validRecords);
-
-        verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture());
-
-        Field firstName = getField("first_name", LegacySQLTypeName.STRING);
-        Field lastName = getField("last_name", LegacySQLTypeName.STRING);
-        Field eventTimestamp = getField("event_timestamp_partition", LegacySQLTypeName.TIMESTAMP);
-        List<Field> actualFields = fieldsArgumentCaptor.getValue();
-        assertThat(actualFields, containsInAnyOrder(firstName, lastName, eventTimestamp));
-    }
-
-    @Test
-    public void shouldThrowExceptionWhenCastFieldsToStringNotTrue() {
-        when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema);
-
-        BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", "no such field: first_name");
-        Map<Long, List<BigQueryError>> errorInfoMap = ImmutableMap.of(0L, Collections.singletonList(noSuchFieldError));
-
-        Record validRecord = Record.builder()
-                .columns(ImmutableMap.of("first_name", "john doe"))
-                .build();
-
-        List<Record> records = Collections.singletonList(validRecord);
-        BigQuerySinkConfig stringDisableConfig = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of(
-                "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "false"));
-        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, stringDisableConfig, instrumentation);
-        assertThrows(UnsupportedOperationException.class, () -> jsonErrorHandler.handle(errorInfoMap, records));
-
-        verify(bigQueryClient, never()).upsertTable(any());
-    }
-
-    @Test
-    public void shouldUpdateMissingMetadataFields() {
-        //existing table fields
-        Field lastName = getField("last_name", LegacySQLTypeName.STRING);
-        Field firstName = getField("first_name", LegacySQLTypeName.STRING);
-
-        Schema nonEmptyTableSchema = Schema.of(firstName, lastName);
-        when(bigQueryClient.getSchema()).thenReturn(nonEmptyTableSchema);
-
-        BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", "no such field: first_name");
-        Map<Long, List<BigQueryError>> errorInfoMap = ImmutableMap.of(
-                0L, Collections.singletonList(noSuchFieldError),
-                1L, Collections.singletonList(noSuchFieldError),
-                2L, Collections.singletonList(noSuchFieldError));
-
-        Map<String, Object> columnsMapWithFistName = ImmutableMap.of(
-                "first_name", "john doe",
-                "newFieldAddress", "planet earth",
-                "message_offset", 111,
-                "load_time", new DateTime(System.currentTimeMillis()));
-        Record validRecordWithFirstName = Record.builder()
-                .columns(columnsMapWithFistName)
-                .metadata(ImmutableMap.of(
-                        "message_offset", 111,
-                        "load_time", new DateTime(System.currentTimeMillis())))
-                .build();
-
-        Map<String, Object> columnsMapWithNewFieldDog = ImmutableMap.of(
-                "newFieldDog", "golden retriever",
-                "load_time", new DateTime(System.currentTimeMillis()),
-                "message_offset", 11);
-        Record validRecordWithLastName = Record.builder()
-                .columns(columnsMapWithNewFieldDog)
-                .metadata(ImmutableMap.of(
-                        "load_time", new DateTime(System.currentTimeMillis()),
-                        "message_offset", 11))
-                .build();
-        Record anotherRecordWithLastName = Record.builder()
-                .columns(columnsMapWithNewFieldDog)
-                .build();
-
-        List<Record> validRecords = asList(validRecordWithFirstName, validRecordWithLastName, anotherRecordWithLastName);
-
-        Map<String, String> config = ImmutableMap.of("SINK_BIGQUERY_METADATA_COLUMNS_TYPES",
-                "message_offset=integer,load_time=timestamp");
-        BigQuerySinkConfig sinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, config);
-        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, sinkConfig, instrumentation);
-        jsonErrorHandler.handle(errorInfoMap, validRecords);
-
-        verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture());
-
-        //missing fields
-        Field newFieldDog = getField("newFieldDog", LegacySQLTypeName.STRING);
-        Field newFieldAddress = getField("newFieldAddress", LegacySQLTypeName.STRING);
-
-        Field messageOffset = getField("message_offset", LegacySQLTypeName.INTEGER);
-        Field loadTime = getField("load_time", LegacySQLTypeName.TIMESTAMP);
-        List<Field> actualFields = fieldsArgumentCaptor.getValue();
-        assertThat(actualFields,
-                containsInAnyOrder(messageOffset, loadTime, firstName, lastName, newFieldDog, newFieldAddress));
-    }
-
-    @Test
-    public void shouldUpdateMissingMetadataFieldsAndDefaultColumns() {
-        //existing table fields
-        Field lastName = getField("last_name", LegacySQLTypeName.STRING);
-        Field firstName = getField("first_name", LegacySQLTypeName.STRING);
-
-        Schema nonEmptyTableSchema = Schema.of(firstName, lastName);
-        when(bigQueryClient.getSchema()).thenReturn(nonEmptyTableSchema);
-
-        BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", "no such field: first_name");
-        Map<Long, List<BigQueryError>> errorInfoMap = ImmutableMap.of(
-                0L, Collections.singletonList(noSuchFieldError),
-                1L, Collections.singletonList(noSuchFieldError),
-                2L, Collections.singletonList(noSuchFieldError));
-
-        Map<String, Object> columnsMapWithFistName = ImmutableMap.of(
-                "first_name", "john doe",
-                "newFieldAddress", "planet earth",
-                "depot", 123,
-                "message_offset", 111,
-                "load_time", new DateTime(System.currentTimeMillis()));
-        Record validRecordWithFirstName = Record.builder()
-                .columns(columnsMapWithFistName)
-                .metadata(ImmutableMap.of(
-                        "message_offset", 111,
-                        "load_time", new DateTime(System.currentTimeMillis())))
-                .build();
-
-        Map<String, Object> columnsMapWithNewFieldDog = ImmutableMap.of(
-                "newFieldDog", "golden retriever",
-                "load_time", new DateTime(System.currentTimeMillis()),
-                "message_offset", 11);
-        Record validRecordWithLastName = Record.builder()
-                .columns(columnsMapWithNewFieldDog)
-                .metadata(ImmutableMap.of(
-                        "load_time", new DateTime(System.currentTimeMillis()),
-                        "message_offset", 11))
-                .build();
-        Record anotheRecordWithLastName = Record.builder()
-                .columns(columnsMapWithNewFieldDog)
-                .build();
-
-        List<Record> validRecords = asList(validRecordWithFirstName, validRecordWithLastName, anotheRecordWithLastName);
-
-        Map<String, String> config = ImmutableMap.of("SINK_BIGQUERY_METADATA_COLUMNS_TYPES",
-                "message_offset=integer,load_time=timestamp",
-                "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp_partition=timestamp,depot=integer");
-        BigQuerySinkConfig sinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, config);
-        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, sinkConfig, instrumentation);
-        jsonErrorHandler.handle(errorInfoMap, validRecords);
-
-        verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture());
-
-        //missing fields
-        Field newFieldDog = getField("newFieldDog", LegacySQLTypeName.STRING);
-        Field newFieldAddress = getField("newFieldAddress", LegacySQLTypeName.STRING);
-
-        Field messageOffset = getField("message_offset", LegacySQLTypeName.INTEGER);
-        Field loadTime = getField("load_time", LegacySQLTypeName.TIMESTAMP);
-        Field depot = getField("depot", LegacySQLTypeName.INTEGER);
-        List<Field> actualFields = fieldsArgumentCaptor.getValue();
-        assertThat(actualFields,
-                containsInAnyOrder(messageOffset, loadTime, firstName, lastName, newFieldDog, newFieldAddress, depot));
-    }
-
-    @Test
-    public void shouldNotAddMetadataFieldsWhenDisabled() {
-        //existing table fields
-        Field lastName = getField("last_name", LegacySQLTypeName.STRING);
-        Field firstName = getField("first_name", LegacySQLTypeName.STRING);
-
-        Schema nonEmptyTableSchema = Schema.of(firstName, lastName);
-        when(bigQueryClient.getSchema()).thenReturn(nonEmptyTableSchema);
-
-        BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", "no such field: first_name");
-        Map<Long, List<BigQueryError>> errorInfoMap = ImmutableMap.of(
-                0L, Collections.singletonList(noSuchFieldError),
-                1L, Collections.singletonList(noSuchFieldError),
-                2L, Collections.singletonList(noSuchFieldError));
-
-        Map<String, Object> columnsMapWithFistName = ImmutableMap.of(
-                "first_name", "john doe",
-                "newFieldAddress", "planet earth");
-        Record validRecordWithFirstName = Record.builder()
-                .columns(columnsMapWithFistName)
-                .metadata(ImmutableMap.of(
-                        "message_offset", 111,
-                        "load_time", new DateTime(System.currentTimeMillis())))
-                .build();
-
-        Record validRecordWithLastName = Record.builder()
-                .columns(ImmutableMap.of(
-                        "newFieldDog", "golden retriever"))
-                .metadata(ImmutableMap.of(
-                        "load_time", new DateTime(System.currentTimeMillis()),
-                        "message_offset", 11))
-                .build();
-        Record anotheRecordWithLastName = Record.builder()
-                .columns(ImmutableMap.of(
-                        "newFieldDog", "german sheppperd"))
-                .build();
-
-        List<Record> validRecords = asList(validRecordWithFirstName, validRecordWithLastName, anotheRecordWithLastName);
-
-        Map<String, String> config = ImmutableMap
-                .of("SINK_BIGQUERY_METADATA_COLUMNS_TYPES", "message_offset=integer,load_time=timestamp",
-                        "SINK_BIGQUERY_ADD_METADATA_ENABLED", "false");
-        BigQuerySinkConfig sinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, config);
-        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, sinkConfig, instrumentation);
-        jsonErrorHandler.handle(errorInfoMap, validRecords);
-
-        verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture());
-
-        //missing fields
-        Field newFieldDog = getField("newFieldDog", LegacySQLTypeName.STRING);
-        Field newFieldAddress = getField("newFieldAddress", LegacySQLTypeName.STRING);
-
-        List<Field> actualFields = fieldsArgumentCaptor.getValue();
-        assertThat(actualFields,
-                containsInAnyOrder(firstName, lastName, newFieldDog, newFieldAddress));
-    }
-
-    @Test
-    public void shouldThrowErrorForNamespacedMetadataNotSupported() {
-        //existing table fields
-        Field lastName = getField("last_name", LegacySQLTypeName.STRING);
-        Field firstName = getField("first_name", LegacySQLTypeName.STRING);
-
-        Schema nonEmptyTableSchema = Schema.of(firstName, lastName);
-        when(bigQueryClient.getSchema()).thenReturn(nonEmptyTableSchema);
-
-        BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", "no such field: first_name");
-        Map<Long, List<BigQueryError>> errorInfoMap = ImmutableMap.of(
-                0L, Collections.singletonList(noSuchFieldError),
-                1L, Collections.singletonList(noSuchFieldError),
-                2L, Collections.singletonList(noSuchFieldError));
-
-        Map<String, Object> columnsMapWithFistName = ImmutableMap.of(
-                "first_name", "john doe",
-                "newFieldAddress", "planet earth",
-                "message_offset", 111);
-        Record validRecordWithFirstName = Record.builder()
-                .columns(columnsMapWithFistName)
-                .build();
-
-        Map<String, Object> columnsMapWithNewFieldDog = ImmutableMap.of(
-                "newFieldDog", "golden retriever",
-                "load_time", new DateTime(System.currentTimeMillis()));
-        Record validRecordWithLastName = Record.builder()
-                .columns(columnsMapWithNewFieldDog)
-                .build();
-        Record anotheRecordWithLastName = Record.builder()
-                .columns(columnsMapWithNewFieldDog)
-                .build();
-
-        List<Record> validRecords = asList(validRecordWithFirstName, validRecordWithLastName, anotheRecordWithLastName);
-
-        Map<String, String> config = ImmutableMap.of("SINK_BIGQUERY_METADATA_COLUMNS_TYPES",
-                "message_offset=integer,load_time=timestamp",
-                "SINK_BIGQUERY_METADATA_NAMESPACE", "hello_world_namespace");
-        BigQuerySinkConfig sinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, config);
-        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, sinkConfig, instrumentation);
-        assertThrows(UnsupportedOperationException.class, () -> jsonErrorHandler.handle(errorInfoMap, validRecords));
-        verify(bigQueryClient, never()).upsertTable(any());
-    }
-}
-
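Taken together, the deleted handler tests describe the JSON dynamic-schema flow: when BigQuery rejects rows with a "no such field" error, `JsonErrorHandler.handle` collects the missing columns from exactly those records, merges them with the table's existing schema plus any configured metadata and default columns, and calls `upsertTable`; rows failing only for other reasons are left alone, and unsupported modes (namespaced metadata, non-string defaults) raise `UnsupportedOperationException`. A condensed sketch of that flow, with the mock-backed names (`bigQueryClient`, `sinkConfig`, `instrumentation`, `records`) assumed from the tests above:

```java
// Sketch distilled from the deleted tests (pre-rename io.odpf API).
JsonErrorHandler handler = new JsonErrorHandler(bigQueryClient, sinkConfig, instrumentation);

// Keyed by record index: record 0 was rejected because "first_name" is missing.
Map<Long, List<BigQueryError>> insertErrors = ImmutableMap.of(
        0L, Collections.singletonList(
                new BigQueryError("invalid", "first_name", "no such field: first_name")));

// Derives the missing columns from record 0 and upserts the table schema.
handler.handle(insertErrors, records);
```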
diff --git a/src/test/java/io/odpf/depot/bigquery/json/BigqueryJsonUpdateListenerTest.java b/src/test/java/io/odpf/depot/bigquery/json/BigqueryJsonUpdateListenerTest.java
deleted file mode 100644
index 1ea8c33a..00000000
--- a/src/test/java/io/odpf/depot/bigquery/json/BigqueryJsonUpdateListenerTest.java
+++ /dev/null
@@ -1,211 +0,0 @@
-package io.odpf.depot.bigquery.json;
-
-import com.google.cloud.bigquery.Field;
-import com.google.cloud.bigquery.LegacySQLTypeName;
-import com.google.cloud.bigquery.Schema;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import io.odpf.depot.bigquery.client.BigQueryClient;
-import io.odpf.depot.bigquery.converter.MessageRecordConverter;
-import io.odpf.depot.bigquery.converter.MessageRecordConverterCache;
-import io.odpf.depot.config.BigQuerySinkConfig;
-import io.odpf.depot.metrics.Instrumentation;
-import org.aeonbits.owner.ConfigFactory;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-
-import java.util.Collections;
-import java.util.List;
-
-import static org.hamcrest.MatcherAssert.assertThat;
-import static org.hamcrest.Matchers.containsInAnyOrder;
-import static org.junit.Assert.assertThrows;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.when;
-
-public class BigqueryJsonUpdateListenerTest {
-
-    private MessageRecordConverterCache converterCache;
-    private BigQueryClient mockBqClient;
-    private Instrumentation instrumentation;
-
-    @Before
-    public void setUp() throws Exception {
-        converterCache = mock(MessageRecordConverterCache.class);
-        mockBqClient = mock(BigQueryClient.class);
-        Schema emptySchema = Schema.of();
-        when(mockBqClient.getSchema()).thenReturn(emptySchema);
-        instrumentation = mock(Instrumentation.class);
-    }
-
-    @Test
-    public void shouldSetMessageRecordConverterAndUpsertTable() {
-        BigQuerySinkConfig bigQuerySinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, Collections.emptyMap());
-        BigqueryJsonUpdateListener updateListener = new BigqueryJsonUpdateListener(bigQuerySinkConfig, converterCache, mockBqClient, instrumentation);
-        updateListener.setOdpfMessageParser(null);
-        updateListener.updateSchema();
-        verify(converterCache, times(1)).setMessageRecordConverter(any(MessageRecordConverter.class));
-        verify(mockBqClient, times(1)).upsertTable(Collections.emptyList());
-    }
-
-    @Test
-    public void shouldCreateTableWithDefaultColumns() {
-
-        BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of(
-                "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp,first_name=string",
-                "SINK_CONNECTOR_DEFAULT_DATATYPE_STRING_ENABLE", "false"
-        ));
-        BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, converterCache, mockBqClient, instrumentation);
-        bigqueryJsonUpdateListener.updateSchema();
-        List<Field> bqSchemaFields = ImmutableList.of(
-                Field.newBuilder("event_timestamp", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build(),
-                Field.newBuilder("first_name", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
-        verify(mockBqClient, times(1)).upsertTable(bqSchemaFields);
-    }
-
-    @Test
-    public void shouldCreateTableWithDefaultColumnsAndMetadataFields() {
-        BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of(
-                "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp,first_name=string",
-                "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "false",
-                "SINK_BIGQUERY_METADATA_COLUMNS_TYPES", "message_offset=integer,message_topic=string,message_timestamp=timestamp",
-                "SINK_BIGQUERY_ADD_METADATA_ENABLED", "true"
-        ));
-        BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, converterCache, mockBqClient, instrumentation);
-        bigqueryJsonUpdateListener.updateSchema();
-        List<Field> bqSchemaFields = ImmutableList.of(
-                Field.newBuilder("event_timestamp", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build(),
-                Field.newBuilder("first_name", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build(),
-                Field.newBuilder("message_offset", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build(),
-                Field.newBuilder("message_topic", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build(),
-                Field.newBuilder("message_timestamp", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()
-        );
-        ArgumentCaptor<List> listArgumentCaptor = ArgumentCaptor.forClass(List.class);
-        verify(mockBqClient, times(1)).upsertTable(listArgumentCaptor.capture());
-        assertThat(listArgumentCaptor.getValue(), containsInAnyOrder(bqSchemaFields.toArray()));
-    }
-
-
-    @Test
-    public void shouldCreateTableWithDefaultColumnsWithDdifferentTypesAndMetadataFields() {
-        BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of(
-                "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp,first_name=integer",
-                "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "true",
-                "SINK_BIGQUERY_METADATA_COLUMNS_TYPES", "message_offset=integer,message_topic=string,message_timestamp=timestamp",
-                "SINK_BIGQUERY_ADD_METADATA_ENABLED", "true"
-        ));
-        BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, converterCache, mockBqClient, instrumentation);
-        bigqueryJsonUpdateListener.updateSchema();
-        List<Field> bqSchemaFields = ImmutableList.of(
-                Field.newBuilder("event_timestamp", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build(),
-                Field.newBuilder("first_name", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build(),
-                Field.newBuilder("message_offset", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build(),
-                Field.newBuilder("message_topic", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build(),
-                Field.newBuilder("message_timestamp", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build());
-        ArgumentCaptor<List> listArgumentCaptor = ArgumentCaptor.forClass(List.class);
-        verify(mockBqClient, times(1)).upsertTable(listArgumentCaptor.capture());
-        assertThat(listArgumentCaptor.getValue(), containsInAnyOrder(bqSchemaFields.toArray()));
-    }
-
-    @Test
-    public void shouldNotAddMetadataFields() {
-        BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of(
-                "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp,first_name=string",
-                "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "false",
-                "SINK_BIGQUERY_METADATA_COLUMNS_TYPES", "message_offset=integer,message_topic=string,message_timestamp=timestamp",
-                "SINK_BIGQUERY_ADD_METADATA_ENABLED", "false"
-        ));
-        BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, converterCache, mockBqClient, instrumentation);
-        bigqueryJsonUpdateListener.updateSchema();
-        List<Field> bqSchemaFields = ImmutableList.of(
-                Field.newBuilder("event_timestamp", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build(),
-                Field.newBuilder("first_name", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
-        ArgumentCaptor<List> listArgumentCaptor = ArgumentCaptor.forClass(List.class);
-        verify(mockBqClient, times(1)).upsertTable(listArgumentCaptor.capture());
-        assertThat(listArgumentCaptor.getValue(), containsInAnyOrder(bqSchemaFields.toArray()));
-    }
-
-    @Test
-    public void shouldThrowErrorIfDefaultColumnsAndMetadataFieldsContainSameEntryCalledFirstName() {
-        BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of(
-                "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp,first_name=string",
-                "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "false",
-                "SINK_BIGQUERY_METADATA_COLUMNS_TYPES", "message_offset=integer,first_name=integer",
-                "SINK_BIGQUERY_ADD_METADATA_ENABLED", "true"
-        ));
-        BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, converterCache, mockBqClient, instrumentation);
-        assertThrows(IllegalArgumentException.class, bigqueryJsonUpdateListener::updateSchema);
-    }
-
-    @Test
-    public void shouldThrowErrorIfMetadataNamespaceIsNotEmpty() {
-        BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of(
-                "SINK_BIGQUERY_METADATA_COLUMNS_TYPES", "message_offset=integer,first_name=integer",
-                "SINK_BIGQUERY_ADD_METADATA_ENABLED", "true",
-                "SINK_BIGQUERY_METADATA_NAMESPACE", "metadata_namespace"
-        ));
-        BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, converterCache, mockBqClient, instrumentation);
-        assertThrows(UnsupportedOperationException.class, bigqueryJsonUpdateListener::updateSchema);
-    }
-
-    @Test
-    public void shouldCreateTableWithDefaultColumnsAndExistingTableColumns() {
-        Field existingField1 = Field.of("existing_field1", LegacySQLTypeName.STRING);
-        Field existingField2 = Field.of("existing_field2", LegacySQLTypeName.STRING);
-        when(mockBqClient.getSchema()).thenReturn(Schema.of(existingField1,
-                existingField2));
-        BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of(
-                "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp,first_name=string",
-                "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "false"
-        ));
-        BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, converterCache, mockBqClient, instrumentation);
-        bigqueryJsonUpdateListener.updateSchema();
-        ArgumentCaptor<List> listArgumentCaptor = ArgumentCaptor.forClass(List.class);
-        verify(mockBqClient, times(1)).upsertTable(listArgumentCaptor.capture());
-        List<Field> actualFields = listArgumentCaptor.getValue();
-        Field eventTimestampField = Field.newBuilder("event_timestamp", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build();
-        Field firstNameField = Field.newBuilder("first_name", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build();
-        assertThat(actualFields, containsInAnyOrder(eventTimestampField, firstNameField, existingField1, existingField2));
-    }
-
-    @Test
-    public void shouldNotCastPartitionKeyToString() {
-        BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of(
-                "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp,first_name=string",
-                "SINK_BIGQUERY_TABLE_PARTITION_KEY", "event_timestamp",
-                "SINK_BIGQUERY_TABLE_PARTITIONING_ENABLE", "true",
-                "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "true"
-        ));
-        BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, converterCache, mockBqClient, instrumentation);
-        bigqueryJsonUpdateListener.updateSchema();
-        List<Field> bqSchemaFields = ImmutableList.of(
-                Field.newBuilder("event_timestamp", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build(),
-                Field.newBuilder("first_name", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
-        verify(mockBqClient, times(1)).upsertTable(bqSchemaFields);
-    }
-
-    @Test
-    public void shouldThrowErrorWhenPartitionKeyTypeIsNotCorrect() {
-        BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of(
-                "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=integer,first_name=string",
-                "SINK_BIGQUERY_TABLE_PARTITION_KEY", "event_timestamp",
-                "SINK_BIGQUERY_TABLE_PARTITIONING_ENABLE", "true",
-                "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "true"
-        ));
-        BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, converterCache, mockBqClient, instrumentation);
-        assertThrows(UnsupportedOperationException.class, bigqueryJsonUpdateListener::updateSchema);
-    }
-
-    @Test
-    public void shouldThrowExceptionWhenDynamicSchemaNotEnabled() {
-        BigQuerySinkConfig bigQuerySinkConfig = ConfigFactory.create(BigQuerySinkConfig.class,
-                ImmutableMap.of("SINK_BIGQUERY_DYNAMIC_SCHEMA_ENABLE", "false"));
-        assertThrows(UnsupportedOperationException.class,
-                () -> new BigqueryJsonUpdateListener(bigQuerySinkConfig, converterCache, mockBqClient, instrumentation));
-
-    }
-}
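The listener tests above reduce to one behaviour worth restating: `updateSchema()` assembles the target table from `SINK_BIGQUERY_DEFAULT_COLUMNS`, the metadata columns (unless disabled), and whatever columns the table already has, then upserts it; clashing column names, metadata namespaces, badly typed partition keys, and disabled dynamic schema all fail fast. A minimal sketch, assuming the mocks wired up in the deleted `setUp` above:

```java
// Sketch only: config keys and columns taken from the deleted tests.
BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of(
        "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp,first_name=string"));

BigqueryJsonUpdateListener listener =
        new BigqueryJsonUpdateListener(config, converterCache, mockBqClient, instrumentation);
listener.updateSchema();   // upserts event_timestamp (TIMESTAMP) and first_name (STRING)
```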
diff --git a/src/test/java/io/odpf/depot/bigquery/proto/BigqueryFieldsTest.java b/src/test/java/io/odpf/depot/bigquery/proto/BigqueryFieldsTest.java
deleted file mode 100644
index 0029011f..00000000
--- a/src/test/java/io/odpf/depot/bigquery/proto/BigqueryFieldsTest.java
+++ /dev/null
@@ -1,288 +0,0 @@
-package io.odpf.depot.bigquery.proto;
-
-import com.google.cloud.bigquery.Field;
-import com.google.cloud.bigquery.LegacySQLTypeName;
-import com.google.protobuf.DescriptorProtos;
-import io.odpf.depot.message.proto.TestProtoUtil;
-import io.odpf.depot.message.proto.Constants;
-import io.odpf.depot.message.proto.ProtoField;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-import static org.junit.Assert.assertEquals;
-
-public class BigqueryFieldsTest {
-
-    private final Map<DescriptorProtos.FieldDescriptorProto.Type, LegacySQLTypeName> expectedType = new HashMap<DescriptorProtos.FieldDescriptorProto.Type, LegacySQLTypeName>() {{
-        put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_BYTES, LegacySQLTypeName.BYTES);
-        put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, LegacySQLTypeName.STRING);
-        put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_ENUM, LegacySQLTypeName.STRING);
-        put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_BOOL, LegacySQLTypeName.BOOLEAN);
-        put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_DOUBLE, LegacySQLTypeName.FLOAT);
-        put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT, LegacySQLTypeName.FLOAT);
-    }};
-
-    @Test
-    public void shouldTestConvertToSchemaSuccessful() {
-        List<ProtoField> nestedBQFields = new ArrayList<>();
-        nestedBQFields.add(TestProtoUtil.createProtoField("field0_bytes", DescriptorProtos.FieldDescriptorProto.Type.TYPE_BYTES, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-        nestedBQFields.add(TestProtoUtil.createProtoField("field1_string", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-        nestedBQFields.add(TestProtoUtil.createProtoField("field2_bool", DescriptorProtos.FieldDescriptorProto.Type.TYPE_BOOL, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-        nestedBQFields.add(TestProtoUtil.createProtoField("field3_enum", DescriptorProtos.FieldDescriptorProto.Type.TYPE_ENUM, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-        nestedBQFields.add(TestProtoUtil.createProtoField("field4_double", DescriptorProtos.FieldDescriptorProto.Type.TYPE_DOUBLE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-        nestedBQFields.add(TestProtoUtil.createProtoField("field5_float", DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-
-
-        List<Field> fields = BigqueryFields.generateBigquerySchema(TestProtoUtil.createProtoField(nestedBQFields));
-        assertEquals(nestedBQFields.size(), fields.size());
-        IntStream.range(0, nestedBQFields.size())
-                .forEach(index -> {
-                    assertEquals(Field.Mode.NULLABLE, fields.get(index).getMode());
-                    assertEquals(nestedBQFields.get(index).getName(), fields.get(index).getName());
-                    assertEquals(expectedType.get(nestedBQFields.get(index).getType()), fields.get(index).getType());
-                });
-    }
-
-    @Test
-    public void shouldTestShouldConvertIntegerDataTypes() {
-        List<DescriptorProtos.FieldDescriptorProto.Type> allIntTypes = new ArrayList<DescriptorProtos.FieldDescriptorProto.Type>() {{
-            add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64);
-            add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_UINT64);
-            add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32);
-            add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_UINT32);
-            add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FIXED64);
-            add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FIXED32);
-            add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SFIXED32);
-            add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SFIXED64);
-            add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SINT32);
-            add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SINT64);
-        }};
-
-        List<ProtoField> nestedBQFields = IntStream.range(0, allIntTypes.size())
-                .mapToObj(index -> TestProtoUtil.createProtoField("field-" + index, allIntTypes.get(index), DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL))
-                .collect(Collectors.toList());
-
-
-        List<Field> fields = BigqueryFields.generateBigquerySchema(TestProtoUtil.createProtoField(nestedBQFields));
-        assertEquals(nestedBQFields.size(), fields.size());
-        IntStream.range(0, nestedBQFields.size())
-                .forEach(index -> {
-                    assertEquals(Field.Mode.NULLABLE, fields.get(index).getMode());
-                    assertEquals(nestedBQFields.get(index).getName(), fields.get(index).getName());
-                    assertEquals(LegacySQLTypeName.INTEGER, fields.get(index).getType());
-                });
-    }
-
-
-    @Test
-    public void shouldTestShouldConvertNestedField() {
-        List<ProtoField> nestedBQFields = new ArrayList<>();
-        nestedBQFields.add(TestProtoUtil.createProtoField("field1_level2_nested", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-        nestedBQFields.add(TestProtoUtil.createProtoField("field2_level2_nested", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-
-        ProtoField protoField = TestProtoUtil.createProtoField(new ArrayList<ProtoField>() {{
-            add(TestProtoUtil.createProtoField("field1_level1",
-                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
-                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-            add(TestProtoUtil.createProtoField("field2_level1_message",
-                    "some.type.name",
-                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
-                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL,
-                    nestedBQFields));
-        }});
-
-
-        List<Field> fields = BigqueryFields.generateBigquerySchema(protoField);
-
-        assertEquals(protoField.getFields().size(), fields.size());
-        assertEquals(nestedBQFields.size(), fields.get(1).getSubFields().size());
-
-        assertBqField(protoField.getFields().get(0).getName(), LegacySQLTypeName.STRING, Field.Mode.NULLABLE, fields.get(0));
-        assertBqField(protoField.getFields().get(1).getName(), LegacySQLTypeName.RECORD, Field.Mode.NULLABLE, fields.get(1));
-        assertBqField(nestedBQFields.get(0).getName(), LegacySQLTypeName.STRING, Field.Mode.NULLABLE, fields.get(1).getSubFields().get(0));
-        assertBqField(nestedBQFields.get(1).getName(), LegacySQLTypeName.STRING, Field.Mode.NULLABLE, fields.get(1).getSubFields().get(1));
-
-    }
-
-
-    @Test
-    public void shouldTestShouldConvertMultiNestedFields() {
-        List<ProtoField> nestedBQFields = new ArrayList<ProtoField>() {{
-            add(TestProtoUtil.createProtoField("field1_level3_nested",
-                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
-                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-            add(TestProtoUtil.createProtoField("field2_level3_nested",
-                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
-                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-        }};
-
-        ProtoField protoField = TestProtoUtil.createProtoField(new ArrayList<ProtoField>() {{
-            add(TestProtoUtil.createProtoField("field1_level1",
-                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
-                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-
-            add(TestProtoUtil.createProtoField(
-                    "field2_level1_message",
-                    "some.type.name",
-                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
-                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL,
-                    new ArrayList<ProtoField>() {{
-                        add(TestProtoUtil.createProtoField(
-                                "field1_level2",
-                                DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
-                                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-                        add(TestProtoUtil.createProtoField(
-                                "field2_level2_message",
-                                "some.type.name",
-                                DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
-                                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL,
-                                nestedBQFields));
-                        add(TestProtoUtil.createProtoField(
-                                "field3_level2",
-                                DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
-                                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-                        add(TestProtoUtil.createProtoField(
-                                "field4_level2_message",
-                                "some.type.name",
-                                DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
-                                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL,
-                                nestedBQFields));
-                    }}
-            ));
-        }});
-
-        List<Field> fields = BigqueryFields.generateBigquerySchema(protoField);
-
-
-        assertEquals(protoField.getFields().size(), fields.size());
-        assertEquals(4, fields.get(1).getSubFields().size());
-        assertEquals(2, fields.get(1).getSubFields().get(1).getSubFields().size());
-        assertEquals(2, fields.get(1).getSubFields().get(3).getSubFields().size());
-        assertMultipleFields(nestedBQFields, fields.get(1).getSubFields().get(1).getSubFields());
-        assertMultipleFields(nestedBQFields, fields.get(1).getSubFields().get(3).getSubFields());
-    }
-
-    @Test
-    public void shouldTestConvertToSchemaForTimestamp() {
-        ProtoField protoField = TestProtoUtil.createProtoField(new ArrayList<ProtoField>() {{
-            add(TestProtoUtil.createProtoField("field1_timestamp",
-                    Constants.ProtobufTypeName.TIMESTAMP_PROTOBUF_TYPE_NAME,
-                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
-                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-        }});
-
-        List<Field> fields = BigqueryFields.generateBigquerySchema(protoField);
-
-        assertEquals(protoField.getFields().size(), fields.size());
-        assertBqField(protoField.getFields().get(0).getName(), LegacySQLTypeName.TIMESTAMP, Field.Mode.NULLABLE, fields.get(0));
-    }
-
-
-    @Test
-    public void shouldTestConvertToSchemaForSpecialFields() {
-        ProtoField protoField = TestProtoUtil.createProtoField(new ArrayList<ProtoField>() {{
-            add(TestProtoUtil.createProtoField("field1_struct",
-                    Constants.ProtobufTypeName.STRUCT_PROTOBUF_TYPE_NAME,
-                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
-                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-            add(TestProtoUtil.createProtoField("field2_bytes",
-                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_BYTES,
-                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-
-            add(TestProtoUtil.createProtoField("field3_duration",
-                    "." + com.google.protobuf.Duration.getDescriptor().getFullName(),
-                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
-                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL,
-                    new ArrayList<ProtoField>() {
-                        {
-                            add(TestProtoUtil.createProtoField("duration_seconds",
-                                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64,
-                                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-
-                            add(TestProtoUtil.createProtoField("duration_nanos",
                                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32,
-                                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-
-                        }
-                    }));
-
-            add(TestProtoUtil.createProtoField("field3_date",
-                    "." + com.google.type.Date.getDescriptor().getFullName(),
-                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
-                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL,
-                    new ArrayList<ProtoField>() {
-                        {
-                            add(TestProtoUtil.createProtoField("year",
-                                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64,
-                                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-
-                            add(TestProtoUtil.createProtoField("month",
-                                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32,
-                                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-
-                            add(TestProtoUtil.createProtoField("day",
-                                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32,
-                                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
-
-                        }
-                    }));
-
-        }});
-
-        List<Field> fields = BigqueryFields.generateBigquerySchema(protoField);
-
-        assertEquals(protoField.getFields().size(), fields.size());
-        assertBqField(protoField.getFields().get(0).getName(), LegacySQLTypeName.STRING, Field.Mode.NULLABLE, fields.get(0));
-        assertBqField(protoField.getFields().get(1).getName(), LegacySQLTypeName.BYTES, Field.Mode.NULLABLE, fields.get(1));
-        assertBqField(protoField.getFields().get(2).getName(), LegacySQLTypeName.RECORD, Field.Mode.NULLABLE, fields.get(2));
-        assertBqField(protoField.getFields().get(3).getName(), LegacySQLTypeName.RECORD, Field.Mode.NULLABLE, fields.get(3));
-        assertEquals(2, fields.get(2).getSubFields().size());
-        assertBqField("duration_seconds", LegacySQLTypeName.INTEGER, Field.Mode.NULLABLE, fields.get(2).getSubFields().get(0));
-        assertBqField("duration_nanos", LegacySQLTypeName.INTEGER, Field.Mode.NULLABLE, fields.get(2).getSubFields().get(1));
-
-        assertEquals(3, fields.get(3).getSubFields().size());
-        assertBqField("year", LegacySQLTypeName.INTEGER, Field.Mode.NULLABLE, fields.get(3).getSubFields().get(0));
-        assertBqField("month", LegacySQLTypeName.INTEGER, Field.Mode.NULLABLE, fields.get(3).getSubFields().get(1));
-        assertBqField("day", LegacySQLTypeName.INTEGER, Field.Mode.NULLABLE, fields.get(3).getSubFields().get(2));
-    }
-
-    @Test
-    public void shouldTestConvertToSchemaForRepeatedFields() {
-        ProtoField protoField = TestProtoUtil.createProtoField(new ArrayList<ProtoField>() {{
-            add(TestProtoUtil.createProtoField("field1_map",
-                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32,
-                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED));
-            add(TestProtoUtil.createProtoField("field2_repeated",
-                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
-                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED));
-
-        }});
-
-        List<Field> fields = BigqueryFields.generateBigquerySchema(protoField);
-
-        assertEquals(protoField.getFields().size(), fields.size());
-        assertBqField(protoField.getFields().get(0).getName(), LegacySQLTypeName.INTEGER, Field.Mode.REPEATED, fields.get(0));
-        assertBqField(protoField.getFields().get(1).getName(), LegacySQLTypeName.STRING, Field.Mode.REPEATED, fields.get(1));
-    }
-
-    public void assertMultipleFields(List<ProtoField> pfields, List<Field> bqFields) {
-        IntStream.range(0, bqFields.size())
-                .forEach(index -> {
-                    assertBqField(pfields.get(index).getName(), expectedType.get(pfields.get(index).getType()), Field.Mode.NULLABLE, bqFields.get(index));
-                });
-    }
-
-    public void assertBqField(String name, LegacySQLTypeName ftype, Field.Mode mode, Field bqf) {
-        assertEquals(mode, bqf.getMode());
-        assertEquals(name, bqf.getName());
-        assertEquals(ftype, bqf.getType());
-    }
-
-
-}
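The schema-mapping rules the deleted test encodes are compact enough to summarise: every protobuf integer variant maps to `INTEGER`, float and double to `FLOAT`, enum to `STRING`, nested messages to `RECORD`, `google.protobuf.Timestamp` to `TIMESTAMP`, and repeated labels to mode `REPEATED` (otherwise `NULLABLE`). A small sketch of the entry point, using the `TestProtoUtil` helper the tests rely on:

```java
// Sketch only: one optional string field, mapped through generateBigquerySchema.
ProtoField root = TestProtoUtil.createProtoField(new ArrayList<ProtoField>() {{
    add(TestProtoUtil.createProtoField("order_number",
            DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
            DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL));
}});

List<Field> fields = BigqueryFields.generateBigquerySchema(root);
// -> a single NULLABLE STRING field named "order_number"
```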
diff --git a/src/test/java/io/odpf/depot/bigtable/parser/BigTableRowKeyParserTest.java b/src/test/java/io/odpf/depot/bigtable/parser/BigTableRowKeyParserTest.java
deleted file mode 100644
index cfe45f94..00000000
--- a/src/test/java/io/odpf/depot/bigtable/parser/BigTableRowKeyParserTest.java
+++ /dev/null
@@ -1,94 +0,0 @@
-package io.odpf.depot.bigtable.parser;
-
-import com.google.protobuf.Descriptors;
-import com.timgroup.statsd.NoOpStatsDClient;
-import io.odpf.depot.TestKey;
-import io.odpf.depot.TestMessage;
-import io.odpf.depot.TestNestedMessage;
-import io.odpf.depot.TestNestedRepeatedMessage;
-import io.odpf.depot.common.Template;
-import io.odpf.depot.config.BigTableSinkConfig;
-import io.odpf.depot.exception.InvalidTemplateException;
-import io.odpf.depot.message.OdpfMessage;
-import io.odpf.depot.message.OdpfMessageSchema;
-import io.odpf.depot.message.ParsedOdpfMessage;
-import io.odpf.depot.message.SinkConnectorSchemaMessageMode;
-import io.odpf.depot.message.proto.ProtoOdpfMessageParser;
-import io.odpf.depot.metrics.StatsDReporter;
-import org.aeonbits.owner.ConfigFactory;
-import org.junit.Test;
-import org.junit.jupiter.api.Assertions;
-
-import java.io.IOException;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-
-public class BigTableRowKeyParserTest {
-
-    private final Map<String, Descriptors.Descriptor> descriptorsMap = new HashMap<String, Descriptors.Descriptor>() {{
-        put(String.format("%s", TestKey.class.getName()), TestKey.getDescriptor());
-        put(String.format("%s", TestMessage.class.getName()), TestMessage.getDescriptor());
-        put(String.format("%s", TestNestedMessage.class.getName()), TestNestedMessage.getDescriptor());
-        put(String.format("%s", TestNestedRepeatedMessage.class.getName()), TestNestedRepeatedMessage.getDescriptor());
-    }};
-
-    @Test
-    public void shouldReturnParsedRowKeyForValidParameterisedTemplate() throws IOException, InvalidTemplateException {
-        System.setProperty("SINK_BIGTABLE_ROW_KEY_TEMPLATE", "row-%s$key#%s*test,order_number,order_details");
-        System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "io.odpf.depot.TestMessage");
-        BigTableSinkConfig sinkConfig = ConfigFactory.create(BigTableSinkConfig.class, System.getProperties());
-
-        ProtoOdpfMessageParser odpfMessageParser = new ProtoOdpfMessageParser(sinkConfig, new StatsDReporter(new NoOpStatsDClient()), null);
-        OdpfMessageSchema schema = odpfMessageParser.getSchema(sinkConfig.getSinkConnectorSchemaProtoMessageClass(), descriptorsMap);
-
-        byte[] logMessage = TestMessage.newBuilder()
-                .setOrderNumber("xyz-order")
-                .setOrderDetails("eureka")
-                .build()
-                .toByteArray();
-        OdpfMessage message = new OdpfMessage(null, logMessage);
-        ParsedOdpfMessage parsedOdpfMessage = odpfMessageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_MESSAGE, sinkConfig.getSinkConnectorSchemaProtoMessageClass());
-
-        BigTableRowKeyParser bigTableRowKeyParser = new BigTableRowKeyParser(new Template(sinkConfig.getRowKeyTemplate()), schema);
-        String parsedRowKey = bigTableRowKeyParser.parse(parsedOdpfMessage);
-        assertEquals("row-xyz-order$key#eureka*test", parsedRowKey);
-    }
-
-    @Test
-    public void shouldReturnTheRowKeySameAsTemplateWhenTemplateIsValidAndContainsOnlyConstantStrings() throws IOException, InvalidTemplateException {
-        System.setProperty("SINK_BIGTABLE_ROW_KEY_TEMPLATE", "row-key#constant$String");
-        System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "io.odpf.depot.TestMessage");
-        BigTableSinkConfig sinkConfig = ConfigFactory.create(BigTableSinkConfig.class, System.getProperties());
-
-        ProtoOdpfMessageParser odpfMessageParser = new ProtoOdpfMessageParser(sinkConfig, new StatsDReporter(new NoOpStatsDClient()), null);
-        OdpfMessageSchema schema = odpfMessageParser.getSchema(sinkConfig.getSinkConnectorSchemaProtoMessageClass(), descriptorsMap);
-
-        byte[] logMessage = TestMessage.newBuilder()
-                .setOrderNumber("xyz-order")
-                .setOrderDetails("eureka")
-                .build()
-                .toByteArray();
-        OdpfMessage message = new OdpfMessage(null, logMessage);
-        ParsedOdpfMessage parsedOdpfMessage = odpfMessageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_MESSAGE, sinkConfig.getSinkConnectorSchemaProtoMessageClass());
-
-        BigTableRowKeyParser bigTableRowKeyParser = new BigTableRowKeyParser(new Template(sinkConfig.getRowKeyTemplate()), schema);
-        String parsedRowKey = bigTableRowKeyParser.parse(parsedOdpfMessage);
-        assertEquals("row-key#constant$String", parsedRowKey);
-    }
-
-    @Test
-    public void shouldThrowErrorForInvalidTemplate() throws IOException {
-        System.setProperty("SINK_BIGTABLE_ROW_KEY_TEMPLATE", "row-key%s");
-        System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "io.odpf.depot.TestMessage");
-        BigTableSinkConfig sinkConfig = ConfigFactory.create(BigTableSinkConfig.class, System.getProperties());
-
-        ProtoOdpfMessageParser odpfMessageParser = new ProtoOdpfMessageParser(sinkConfig, new StatsDReporter(new NoOpStatsDClient()), null);
-        OdpfMessageSchema schema = odpfMessageParser.getSchema(sinkConfig.getSinkConnectorSchemaProtoMessageClass(), descriptorsMap);
-
-        InvalidTemplateException illegalArgumentException = Assertions.assertThrows(InvalidTemplateException.class, () -> new BigTableRowKeyParser(new Template(sinkConfig.getRowKeyTemplate()), schema));
-        assertEquals("Template is not valid, variables=1, validArgs=1, values=0", illegalArgumentException.getMessage());
-    }
-
-}
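The row-key tests above document the template grammar: the part before the first comma is a format string, the remainder lists the message fields spliced into its `%s` slots, and a mismatch between placeholders and arguments is rejected at construction time. Distilled into a few lines (names as in the deleted test; `schema` and `parsedOdpfMessage` are assumed to be set up as shown above):

```java
// "row-%s$key#%s*test" is filled from order_number and order_details.
BigTableRowKeyParser parser = new BigTableRowKeyParser(
        new Template("row-%s$key#%s*test,order_number,order_details"), schema);

String rowKey = parser.parse(parsedOdpfMessage);   // "row-xyz-order$key#eureka*test"
```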
diff --git a/src/test/java/io/odpf/depot/log/LogSinkTest.java b/src/test/java/io/odpf/depot/log/LogSinkTest.java
deleted file mode 100644
index 81a38bd2..00000000
--- a/src/test/java/io/odpf/depot/log/LogSinkTest.java
+++ /dev/null
@@ -1,115 +0,0 @@
-package io.odpf.depot.log;
-
-import io.odpf.depot.message.json.JsonOdpfMessageParser;
-import io.odpf.depot.OdpfSinkResponse;
-import io.odpf.depot.config.OdpfSinkConfig;
-import io.odpf.depot.error.ErrorInfo;
-import io.odpf.depot.error.ErrorType;
-import io.odpf.depot.exception.OdpfSinkException;
-import io.odpf.depot.message.OdpfMessage;
-import io.odpf.depot.message.OdpfMessageParser;
-import io.odpf.depot.metrics.Instrumentation;
-import io.odpf.depot.metrics.JsonParserMetrics;
-import org.aeonbits.owner.ConfigFactory;
-import org.junit.Before;
-import org.junit.Test;
-import org.mockito.ArgumentCaptor;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.Map;
-
-import static org.hamcrest.Matchers.containsInAnyOrder;
-import static org.junit.Assert.*;
-import static org.mockito.ArgumentMatchers.any;
-import static org.mockito.ArgumentMatchers.eq;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.never;
-import static org.mockito.Mockito.times;
-import static org.mockito.Mockito.verify;
-
-public class LogSinkTest {
-    private final String template = "\n================= DATA =======================\n{}\n================= METADATA =======================\n{}\n";
-    private OdpfSinkConfig config;
-    private OdpfMessageParser odpfMessageParser;
-    private Instrumentation instrumentation;
-    private JsonParserMetrics jsonParserMetrics;
-
-    @Before
-    public void setUp() throws Exception {
-        config = mock(OdpfSinkConfig.class);
-        odpfMessageParser = mock(OdpfMessageParser.class);
-        instrumentation = mock(Instrumentation.class);
-        jsonParserMetrics = new JsonParserMetrics(config);
-
-    }
-
-    @Test
-    public void shouldProcessEmptyMessageWithNoError() throws IOException {
-        LogSink logSink = new LogSink(config, odpfMessageParser, instrumentation);
-        ArrayList<OdpfMessage> messages = new ArrayList<>();
-        OdpfSinkResponse odpfSinkResponse = logSink.pushToSink(messages);
-        Map<Long, ErrorInfo> errors = odpfSinkResponse.getErrors();
-
-        assertEquals(Collections.emptyMap(), errors);
-        verify(odpfMessageParser, never()).parse(any(), any(), any());
-        verify(instrumentation, never()).logInfo(any(), any(), any());
-    }
-
-    @Test
-    public void shouldLogJsonMessages() throws OdpfSinkException {
-        HashMap<String, String> configMap = new HashMap<String, String>() {{
-            put("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", "log_message");
-        }};
-        OdpfSinkConfig odpfSinkConfig = ConfigFactory.create(OdpfSinkConfig.class, configMap);
-        OdpfMessageParser messageParser = new JsonOdpfMessageParser(odpfSinkConfig, instrumentation, jsonParserMetrics);
-        LogSink logSink = new LogSink(odpfSinkConfig, messageParser, instrumentation);
-        ArrayList<OdpfMessage> messages = new ArrayList<>();
-        String validJsonFirstName = "{\"first_name\":\"john\"}";
-        byte[] logMessage1 = validJsonFirstName.getBytes();
-        String validJsonLastName = "{\"last_name\":\"doe\"}";
-        byte[] logMessage2 = validJsonLastName.getBytes();
-        messages.add(new OdpfMessage(null, logMessage1));
-        messages.add(new OdpfMessage(null, logMessage2));
-        OdpfSinkResponse odpfSinkResponse = logSink.pushToSink(messages);
-
-        //assert no error
-        Map<Long, ErrorInfo> errors = odpfSinkResponse.getErrors();
-        assertEquals(Collections.emptyMap(), errors);
-
-        //assert processed message
-        ArgumentCaptor<String> jsonStrCaptor = ArgumentCaptor.forClass(String.class);
-        verify(instrumentation, times(2)).logInfo(eq(template), jsonStrCaptor.capture(), eq(Collections.emptyMap().toString()));
-        assertThat(jsonStrCaptor.getAllValues(), containsInAnyOrder(validJsonFirstName, validJsonLastName));
-    }
-
-    @Test
-    public void shouldReturnErrorResponseAndProcessValidMessage() throws OdpfSinkException {
-        HashMap<String, String> configMap = new HashMap<String, String>() {{
-            put("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", "log_message");
-        }};
-        OdpfSinkConfig odpfSinkConfig = ConfigFactory.create(OdpfSinkConfig.class, configMap);
-
-        OdpfMessageParser messageParser = new JsonOdpfMessageParser(odpfSinkConfig, instrumentation, jsonParserMetrics);
-        LogSink logSink = new LogSink(odpfSinkConfig, messageParser, instrumentation);
-        ArrayList<OdpfMessage> messages = new ArrayList<>();
-        String validJsonFirstName = "{\"first_name\":\"john\"}";
-        byte[] logMessage1 = validJsonFirstName.getBytes();
-        String invalidJson = "{\"last_name";
-        byte[] invalidLogMessage = invalidJson.getBytes();
-        messages.add(new OdpfMessage(null, logMessage1));
-        messages.add(new OdpfMessage(null, invalidLogMessage));
-        OdpfSinkResponse odpfSinkResponse = logSink.pushToSink(messages);
-
-        //assert error
-        ErrorInfo error = odpfSinkResponse.getErrorsFor(1L);
-        assertEquals(ErrorType.DESERIALIZATION_ERROR, error.getErrorType());
-
-        //assert valid message processed
-        ArgumentCaptor<String> jsonStrCaptor = ArgumentCaptor.forClass(String.class);
-        verify(instrumentation, times(1)).logInfo(eq(template), jsonStrCaptor.capture(), eq(Collections.emptyMap().toString()));
-        assertEquals(validJsonFirstName, jsonStrCaptor.getValue().toString());
-    }
-}
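One point the deleted sink test makes explicit: `pushToSink` never throws for bad payloads; it logs every message it can parse and reports failures per message index on the response. A sketch, assuming the JSON parser wiring from the test above:

```java
// Sketch only: same objects as the deleted test (odpfSinkConfig, messageParser, instrumentation).
LogSink logSink = new LogSink(odpfSinkConfig, messageParser, instrumentation);

OdpfSinkResponse response = logSink.pushToSink(java.util.Arrays.asList(
        new OdpfMessage(null, "{\"first_name\":\"john\"}".getBytes()),   // valid, gets logged
        new OdpfMessage(null, "{\"last_name".getBytes())));              // malformed JSON

ErrorInfo error = response.getErrorsFor(1L);   // DESERIALIZATION_ERROR for index 1
```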
diff --git a/src/test/java/io/odpf/depot/message/field/proto/MessageFieldTest.java b/src/test/java/io/odpf/depot/message/field/proto/MessageFieldTest.java
deleted file mode 100644
index ea255122..00000000
--- a/src/test/java/io/odpf/depot/message/field/proto/MessageFieldTest.java
+++ /dev/null
@@ -1,95 +0,0 @@
-package io.odpf.depot.message.field.proto;
-
-import com.google.protobuf.Timestamp;
-import io.odpf.depot.TestMessage;
-import io.odpf.depot.TestNestedRepeatedMessage;
-import org.junit.Test;
-import org.skyscreamer.jsonassert.JSONAssert;
-
-public class MessageFieldTest {
-
-    @Test
-    public void shouldReturnJsonStringForMessage() {
-        TestMessage message = TestMessage.newBuilder()
-                .setOrderNumber("number")
-                .setOrderDetails("details")
-                .setOrderUrl("url")
-                .build();
-        MessageField field = new MessageField(message);
-        String expectedJson = "{\"order_number\":\"number\",\"order_url\":\"url\",\"order_details\":\"details\"}";
-        JSONAssert.assertEquals(expectedJson, field.getString(), true);
-    }
-
-    @Test
-    public void shouldReturnMessageForRepeatedMessage() {
-        TestNestedRepeatedMessage message = TestNestedRepeatedMessage.newBuilder()
-                .addRepeatedMessage(TestMessage.newBuilder()
-                        .setOrderNumber("number")
-                        .setOrderDetails("details")
-                        .setOrderUrl("url")
-                        .build())
-                .addRepeatedMessage(TestMessage.newBuilder()
-                        .setOrderNumber("o2")
-                        .setOrderDetails("d2")
-                        .setOrderUrl("url2")
-                        .build())
-                .setSingleMessage(TestMessage.newBuilder()
-                        .setOrderNumber("order1")
-                        .setOrderDetails("de1")
-                        .setOrderUrl("url1")
-                        .build())
-                .setNumberField(10)
-                .addRepeatedNumberField(12)
-                .addRepeatedNumberField(13)
-                .setSingleTimestamp(Timestamp.newBuilder().setSeconds(1669962594).build())
-                .addRepeatedTimestamp(Timestamp.newBuilder().setSeconds(1669932594).build())
-                .addRepeatedTimestamp(Timestamp.newBuilder().setSeconds(1664932594).build())
-                .build();
-        MessageField field = new MessageField(message);
-        String expectedJson = "{\n"
-                + "  \"single_timestamp\": \"2022-12-02T06:29:54Z\",\n"
-                + "  \"repeated_number_field\": [\n"
-                + "    12,\n"
-                + "    13\n"
-                + "  ],\n"
-                + "  \"repeated_timestamp\": [\n"
-                + "    \"2022-12-01T22:09:54Z\",\n"
-                + "    \"2022-10-05T01:16:34Z\"\n"
-                + "  ],\n"
-                + "  \"repeated_message\": [\n"
-                + "    {\n"
-                + "      \"order_url\": \"url\",\n"
-                + "      \"order_number\": \"number\",\n"
-                + "      \"order_details\": \"details\"\n"
-                + "    },\n"
-                + "    {\n"
-                + "      \"order_url\": \"url2\",\n"
-                + "      \"order_number\": \"o2\",\n"
-                + "      \"order_details\": \"d2\"\n"
-                + "    }\n"
-                + "  ],\n"
-                + "  \"single_message\": {\n"
-                + "    \"order_url\": \"url1\",\n"
-                + "    \"order_number\": \"order1\",\n"
-                + "    \"order_details\": \"de1\"\n"
-                + "  },\n"
-                + "  \"number_field\": 10\n"
-                + "}\n";
-        JSONAssert.assertEquals(expectedJson, field.getString(), true);
-
-        expectedJson = "[\n"
-                + "  {\n"
-                + "    \"order_number\": \"number\",\n"
-                + "    \"order_url\": \"url\",\n"
-                + "    \"order_details\": \"details\"\n"
-                + "  },\n"
-                + "  {\n"
-                + "    \"order_number\": \"o2\",\n"
-                + "    \"order_url\": \"url2\",\n"
-                + "    \"order_details\": \"d2\"\n"
-                + "  }\n"
-                + "]";
-        field = new MessageField(message.getField(message.getDescriptorForType().findFieldByName("repeated_message")));
-        JSONAssert.assertEquals(expectedJson, field.getString(), true);
-    }
-}
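`MessageField`, per the removed test, renders a protobuf message (or a single repeated field of it) as canonical JSON, serialising `Timestamp` values as ISO-8601 strings. In miniature, grounded directly in the assertions above:

```java
// Directly mirrors the first deleted test case.
TestMessage message = TestMessage.newBuilder()
        .setOrderNumber("number")
        .setOrderUrl("url")
        .setOrderDetails("details")
        .build();

String json = new MessageField(message).getString();
// {"order_number":"number","order_url":"url","order_details":"details"}
```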
-import com.google.protobuf.*;
-import com.google.type.Date;
-import io.odpf.depot.TestMessageBQ;
-import io.odpf.depot.TestNestedMessageBQ;
-import io.odpf.depot.TestRecursiveMessageBQ;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-
-public class ProtoFieldParserTest {
-    private ProtoFieldParser protoMappingParser;
-
-    @Before
-    public void setup() {
-        this.protoMappingParser = new ProtoFieldParser();
-    }
-
-    @Test(expected = RuntimeException.class)
-    public void shouldThrowExceptionIfProtoNotFound() {
-        protoMappingParser.parseFields(null, "test", new HashMap<>(), new HashMap<>());
-    }
-
-    @Test(expected = RuntimeException.class)
-    public void shouldThrowExceptionIfNestedProtoNotFound() {
-        Map<String, Descriptors.Descriptor> descriptorMap = new HashMap<String, Descriptors.Descriptor>() {{
-            put("io.odpf.depot.TestMessageBQ", TestMessageBQ.getDescriptor());
-        }};
-        ProtoField protoField = new ProtoField();
-        protoMappingParser.parseFields(protoField, "io.odpf.depot.TestNestedMessageBQ", descriptorMap, new HashMap<>());
-    }
-
-    @Test
-    public void shouldParseProtoSchemaForNonNestedFields() {
-        ArrayList<Descriptors.FileDescriptor> fileDescriptors = new ArrayList<>();
-
-        fileDescriptors.add(TestMessageBQ.getDescriptor().getFile());
-        fileDescriptors.add(Duration.getDescriptor().getFile());
-        fileDescriptors.add(Date.getDescriptor().getFile());
-        fileDescriptors.add(Struct.getDescriptor().getFile());
-        fileDescriptors.add(Timestamp.getDescriptor().getFile());
-
-        Map<String, Descriptors.Descriptor> descriptorMap = getDescriptors(fileDescriptors);
-
-        Map<String, String> typeNameToPackageNameMap = new HashMap<String, String>() {{
-            put(".odpf.depot.TestMessageBQ.CurrentStateEntry", "io.odpf.depot.TestMessageBQ.CurrentStateEntry");
-            put(".google.protobuf.Struct.FieldsEntry", "com.google.protobuf.Struct.FieldsEntry");
-            put(".google.protobuf.Duration", "com.google.protobuf.Duration");
-            put(".google.type.Date", "com.google.type.Date");
-        }};
-
-        ProtoField protoField = new ProtoField();
-        protoField = protoMappingParser.parseFields(protoField, "io.odpf.depot.TestMessageBQ", descriptorMap, typeNameToPackageNameMap);
-        assertTestMessage(protoField.getFields());
-    }
-
-    @Test
-    public void shouldParseProtoSchemaForRecursiveFieldTillMaxLevel() {
-        ArrayList<Descriptors.FileDescriptor> fileDescriptors = new ArrayList<>();
-
-        fileDescriptors.add(TestRecursiveMessageBQ.getDescriptor().getFile());
-
-        Map<String, Descriptors.Descriptor> descriptorMap = getDescriptors(fileDescriptors);
-
-        Map<String, String> typeNameToPackageNameMap = new HashMap<String, String>() {{
-            put(".odpf.depot.TestRecursiveMessageBQ", "io.odpf.depot.TestRecursiveMessageBQ");
-        }};
-
-        ProtoField protoField = new ProtoField();
-        protoField = protoMappingParser.parseFields(protoField, "io.odpf.depot.TestRecursiveMessageBQ", descriptorMap, typeNameToPackageNameMap);
-        assertField(protoField.getFields().get(0), "string_value", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1);
-        assertField(protoField.getFields().get(1), "float_value", DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2);
-
-        ProtoField recursiveField = protoField;
-        int totalLevel = 1;
-        while (recursiveField.getFields().size() == 3) {
-            assertField(protoField.getFields().get(0), "string_value", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1);
-            assertField(protoField.getFields().get(1), "float_value", DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2);
-            recursiveField = recursiveField.getFields().get(2);
-            totalLevel++;
-        }
-        assertEquals(15, totalLevel);
-    }
-
-    @Test
-    public void shouldParseProtoSchemaForNestedFields() {
-        ArrayList<Descriptors.FileDescriptor> fileDescriptors = new ArrayList<>();
-
-        fileDescriptors.add(TestMessageBQ.getDescriptor().getFile());
-        fileDescriptors.add(Duration.getDescriptor().getFile());
-        fileDescriptors.add(Date.getDescriptor().getFile());
-        fileDescriptors.add(Struct.getDescriptor().getFile());
-        fileDescriptors.add(TestNestedMessageBQ.getDescriptor().getFile());
-
-        Map<String, Descriptors.Descriptor> descriptorMap = getDescriptors(fileDescriptors);
-
-        Map<String, String> typeNameToPackageNameMap = new HashMap<String, String>() {{
-            put(".odpf.depot.TestMessageBQ.CurrentStateEntry", "io.odpf.depot.TestMessageBQ.CurrentStateEntry");
-            put(".google.protobuf.Struct.FieldsEntry", "com.google.protobuf.Struct.FieldsEntry");
-            put(".google.protobuf.Duration", "com.google.protobuf.Duration");
-            put(".google.type.Date", "com.google.type.Date");
-            put(".odpf.depot.TestMessageBQ", "io.odpf.depot.TestMessageBQ");
-        }};
-
-        ProtoField protoField = new ProtoField();
-        protoField = protoMappingParser.parseFields(protoField, "io.odpf.depot.TestNestedMessageBQ", descriptorMap, typeNameToPackageNameMap);
-        assertField(protoField.getFields().get(0), "nested_id", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1);
-        assertField(protoField.getFields().get(1), "single_message", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2);
-
-        assertTestMessage(protoField.getFields().get(1).getFields());
-    }
-
-    private Map<String, Descriptors.Descriptor> getDescriptors(ArrayList<Descriptors.FileDescriptor> fileDescriptors) {
-        Map<String, Descriptors.Descriptor> descriptorMap = new HashMap<>();
-        fileDescriptors.forEach(fd -> {
-            String javaPackage = fd.getOptions().getJavaPackage();
-            fd.getMessageTypes().forEach(desc -> {
-                String className = desc.getName();
-                desc.getNestedTypes().forEach(nestedDesc -> {
-                    String nestedClassName = nestedDesc.getName();
-                    descriptorMap.put(String.format("%s.%s.%s", javaPackage, className, nestedClassName), nestedDesc);
-                });
-                descriptorMap.put(String.format("%s.%s", javaPackage, className), desc);
-            });
-        });
-        return descriptorMap;
-    }
-
-    private void assertTestMessage(List<ProtoField> fields) {
-        assertEquals(17, fields.size());
-        assertField(fields.get(0), "order_number", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1);
-        assertField(fields.get(1), "order_url", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2);
-        assertField(fields.get(2), "order_details", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 3);
-        assertField(fields.get(3), "created_at", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 4);
-        assertField(fields.get(4), "status", DescriptorProtos.FieldDescriptorProto.Type.TYPE_ENUM, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 5);
-        assertField(fields.get(5), "discount", DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 6);
-        assertField(fields.get(6), "success", DescriptorProtos.FieldDescriptorProto.Type.TYPE_BOOL, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 7);
-        assertField(fields.get(7), "price", DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 8);
-        assertField(fields.get(8), "current_state", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, 9);
-        assertField(fields.get(9), "user_token", DescriptorProtos.FieldDescriptorProto.Type.TYPE_BYTES, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 10);
-        assertField(fields.get(10), "trip_duration", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 11);
-        assertField(fields.get(11), "aliases", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, 12);
-        assertField(fields.get(12), "properties", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 13);
-        assertField(fields.get(13), "order_date", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 14);
-        assertField(fields.get(14), "updated_at", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, 15);
-        assertField(fields.get(15), "attributes", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, 16);
-        assertField(fields.get(16), "intervals", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, 17);
-
-        assertEquals(String.format(".%s", Duration.getDescriptor().getFullName()), fields.get(10).getTypeName());
-        assertEquals(2, fields.get(10).getFields().size());
-        assertField(fields.get(10).getFields().get(0), "seconds", DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1);
-        assertField(fields.get(10).getFields().get(1), "nanos", DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2);
-
-        assertEquals(String.format(".%s", Date.getDescriptor().getFullName()), fields.get(13).getTypeName());
-        assertEquals(3, fields.get(13).getFields().size());
-        assertField(fields.get(13).getFields().get(0), "year", DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1);
-        assertField(fields.get(13).getFields().get(1), "month", DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2);
-        assertField(fields.get(13).getFields().get(2), "day", DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 3);
-    }
-
-    private void assertField(ProtoField field, String name, DescriptorProtos.FieldDescriptorProto.Type ftype, DescriptorProtos.FieldDescriptorProto.Label flabel, int index) {
-        assertEquals(name, field.getName());
-        assertEquals(ftype, field.getType());
-        assertEquals(flabel, field.getLabel());
-        assertEquals(index, field.getIndex());
-    }
-}
diff --git a/src/test/java/io/odpf/depot/message/proto/ProtoOdpfMessageParserTest.java b/src/test/java/io/odpf/depot/message/proto/ProtoOdpfMessageParserTest.java
deleted file mode 100644
index c29b9329..00000000
--- a/src/test/java/io/odpf/depot/message/proto/ProtoOdpfMessageParserTest.java
+++ /dev/null
@@ -1,92 +0,0 @@
-package io.odpf.depot.message.proto;
-
-import com.google.protobuf.InvalidProtocolBufferException;
-import io.odpf.depot.message.ParsedOdpfMessage;
-import io.odpf.depot.metrics.StatsDReporter;
-import io.odpf.depot.stencil.OdpfStencilUpdateListener;
-import io.odpf.depot.TestMessage;
-import io.odpf.depot.message.SinkConnectorSchemaMessageMode;
-import io.odpf.depot.message.OdpfMessage;
-import io.odpf.depot.config.OdpfSinkConfig;
-import org.aeonbits.owner.ConfigFactory;
-import org.junit.Test;
-
-import java.io.IOException;
-import java.util.HashMap;
-
-import static org.junit.Assert.*;
-import static org.mockito.Mockito.mock;
-
-public class ProtoOdpfMessageParserTest {
-
-    private final HashMap<String, String> configMap = new HashMap<String, String>() {{
-        put("SCHEMA_REGISTRY_STENCIL_ENABLE", "false");
-        put("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "io.odpf.depot.TestMessage");
-    }};
-
-    @Test
-    public void shouldParseLogMessage() throws IOException {
-        OdpfSinkConfig sinkConfig = ConfigFactory.create(OdpfSinkConfig.class, configMap);
-        StatsDReporter statsdReporter = mock(StatsDReporter.class);
-        OdpfStencilUpdateListener protoUpdateListener = mock(OdpfStencilUpdateListener.class);
-        ProtoOdpfMessageParser protoOdpfMessageParser = new ProtoOdpfMessageParser(sinkConfig, statsdReporter, protoUpdateListener);
-        TestMessage testMessage = TestMessage.newBuilder().setOrderNumber("order-1").build();
-        OdpfMessage message = new OdpfMessage(null, testMessage.toByteArray());
-        ParsedOdpfMessage parsedOdpfMessage = protoOdpfMessageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_MESSAGE, "io.odpf.depot.TestMessage");
-        assertEquals(testMessage, parsedOdpfMessage.getRaw());
-
-    }
-
-    @Test
-    public void shouldThrowErrorOnInvalidMessage() {
-        OdpfSinkConfig sinkConfig = ConfigFactory.create(OdpfSinkConfig.class, configMap);
-        StatsDReporter statsdReporter = mock(StatsDReporter.class);
-        OdpfStencilUpdateListener protoUpdateListener = mock(OdpfStencilUpdateListener.class);
-        ProtoOdpfMessageParser protoOdpfMessageParser = new ProtoOdpfMessageParser(sinkConfig, statsdReporter, protoUpdateListener);
-        byte[] invalidMessageBytes = "invalid message".getBytes();
-        OdpfMessage message = new OdpfMessage(null, invalidMessageBytes);
-        assertThrows(InvalidProtocolBufferException.class, () -> {
-            protoOdpfMessageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_MESSAGE, "io.odpf.depot.TestMessage");
-        });
-    }
-
-    @Test
-    public void shouldParseLogKey() throws IOException {
-        OdpfSinkConfig sinkConfig = ConfigFactory.create(OdpfSinkConfig.class, configMap);
-        StatsDReporter statsdReporter = mock(StatsDReporter.class);
-        OdpfStencilUpdateListener protoUpdateListener = mock(OdpfStencilUpdateListener.class);
-        ProtoOdpfMessageParser protoOdpfMessageParser = new ProtoOdpfMessageParser(sinkConfig, statsdReporter, protoUpdateListener);
-        TestMessage testKey = TestMessage.newBuilder().setOrderNumber("order-1").build();
-        OdpfMessage message = new OdpfMessage(testKey.toByteArray(), null);
-        ParsedOdpfMessage parsedOdpfMessage = protoOdpfMessageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_KEY, "io.odpf.depot.TestMessage");
-        assertEquals(testKey, parsedOdpfMessage.getRaw());
-
-    }
-
-    @Test
-    public void shouldThrowErrorOnInvalidKey() {
-        OdpfSinkConfig sinkConfig = ConfigFactory.create(OdpfSinkConfig.class, configMap);
-        StatsDReporter statsdReporter = mock(StatsDReporter.class);
-        OdpfStencilUpdateListener protoUpdateListener = mock(OdpfStencilUpdateListener.class);
-        ProtoOdpfMessageParser protoOdpfMessageParser = new ProtoOdpfMessageParser(sinkConfig, statsdReporter, protoUpdateListener);
-        byte[] invalidKeyBytes = "invalid message".getBytes();
-        OdpfMessage message = new OdpfMessage(invalidKeyBytes, null);
-        assertThrows(InvalidProtocolBufferException.class, () -> {
-            protoOdpfMessageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_KEY, "io.odpf.depot.TestMessage");
-        });
-    }
-
-    @Test
-    public void shouldThrowErrorWhenModeNotDefined() {
-        OdpfSinkConfig sinkConfig = ConfigFactory.create(OdpfSinkConfig.class, configMap);
-        StatsDReporter statsdReporter = mock(StatsDReporter.class);
-        OdpfStencilUpdateListener protoUpdateListener = mock(OdpfStencilUpdateListener.class);
-        ProtoOdpfMessageParser protoOdpfMessageParser = new ProtoOdpfMessageParser(sinkConfig, statsdReporter, protoUpdateListener);
-        byte[] validKeyBytes = TestMessage.newBuilder().setOrderNumber("order-1").build().toByteArray();
-        OdpfMessage message = new OdpfMessage(validKeyBytes, null);
-        IOException ioException = assertThrows(IOException.class, () -> {
-            protoOdpfMessageParser.parse(message, null, null);
-        });
-        assertEquals("parser mode not defined", ioException.getMessage());
-    }
-}
diff --git a/src/test/java/io/odpf/depot/message/proto/converter/fields/StructProtoFieldTest.java b/src/test/java/io/odpf/depot/message/proto/converter/fields/StructProtoFieldTest.java
deleted file mode 100644
index f2c62ad1..00000000
--- a/src/test/java/io/odpf/depot/message/proto/converter/fields/StructProtoFieldTest.java
+++ /dev/null
@@ -1,93 +0,0 @@
-package io.odpf.depot.message.proto.converter.fields;
-
-import com.google.protobuf.*;
-import io.odpf.depot.TestStructMessage;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-
-public class StructProtoFieldTest {
-
-    private StructProtoField structProtoField;
-    private Struct structValue;
-
-    @Before
-    public void setUp() throws Exception {
-        List<Value> listValues = new ArrayList<>();
-        listValues.add(Value.newBuilder().setNumberValue(1).build());
-        listValues.add(Value.newBuilder().setNumberValue(2).build());
-
-        structValue = Struct.newBuilder()
-                .putFields("null_value", Value.newBuilder().setNullValue(NullValue.NULL_VALUE)
-                        .build())
-                .putFields("number_value", Value.newBuilder().setNumberValue(2.0).build())
-                .putFields("string_value", Value.newBuilder().setStringValue("").build())
-                .putFields("bool_value", Value.newBuilder().setBoolValue(false).build())
-                .putFields("struct_value", Value.newBuilder().setStructValue(
-                        Struct.newBuilder().putFields("child_value1", Value.newBuilder().setNumberValue(1.0).build())
-                                .build())
-                        .build())
-                .putFields("list_value", Value.newBuilder().setListValue(ListValue.newBuilder()
-                        .addAllValues(listValues).build()).build())
-                .build();
-        TestStructMessage message = TestStructMessage.newBuilder()
-                .setOrderNumber("123X")
-                .setCustomFields(structValue)
-                .build();
-
-        DynamicMessage dynamicMessage = DynamicMessage.parseFrom(message.getDescriptorForType(), message.toByteArray());
-        Descriptors.FieldDescriptor customValues = dynamicMessage.getDescriptorForType().findFieldByName("custom_fields");
-        structProtoField = new StructProtoField(customValues, dynamicMessage.getField(customValues));
-    }
-
-    @Test
-    public void shouldSerialiseStructIntoJson() {
-        String value = (String) structProtoField.getValue();
-        String jsonStr = "{\"null_value\":null,"
-                + "\"number_value\":2.0,"
-                + "\"string_value\":\"\","
-                + "\"bool_value\":false,"
-                + "\"struct_value\":{\"child_value1\":1.0},"
-                + "\"list_value\":[1.0,2.0]}";
-
-        assertEquals(jsonStr, value);
-    }
-
-    @Test
-    public void shouldSerialiseRepeatedStructsIntoJson() throws InvalidProtocolBufferException {
-        Struct simpleStruct = Struct.newBuilder()
-                .putFields("null_value", Value.newBuilder().setNullValue(NullValue.NULL_VALUE)
-                        .build())
-                .putFields("number_value", Value.newBuilder().setNumberValue(2.0).build())
-                .build();
-
-        TestStructMessage message = TestStructMessage.newBuilder()
-                .setOrderNumber("123X")
-                .addListCustomFields(simpleStruct)
-                .addListCustomFields(simpleStruct)
-                .build();
-
-        DynamicMessage dynamicMessage = DynamicMessage.parseFrom(message.getDescriptorForType(), message.toByteArray());
-        Descriptors.FieldDescriptor listCustomFieldsDescriptor = dynamicMessage.getDescriptorForType().findFieldByName("list_custom_fields");
-        structProtoField = new StructProtoField(listCustomFieldsDescriptor, dynamicMessage.getField(listCustomFieldsDescriptor));
-
-        Object value = structProtoField.getValue();
-
-        List<String> jsonStrList = new ArrayList<>();
-        jsonStrList.add("{\"null_value\":null,\"number_value\":2.0}");
-        jsonStrList.add("{\"null_value\":null,\"number_value\":2.0}");
-
-        assertEquals(jsonStrList, value);
-    }
-
-    @Test
-    public void shouldMatchStruct() {
-        boolean isMatch = structProtoField.matches();
-        assertTrue(isMatch);
-    }
-}
diff --git a/src/test/java/io/odpf/depot/metrics/MetricsTest.java b/src/test/java/io/odpf/depot/metrics/MetricsTest.java
deleted file mode 100644
index bcd7bf62..00000000
--- a/src/test/java/io/odpf/depot/metrics/MetricsTest.java
+++ /dev/null
@@ -1,4 +0,0 @@
-package io.odpf.depot.metrics;
-
-public class MetricsTest {
-}
diff --git a/src/test/java/io/odpf/depot/redis/parsers/RedisKeyValueEntryParserTest.java b/src/test/java/io/odpf/depot/redis/parsers/RedisKeyValueEntryParserTest.java
deleted file mode 100644
index 7a1e23b4..00000000
--- a/src/test/java/io/odpf/depot/redis/parsers/RedisKeyValueEntryParserTest.java
+++ /dev/null
@@ -1,81 +0,0 @@
-package io.odpf.depot.redis.parsers;
-
-import com.google.protobuf.Descriptors;
-import io.odpf.depot.TestKey;
-import io.odpf.depot.TestMessage;
-import io.odpf.depot.TestNestedMessage;
-import io.odpf.depot.TestNestedRepeatedMessage;
-import io.odpf.depot.config.RedisSinkConfig;
-import io.odpf.depot.message.OdpfMessage;
-import io.odpf.depot.message.OdpfMessageSchema;
-import io.odpf.depot.message.ParsedOdpfMessage;
-import io.odpf.depot.message.SinkConnectorSchemaMessageMode;
-import io.odpf.depot.message.proto.ProtoOdpfMessageParser;
-import io.odpf.depot.metrics.StatsDReporter;
-import io.odpf.depot.redis.client.entry.RedisEntry;
-import io.odpf.depot.redis.client.entry.RedisKeyValueEntry;
-import io.odpf.depot.redis.enums.RedisSinkDataType;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.Mock;
-import org.mockito.junit.MockitoJUnitRunner;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThrows;
-import static org.mockito.Mockito.when;
-
-@RunWith(MockitoJUnitRunner.class)
-public class RedisKeyValueEntryParserTest {
-    private final Map<String, Descriptors.Descriptor> descriptorsMap = new HashMap<String, Descriptors.Descriptor>() {{
-        put(String.format("%s", TestKey.class.getName()), TestKey.getDescriptor());
-        put(String.format("%s", TestMessage.class.getName()), TestMessage.getDescriptor());
-        put(String.format("%s", TestNestedMessage.class.getName()), TestNestedMessage.getDescriptor());
-        put(String.format("%s", TestNestedRepeatedMessage.class.getName()), TestNestedRepeatedMessage.getDescriptor());
-    }};
-    @Mock
-    private RedisSinkConfig redisSinkConfig;
-    @Mock
-    private StatsDReporter statsDReporter;
-    private RedisEntryParser redisKeyValueEntryParser;
-    private OdpfMessageSchema schema;
-    private ParsedOdpfMessage parsedOdpfMessage;
-
-    private void redisSinkSetup(String template, String field) throws IOException {
-        when(redisSinkConfig.getSinkRedisDataType()).thenReturn(RedisSinkDataType.KEYVALUE);
-        when(redisSinkConfig.getSinkRedisKeyValueDataFieldName()).thenReturn(field);
-        when(redisSinkConfig.getSinkRedisKeyTemplate()).thenReturn(template);
-        ProtoOdpfMessageParser odpfMessageParser = new ProtoOdpfMessageParser(redisSinkConfig, statsDReporter, null);
-        String schemaClass = "io.odpf.depot.TestMessage";
-        schema = odpfMessageParser.getSchema(schemaClass, descriptorsMap);
-        byte[] logMessage = TestMessage.newBuilder()
-                .setOrderNumber("xyz-order")
-                .setOrderDetails("new-eureka-order")
-                .build()
-                .toByteArray();
-        OdpfMessage message = new OdpfMessage(null, logMessage);
-        parsedOdpfMessage = odpfMessageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaClass);
-        redisKeyValueEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig, statsDReporter, schema);
-    }
-
-    @Test
-    public void shouldConvertParsedOdpfMessageToRedisKeyValueEntry() throws IOException {
-        redisSinkSetup("test-key", "order_details");
-        List<RedisEntry> redisDataEntries = redisKeyValueEntryParser.getRedisEntry(parsedOdpfMessage);
-        RedisKeyValueEntry expectedEntry = new RedisKeyValueEntry("test-key", "new-eureka-order", null);
-        assertEquals(Collections.singletonList(expectedEntry), redisDataEntries);
-    }
-
-    @Test
-    public void shouldThrowExceptionForInvalidKeyValueDataFieldName() throws IOException {
-        redisSinkSetup("test-key", "random-field");
-        IllegalArgumentException exception =
-                assertThrows(IllegalArgumentException.class, () -> redisKeyValueEntryParser.getRedisEntry(parsedOdpfMessage));
-        assertEquals("Invalid field config : random-field", exception.getMessage());
-    }
-}
diff --git a/src/test/java/io/odpf/depot/redis/parsers/RedisListEntryParserTest.java b/src/test/java/io/odpf/depot/redis/parsers/RedisListEntryParserTest.java
deleted file mode 100644
index 555e1f75..00000000
--- a/src/test/java/io/odpf/depot/redis/parsers/RedisListEntryParserTest.java
+++ /dev/null
@@ -1,81 +0,0 @@
-package io.odpf.depot.redis.parsers;
-
-import com.google.protobuf.Descriptors;
-import io.odpf.depot.TestKey;
-import io.odpf.depot.TestMessage;
-import io.odpf.depot.TestNestedMessage;
-import io.odpf.depot.TestNestedRepeatedMessage;
-import io.odpf.depot.config.RedisSinkConfig;
-import io.odpf.depot.message.OdpfMessage;
-import io.odpf.depot.message.OdpfMessageSchema;
-import io.odpf.depot.message.ParsedOdpfMessage;
-import io.odpf.depot.message.SinkConnectorSchemaMessageMode;
-import io.odpf.depot.message.proto.ProtoOdpfMessageParser;
-import io.odpf.depot.metrics.StatsDReporter;
-import io.odpf.depot.redis.client.entry.RedisEntry;
-import io.odpf.depot.redis.client.entry.RedisListEntry;
-import io.odpf.depot.redis.enums.RedisSinkDataType;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.Mock;
-import org.mockito.junit.MockitoJUnitRunner;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertThrows;
-import static org.mockito.Mockito.when;
-
-@RunWith(MockitoJUnitRunner.class)
-public class RedisListEntryParserTest {
-    private final Map<String, Descriptors.Descriptor> descriptorsMap = new HashMap<String, Descriptors.Descriptor>() {{
-        put(String.format("%s", TestKey.class.getName()), TestKey.getDescriptor());
-        put(String.format("%s", TestMessage.class.getName()), TestMessage.getDescriptor());
-        put(String.format("%s", TestNestedMessage.class.getName()), TestNestedMessage.getDescriptor());
-        put(String.format("%s", TestNestedRepeatedMessage.class.getName()), TestNestedRepeatedMessage.getDescriptor());
-    }};
-    @Mock
-    private RedisSinkConfig redisSinkConfig;
-    @Mock
-    private StatsDReporter statsDReporter;
-    private RedisEntryParser redisListEntryParser;
-    private OdpfMessageSchema schema;
-    private ParsedOdpfMessage parsedOdpfMessage;
-
-    private void redisSinkSetup(String template, String field) throws IOException {
-        when(redisSinkConfig.getSinkRedisDataType()).thenReturn(RedisSinkDataType.LIST);
-        when(redisSinkConfig.getSinkRedisListDataFieldName()).thenReturn(field);
-        when(redisSinkConfig.getSinkRedisKeyTemplate()).thenReturn(template);
-        ProtoOdpfMessageParser odpfMessageParser = new ProtoOdpfMessageParser(redisSinkConfig, statsDReporter, null);
-        String schemaClass = "io.odpf.depot.TestMessage";
-        schema = odpfMessageParser.getSchema(schemaClass, descriptorsMap);
-        byte[] logMessage = TestMessage.newBuilder()
-                .setOrderNumber("xyz-order")
-                .setOrderDetails("new-eureka-order")
-                .build()
-                .toByteArray();
-        OdpfMessage message = new OdpfMessage(null, logMessage);
-        parsedOdpfMessage = odpfMessageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaClass);
-        redisListEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig, statsDReporter, schema);
-    }
-
-    @Test
-    public void shouldConvertParsedOdpfMessageToRedisListEntry() throws IOException {
-        redisSinkSetup("test-key", "order_details");
-        List<RedisEntry> redisDataEntries = redisListEntryParser.getRedisEntry(parsedOdpfMessage);
-        RedisListEntry expectedEntry = new RedisListEntry("test-key", "new-eureka-order", null);
-        assertEquals(Collections.singletonList(expectedEntry), redisDataEntries);
-    }
-
-    @Test
-    public void shouldThrowExceptionForInvalidKeyValueDataFieldName() throws IOException {
-        redisSinkSetup("test-key", "random-field");
-        IllegalArgumentException exception =
-                assertThrows(IllegalArgumentException.class, () -> redisListEntryParser.getRedisEntry(parsedOdpfMessage));
-        assertEquals("Invalid field config : random-field", exception.getMessage());
-    }
-}
diff --git a/src/test/java/io/odpf/depot/redis/parsers/RedisParserTest.java b/src/test/java/io/odpf/depot/redis/parsers/RedisParserTest.java
deleted file mode 100644
index d49ec010..00000000
--- a/src/test/java/io/odpf/depot/redis/parsers/RedisParserTest.java
+++ /dev/null
@@ -1,131 +0,0 @@
-package io.odpf.depot.redis.parsers;
-
-import com.google.protobuf.Descriptors;
-import io.odpf.depot.TestKey;
-import io.odpf.depot.TestMessage;
-import io.odpf.depot.TestNestedMessage;
-import io.odpf.depot.TestNestedRepeatedMessage;
-import io.odpf.depot.common.Tuple;
-import io.odpf.depot.config.RedisSinkConfig;
-import io.odpf.depot.config.enums.SinkConnectorSchemaDataType;
-import io.odpf.depot.error.ErrorType;
-import io.odpf.depot.exception.ConfigurationException;
-import io.odpf.depot.message.*;
-import io.odpf.depot.message.proto.ProtoOdpfMessageParser;
-import io.odpf.depot.message.proto.ProtoOdpfParsedMessage;
-import io.odpf.depot.metrics.StatsDReporter;
-import io.odpf.depot.redis.client.entry.RedisKeyValueEntry;
-import io.odpf.depot.redis.enums.RedisSinkDataType;
-import io.odpf.depot.redis.record.RedisRecord;
-import io.odpf.depot.utils.MessageConfigUtils;
-import io.odpf.stencil.Parser;
-import io.odpf.stencil.StencilClientFactory;
-import org.junit.Before;
-import org.junit.Test;
-import org.junit.runner.RunWith;
-import org.mockito.Mock;
-import org.mockito.junit.MockitoJUnitRunner;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.stream.Collectors;
-import java.util.stream.IntStream;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Mockito.when;
-
-@RunWith(MockitoJUnitRunner.class)
-public class RedisParserTest {
-    private final List<OdpfMessage> messages = new ArrayList<>();
-    private final String schemaClass = "io.odpf.depot.TestMessage";
-    private final Map<String, Descriptors.Descriptor> descriptorsMap = new HashMap<String, Descriptors.Descriptor>() {{
-        put(String.format("%s", TestKey.class.getName()), TestKey.getDescriptor());
-        put(String.format("%s", TestMessage.class.getName()), TestMessage.getDescriptor());
-        put(String.format("%s", TestNestedMessage.class.getName()), TestNestedMessage.getDescriptor());
-        put(String.format("%s", TestNestedRepeatedMessage.class.getName()), TestNestedRepeatedMessage.getDescriptor());
-    }};
-    @Mock
-    private RedisSinkConfig redisSinkConfig;
-    @Mock
-    private ProtoOdpfMessageParser odpfMessageParser;
-    @Mock
-    private StatsDReporter statsDReporter;
-    private RedisParser redisParser;
-
-    @Before
-    public void setup() throws IOException {
-        when(redisSinkConfig.getSinkRedisDataType()).thenReturn(RedisSinkDataType.KEYVALUE);
-        when(redisSinkConfig.getSinkRedisKeyTemplate()).thenReturn("test-key");
-        when(redisSinkConfig.getSinkRedisKeyValueDataFieldName()).thenReturn("order_number");
-        when(redisSinkConfig.getSinkConnectorSchemaMessageMode()).thenReturn(SinkConnectorSchemaMessageMode.LOG_MESSAGE);
-        when(redisSinkConfig.getSinkConnectorSchemaProtoMessageClass()).thenReturn(schemaClass);
-        when(redisSinkConfig.getSinkConnectorSchemaDataType()).thenReturn(SinkConnectorSchemaDataType.PROTOBUF);
-        TestMessage message1 = TestMessage.newBuilder().setOrderNumber("test-order-1").setOrderDetails("ORDER-DETAILS-1").build();
-        TestMessage message2 = TestMessage.newBuilder().setOrderNumber("test-order-2").setOrderDetails("ORDER-DETAILS-2").build();
-        TestMessage message3 = TestMessage.newBuilder().setOrderNumber("test-order-3").setOrderDetails("ORDER-DETAILS-3").build();
-        TestMessage message4 = TestMessage.newBuilder().setOrderNumber("test-order-4").setOrderDetails("ORDER-DETAILS-4").build();
-        TestMessage message5 = TestMessage.newBuilder().setOrderNumber("test-order-5").setOrderDetails("ORDER-DETAILS-5").build();
-        TestMessage message6 = TestMessage.newBuilder().setOrderNumber("test-order-6").setOrderDetails("ORDER-DETAILS-6").build();
-        messages.add(new OdpfMessage(null, message1.toByteArray()));
-        messages.add(new OdpfMessage(null, message2.toByteArray()));
-        messages.add(new OdpfMessage(null, message3.toByteArray()));
-        messages.add(new OdpfMessage(null, message4.toByteArray()));
-        messages.add(new OdpfMessage(null, message5.toByteArray()));
-        messages.add(new OdpfMessage(null, message6.toByteArray()));
-    }
-
-    public void setupParserResponse() throws IOException {
-        Parser protoParser = StencilClientFactory.getClient().getParser(TestMessage.class.getName());
-        for (OdpfMessage message : messages) {
-            ParsedOdpfMessage parsedOdpfMessage = new ProtoOdpfParsedMessage(protoParser.parse((byte[]) message.getLogMessage()));
-            when(odpfMessageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaClass)).thenReturn(parsedOdpfMessage);
-        }
-        ProtoOdpfMessageParser messageParser = (ProtoOdpfMessageParser) OdpfMessageParserFactory.getParser(redisSinkConfig, statsDReporter);
-        Tuple<SinkConnectorSchemaMessageMode, String> modeAndSchema = MessageConfigUtils.getModeAndSchema(redisSinkConfig);
-        OdpfMessageSchema schema = messageParser.getSchema(modeAndSchema.getSecond(), descriptorsMap);
-        RedisEntryParser redisEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig, statsDReporter, schema);
-        redisParser = new RedisParser(odpfMessageParser, redisEntryParser, modeAndSchema);
-    }
-
-    @Test
-    public void shouldConvertOdpfMessageToRedisRecords() throws IOException {
-        setupParserResponse();
-        List<RedisRecord> parsedRecords = redisParser.convert(messages);
-        Map<Boolean, List<RedisRecord>> splitterRecords = parsedRecords.stream().collect(Collectors.partitioningBy(RedisRecord::isValid));
-        List<RedisRecord> invalidRecords = splitterRecords.get(Boolean.FALSE);
-        List<RedisRecord> validRecords = splitterRecords.get(Boolean.TRUE);
-        assertEquals(6, validRecords.size());
-        assertTrue(invalidRecords.isEmpty());
-        List<RedisRecord> expectedRecords = new ArrayList<>();
-        expectedRecords.add(new RedisRecord(new RedisKeyValueEntry("test-key", "test-order-1", null), 0L, null, "{}", true));
-        expectedRecords.add(new RedisRecord(new RedisKeyValueEntry("test-key", "test-order-2", null), 1L, null, "{}", true));
-        expectedRecords.add(new RedisRecord(new RedisKeyValueEntry("test-key", "test-order-3", null), 2L, null, "{}", true));
-        expectedRecords.add(new RedisRecord(new RedisKeyValueEntry("test-key", "test-order-4", null), 3L, null, "{}", true));
-        expectedRecords.add(new RedisRecord(new RedisKeyValueEntry("test-key", "test-order-5", null), 4L, null, "{}", true));
-        expectedRecords.add(new RedisRecord(new RedisKeyValueEntry("test-key", "test-order-6", null), 5L, null, "{}", true));
-        IntStream.range(0, expectedRecords.size()).forEach(index -> assertEquals(expectedRecords.get(index).toString(), parsedRecords.get(index).toString()));
-    }
-
-    @Test
-    public void shouldReportValidAndInvalidRecords() throws IOException {
-        setupParserResponse();
-        when(odpfMessageParser.parse(messages.get(2), SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaClass)).thenThrow(new IOException("Error while parsing protobuf"));
-        when(odpfMessageParser.parse(messages.get(3), SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaClass)).thenThrow(new ConfigurationException("Invalid field config : INVALID"));
-        when(odpfMessageParser.parse(messages.get(4), SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaClass)).thenThrow(new IllegalArgumentException("Config REDIS_CONFIG is empty"));
-        when(odpfMessageParser.parse(messages.get(5), SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaClass)).thenThrow(new UnsupportedOperationException("some message"));
-        List<RedisRecord> parsedRecords = redisParser.convert(messages);
-        Map<Boolean, List<RedisRecord>> splitterRecords = parsedRecords.stream().collect(Collectors.partitioningBy(RedisRecord::isValid));
-        List<RedisRecord> invalidRecords = splitterRecords.get(Boolean.FALSE);
-        List<RedisRecord> validRecords = splitterRecords.get(Boolean.TRUE);
-        assertEquals(2, validRecords.size());
-        assertEquals(4, invalidRecords.size());
-        assertEquals(ErrorType.DESERIALIZATION_ERROR, parsedRecords.get(2).getErrorInfo().getErrorType());
-        assertEquals(ErrorType.UNKNOWN_FIELDS_ERROR, parsedRecords.get(3).getErrorInfo().getErrorType());
-        assertEquals(ErrorType.DEFAULT_ERROR, parsedRecords.get(4).getErrorInfo().getErrorType());
-        assertEquals(ErrorType.INVALID_MESSAGE_ERROR, parsedRecords.get(5).getErrorInfo().getErrorType());
-    }
-}
diff --git a/src/test/java/io/odpf/depot/utils/ProtoUtilTest.java b/src/test/java/io/odpf/depot/utils/ProtoUtilTest.java
deleted file mode 100644
index 26a5c7fc..00000000
--- a/src/test/java/io/odpf/depot/utils/ProtoUtilTest.java
+++ /dev/null
@@ -1,72 +0,0 @@
-package io.odpf.depot.utils;
-
-import com.google.protobuf.Descriptors;
-import com.google.protobuf.DynamicMessage;
-import com.google.protobuf.UnknownFieldSet;
-import io.odpf.depot.TestBookingLogMessage;
-import io.odpf.depot.TestLocation;
-import org.junit.Test;
-
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
-public class ProtoUtilTest {
-    @Test
-    public void shouldReturnTrueWhenUnknownFieldsExistOnRootLevelFields() {
-        Descriptors.Descriptor bookingLogMessage = TestBookingLogMessage.getDescriptor();
-        Descriptors.Descriptor location = TestLocation.getDescriptor();
-
-        Descriptors.FieldDescriptor fieldDescriptor = bookingLogMessage.findFieldByName("driver_pickup_location");
-        DynamicMessage dynamicMessage = DynamicMessage.newBuilder(bookingLogMessage)
-                .setField(fieldDescriptor, DynamicMessage.newBuilder(location)
-                        .build())
-                .setUnknownFields(UnknownFieldSet.newBuilder()
-                        .addField(1, UnknownFieldSet.Field.getDefaultInstance())
-                        .addField(2, UnknownFieldSet.Field.getDefaultInstance())
-                        .build())
-                .build();
-
-        boolean unknownFieldExist = ProtoUtils.hasUnknownField(dynamicMessage);
-        assertTrue(unknownFieldExist);
-    }
-
-    @Test
-    public void shouldReturnTrueWhenUnknownFieldsExistOnNestedChildFields() {
-        Descriptors.Descriptor bookingLogMessage = TestBookingLogMessage.getDescriptor();
-        Descriptors.Descriptor location = TestLocation.getDescriptor();
-        Descriptors.FieldDescriptor fieldDescriptor = bookingLogMessage.findFieldByName("driver_pickup_location");
-
-        DynamicMessage dynamicMessage = DynamicMessage.newBuilder(bookingLogMessage)
-                .setField(fieldDescriptor, DynamicMessage.newBuilder(location)
-                        .setUnknownFields(UnknownFieldSet.newBuilder()
-                                .addField(1, UnknownFieldSet.Field.getDefaultInstance())
-                                .addField(2, UnknownFieldSet.Field.getDefaultInstance())
-                                .build())
-                        .build())
-                .build();
-
-        boolean unknownFieldExist = ProtoUtils.hasUnknownField(dynamicMessage);
-        assertTrue(unknownFieldExist);
-    }
-
-    @Test
-    public void shouldReturnFalseWhenNoUnknownFieldsExist() {
-        Descriptors.Descriptor bookingLogMessage = TestBookingLogMessage.getDescriptor();
-        Descriptors.Descriptor location = TestLocation.getDescriptor();
-
-        Descriptors.FieldDescriptor fieldDescriptor = bookingLogMessage.findFieldByName("driver_pickup_location");
-        DynamicMessage dynamicMessage = DynamicMessage.newBuilder(bookingLogMessage)
-                .setField(fieldDescriptor, DynamicMessage.newBuilder(location).build())
-                .build();
-
-        boolean unknownFieldExist = ProtoUtils.hasUnknownField(dynamicMessage);
-        assertFalse(unknownFieldExist);
-    }
-
-    @Test
-    public void shouldReturnFalseWhenRootIsNull() {
-        boolean unknownFieldExist = ProtoUtils.hasUnknownField(null);
-        assertFalse(unknownFieldExist);
-    }
-}
-
diff --git a/src/test/java/io/odpf/depot/bigquery/BigQuerySinkTest.java b/src/test/java/org/raystack/depot/bigquery/BigQuerySinkTest.java
similarity index 52%
rename from src/test/java/io/odpf/depot/bigquery/BigQuerySinkTest.java
rename to src/test/java/org/raystack/depot/bigquery/BigQuerySinkTest.java
index f5220566..fab2b478 100644
--- a/src/test/java/io/odpf/depot/bigquery/BigQuerySinkTest.java
+++ b/src/test/java/org/raystack/depot/bigquery/BigQuerySinkTest.java
@@ -1,23 +1,23 @@
-package io.odpf.depot.bigquery;
+package org.raystack.depot.bigquery;
 
 import com.google.cloud.bigquery.BigQueryError;
 import com.google.cloud.bigquery.InsertAllRequest;
 import com.google.cloud.bigquery.InsertAllResponse;
 import com.google.cloud.bigquery.TableId;
-import io.odpf.depot.metrics.BigQueryMetrics;
-import io.odpf.depot.metrics.Instrumentation;
-import io.odpf.depot.OdpfSinkResponse;
-import io.odpf.depot.bigquery.client.BigQueryClient;
-import io.odpf.depot.bigquery.client.BigQueryRow;
-import io.odpf.depot.bigquery.client.BigQueryRowWithInsertId;
-import io.odpf.depot.bigquery.handler.ErrorHandler;
-import io.odpf.depot.bigquery.converter.MessageRecordConverter;
-import io.odpf.depot.bigquery.converter.MessageRecordConverterCache;
-import io.odpf.depot.bigquery.models.Record;
-import io.odpf.depot.bigquery.models.Records;
-import io.odpf.depot.error.ErrorInfo;
-import io.odpf.depot.error.ErrorType;
-import io.odpf.depot.message.OdpfMessage;
+import org.raystack.depot.bigquery.client.BigQueryClient;
+import org.raystack.depot.bigquery.client.BigQueryRow;
+import org.raystack.depot.bigquery.client.BigQueryRowWithInsertId;
+import org.raystack.depot.bigquery.converter.MessageRecordConverter;
+import org.raystack.depot.bigquery.converter.MessageRecordConverterCache;
+import org.raystack.depot.bigquery.handler.ErrorHandler;
+import org.raystack.depot.bigquery.models.Record;
+import org.raystack.depot.bigquery.models.Records;
+import org.raystack.depot.SinkResponse;
+import org.raystack.depot.error.ErrorInfo;
+import org.raystack.depot.error.ErrorType;
+import org.raystack.depot.metrics.BigQueryMetrics;
+import org.raystack.depot.metrics.Instrumentation;
+import org.raystack.depot.message.Message;
 import org.aeonbits.owner.util.Collections;
 import org.junit.Assert;
 import org.junit.Before;
@@ -31,13 +31,13 @@
 import java.util.List;
 import java.util.Map;
 
-
 public class BigQuerySinkTest {
     private final TableId tableId = TableId.of("test_dataset", "test_table");
     private final MessageRecordConverterCache converterCache = new MessageRecordConverterCache();
     private final BigQueryRow rowCreator = new BigQueryRowWithInsertId(
-            metadata -> metadata.get("topic") + "_" + metadata.get("partition") + "_" + metadata.get("offset") + "_" + metadata.get("timestamp"));
+            metadata -> metadata.get("topic") + "_" + metadata.get("partition") + "_" + metadata.get("offset") + "_"
+                    + metadata.get("timestamp"));
     @Mock
     private BigQueryClient client;
    @Mock
@@ -63,26 +63,39 @@ public void setup() {
 
     @Test
     public void shouldPushToBigQuerySink() {
-        TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        TestMetadata record3Offset = new TestMetadata("topic1", 3, 103, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        TestMetadata record4Offset = new TestMetadata("topic1", 4, 104, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        TestMetadata record5Offset = new TestMetadata("topic1", 5, 104, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        TestMetadata record6Offset = new TestMetadata("topic1", 6, 104, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        OdpfMessage message1 = TestOdpfMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1");
-        OdpfMessage message2 = TestOdpfMessageBuilder.withMetadata(record2Offset).createConsumerRecord("order-2", "order-url-2", "order-details-2");
-        OdpfMessage message3 = TestOdpfMessageBuilder.withMetadata(record3Offset).createConsumerRecord("order-3", "order-url-3", "order-details-3");
-        OdpfMessage message4 = TestOdpfMessageBuilder.withMetadata(record4Offset).createConsumerRecord("order-4", "order-url-4", "order-details-4");
-        OdpfMessage message5 = TestOdpfMessageBuilder.withMetadata(record5Offset).createConsumerRecord("order-5", "order-url-5", "order-details-5");
-        OdpfMessage message6 = TestOdpfMessageBuilder.withMetadata(record6Offset).createConsumerRecord("order-6", "order-url-6", "order-details-6");
-        List<OdpfMessage> messages = Collections.list(message1, message2, message3, message4, message5, message6);
+        TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        TestMetadata record3Offset = new TestMetadata("topic1", 3, 103, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        TestMetadata record4Offset = new TestMetadata("topic1", 4, 104, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        TestMetadata record5Offset = new TestMetadata("topic1", 5, 104, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        TestMetadata record6Offset = new TestMetadata("topic1", 6, 104, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        Message message1 = TestMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1",
+                "order-details-1");
+        Message message2 = TestMessageBuilder.withMetadata(record2Offset).createConsumerRecord("order-2", "order-url-2",
+                "order-details-2");
+        Message message3 = TestMessageBuilder.withMetadata(record3Offset).createConsumerRecord("order-3", "order-url-3",
+                "order-details-3");
+        Message message4 = TestMessageBuilder.withMetadata(record4Offset).createConsumerRecord("order-4", "order-url-4",
+                "order-details-4");
+        Message message5 = TestMessageBuilder.withMetadata(record5Offset).createConsumerRecord("order-5", "order-url-5",
+                "order-details-5");
+        Message message6 = TestMessageBuilder.withMetadata(record6Offset).createConsumerRecord("order-6", "order-url-6",
+                "order-details-6");
+        List<Message> messages = Collections.list(message1, message2, message3, message4, message5, message6);
         Record record1 = new Record(message1.getMetadata(), new HashMap<>(), 0, null);
         Record record2 = new Record(message2.getMetadata(), new HashMap<>(), 1, null);
         Record record3 = new Record(message3.getMetadata(), new HashMap<>(), 2, null);
         Record record4 = new Record(message4.getMetadata(), new HashMap<>(), 3, null);
         Record record5 = new Record(message5.getMetadata(), new HashMap<>(), 4, null);
         Record record6 = new Record(message6.getMetadata(), new HashMap<>(), 5, null);
-        Records records = new Records(Collections.list(record1, record2, record3, record4, record5, record6), java.util.Collections.emptyList());
+        Records records = new Records(Collections.list(record1, record2, record3, record4, record5, record6),
+                java.util.Collections.emptyList());
 
         InsertAllRequest.Builder builder = InsertAllRequest.newBuilder(client.getTableID());
         records.getValidRecords().forEach((Record m) -> builder.addRow(rowCreator.of(m)));
@@ -90,33 +103,48 @@ public void shouldPushToBigQuerySink() {
         Mockito.when(converter.convert(Mockito.eq(messages))).thenReturn(records);
         Mockito.when(client.insertAll(rows)).thenReturn(insertAllResponse);
         Mockito.when(insertAllResponse.hasErrors()).thenReturn(false);
-        OdpfSinkResponse response = sink.pushToSink(messages);
+        SinkResponse response = sink.pushToSink(messages);
         Assert.assertEquals(0, response.getErrors().size());
         Mockito.verify(client, Mockito.times(1)).insertAll(rows);
     }
 
     @Test
     public void shouldReturnInvalidMessages() throws Exception {
-        TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        TestMetadata record3Offset = new TestMetadata("topic1", 3, 103, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        TestMetadata record4Offset = new TestMetadata("topic1", 4, 104, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        TestMetadata record5Offset = new TestMetadata("topic1", 5, 104, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        TestMetadata record6Offset = new TestMetadata("topic1", 6, 104, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        OdpfMessage message1 = TestOdpfMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1");
-        OdpfMessage message2 = TestOdpfMessageBuilder.withMetadata(record2Offset).createConsumerRecord("order-2", "order-url-2", "order-details-2");
-        OdpfMessage message3 = TestOdpfMessageBuilder.withMetadata(record3Offset).createConsumerRecord("order-3", "order-url-3", "order-details-3");
-        OdpfMessage message4 = TestOdpfMessageBuilder.withMetadata(record4Offset).createConsumerRecord("order-4", "order-url-4", "order-details-4");
-        OdpfMessage message5 = TestOdpfMessageBuilder.withMetadata(record5Offset).createConsumerRecord("order-5", "order-url-5", "order-details-5");
-        OdpfMessage message6 = TestOdpfMessageBuilder.withMetadata(record6Offset).createConsumerRecord("order-6", "order-url-6", "order-details-6");
-        List<OdpfMessage> messages = Collections.list(message1, message2, message3, message4, message5, message6);
+        TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        TestMetadata record3Offset = new TestMetadata("topic1", 3, 103, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        TestMetadata record4Offset = new TestMetadata("topic1", 4, 104, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        TestMetadata record5Offset = new TestMetadata("topic1", 5, 104, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        TestMetadata record6Offset = new TestMetadata("topic1", 6, 104, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        Message message1 = TestMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1",
+                "order-details-1");
+        Message message2 = TestMessageBuilder.withMetadata(record2Offset).createConsumerRecord("order-2", "order-url-2",
+                "order-details-2");
+        Message message3 = TestMessageBuilder.withMetadata(record3Offset).createConsumerRecord("order-3", "order-url-3",
+                "order-details-3");
+        Message message4 = TestMessageBuilder.withMetadata(record4Offset).createConsumerRecord("order-4", "order-url-4",
+                "order-details-4");
+        Message message5 = TestMessageBuilder.withMetadata(record5Offset).createConsumerRecord("order-5", "order-url-5",
+                "order-details-5");
+        Message message6 = TestMessageBuilder.withMetadata(record6Offset).createConsumerRecord("order-6", "order-url-6",
+                "order-details-6");
+        List<Message> messages = Collections.list(message1, message2, message3, message4, message5, message6);
         Record record1 = new Record(message1.getMetadata(), new HashMap<>(), 0, null);
-        Record record2 = new Record(message2.getMetadata(), new HashMap<>(), 1, new ErrorInfo(new RuntimeException(), ErrorType.DEFAULT_ERROR));
+        Record record2 = new Record(message2.getMetadata(), new HashMap<>(), 1,
+                new ErrorInfo(new RuntimeException(), ErrorType.DEFAULT_ERROR));
         Record record3 = new Record(message3.getMetadata(), new HashMap<>(), 2, null);
-        Record record4 = new Record(message4.getMetadata(), new HashMap<>(), 3, new ErrorInfo(new RuntimeException(), ErrorType.INVALID_MESSAGE_ERROR));
+        Record record4 = new Record(message4.getMetadata(), new HashMap<>(), 3,
+                new ErrorInfo(new RuntimeException(), ErrorType.INVALID_MESSAGE_ERROR));
         Record record5 = new Record(message5.getMetadata(), new HashMap<>(), 4, null);
         Record record6 = new Record(message6.getMetadata(), new HashMap<>(), 5, null);
-        Records records = new Records(Collections.list(record1, record3, record5, record6), Collections.list(record2, record4));
+        Records records = new Records(Collections.list(record1, record3, record5, record6),
+                Collections.list(record2, record4));
 
         InsertAllRequest.Builder builder = InsertAllRequest.newBuilder(client.getTableID());
         records.getValidRecords().forEach((Record m) -> builder.addRow(rowCreator.of(m)));
@@ -124,7 +152,7 @@ public void shouldReturnInvalidMessages() throws Exception {
         Mockito.when(converter.convert(Mockito.eq(messages))).thenReturn(records);
         Mockito.when(client.insertAll(rows)).thenReturn(insertAllResponse);
         Mockito.when(insertAllResponse.hasErrors()).thenReturn(false);
-        OdpfSinkResponse response = sink.pushToSink(messages);
+        SinkResponse response = sink.pushToSink(messages);
         Assert.assertEquals(2, response.getErrors().size());
         Mockito.verify(client, Mockito.times(1)).insertAll(rows);
@@ -134,26 +162,41 @@
 
     @Test
     public void shouldReturnInvalidMessagesWithFailedInsertMessages() throws Exception {
-        TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        TestMetadata record3Offset = new TestMetadata("topic1", 3, 103, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        TestMetadata record4Offset = new TestMetadata("topic1", 4, 104, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        TestMetadata record5Offset = new TestMetadata("topic1", 5, 104, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        TestMetadata record6Offset = new TestMetadata("topic1", 6, 104, Instant.now().toEpochMilli(), Instant.now().toEpochMilli());
-        OdpfMessage message1 = TestOdpfMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1");
-        OdpfMessage message2 = TestOdpfMessageBuilder.withMetadata(record2Offset).createConsumerRecord("order-2", "order-url-2", "order-details-2");
-        OdpfMessage message3 = TestOdpfMessageBuilder.withMetadata(record3Offset).createConsumerRecord("order-3", "order-url-3", "order-details-3");
-        OdpfMessage message4 = TestOdpfMessageBuilder.withMetadata(record4Offset).createConsumerRecord("order-4", "order-url-4", "order-details-4");
-        OdpfMessage message5 = TestOdpfMessageBuilder.withMetadata(record5Offset).createConsumerRecord("order-5", "order-url-5", "order-details-5");
-        OdpfMessage message6 = TestOdpfMessageBuilder.withMetadata(record6Offset).createConsumerRecord("order-6", "order-url-6", "order-details-6");
-        List<OdpfMessage> messages = Collections.list(message1, message2, message3, message4, message5, message6);
+        TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        TestMetadata record3Offset = new TestMetadata("topic1", 3, 103, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        TestMetadata record4Offset = new TestMetadata("topic1", 4, 104, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        TestMetadata record5Offset = new TestMetadata("topic1", 5, 104, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        TestMetadata record6Offset = new TestMetadata("topic1", 6, 104, Instant.now().toEpochMilli(),
+                Instant.now().toEpochMilli());
+        Message message1 = TestMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1",
+                "order-details-1");
+        Message message2 = TestMessageBuilder.withMetadata(record2Offset).createConsumerRecord("order-2", "order-url-2",
+                "order-details-2");
+        Message message3 = TestMessageBuilder.withMetadata(record3Offset).createConsumerRecord("order-3", "order-url-3",
+                "order-details-3");
+        Message message4 = TestMessageBuilder.withMetadata(record4Offset).createConsumerRecord("order-4", "order-url-4",
+                "order-details-4");
+        Message message5 = TestMessageBuilder.withMetadata(record5Offset).createConsumerRecord("order-5", "order-url-5",
+                "order-details-5");
+        Message message6 = TestMessageBuilder.withMetadata(record6Offset).createConsumerRecord("order-6", "order-url-6",
+                "order-details-6");
+        List<Message> messages = Collections.list(message1, message2, message3, message4, message5, message6);
         Record record1 = new Record(message1.getMetadata(), new HashMap<>(), 0, null);
-        Record record2 = new Record(message2.getMetadata(), new HashMap<>(), 1, new ErrorInfo(new RuntimeException(), ErrorType.DEFAULT_ERROR));
+        Record record2 = new Record(message2.getMetadata(), new HashMap<>(), 1,
+                new ErrorInfo(new RuntimeException(), ErrorType.DEFAULT_ERROR));
         Record record3 = new Record(message3.getMetadata(), new HashMap<>(), 2, null);
-        Record record4 = new Record(message4.getMetadata(), new HashMap<>(), 3, new ErrorInfo(new RuntimeException(), ErrorType.INVALID_MESSAGE_ERROR));
+        Record record4 = new Record(message4.getMetadata(), new HashMap<>(), 3,
+                new ErrorInfo(new RuntimeException(), ErrorType.INVALID_MESSAGE_ERROR));
         Record record5 = new Record(message5.getMetadata(), new HashMap<>(), 4, null);
         Record record6 = new Record(message6.getMetadata(), new HashMap<>(), 5, null);
-        Records records = new Records(Collections.list(record1, record3, record5, record6), Collections.list(record2, record4));
+        Records records = new Records(Collections.list(record1, record3, record5, record6),
+                Collections.list(record2, record4));
 
         InsertAllRequest.Builder builder = InsertAllRequest.newBuilder(client.getTableID());
         records.getValidRecords().forEach((Record m) -> builder.addRow(rowCreator.of(m)));
@@ -163,15 +206,18 @@ public void shouldReturnInvalidMessagesWithFailedInsertMessages() throws Excepti
         Mockito.when(insertAllResponse.hasErrors()).thenReturn(true);
 
         BigQueryError error1 = new BigQueryError("", "US", "");
-        BigQueryError error3 = new BigQueryError("invalid", "", "The destination table's partition tmp$20160101 is outside the allowed bounds. You can only stream to partitions within 1825 days in the past and 366 days in the future relative to the current date");
-
-        Map<Long, List<BigQueryError>> insertErrorsMap = new HashMap<Long, List<BigQueryError>>() {{
-            put(0L, Collections.list(error1));
-            put(2L, Collections.list(error3));
-        }};
+        BigQueryError error3 = new BigQueryError("invalid", "",
+                "The destination table's partition tmp$20160101 is outside the allowed bounds. You can only stream to partitions within 1825 days in the past and 366 days in the future relative to the current date");
+
+        Map<Long, List<BigQueryError>> insertErrorsMap = new HashMap<Long, List<BigQueryError>>() {
+            {
+                put(0L, Collections.list(error1));
+                put(2L, Collections.list(error3));
+            }
+        };
         Mockito.when(insertAllResponse.getInsertErrors()).thenReturn(insertErrorsMap);
-        OdpfSinkResponse response = sink.pushToSink(messages);
+        SinkResponse response = sink.pushToSink(messages);
         Mockito.verify(client, Mockito.times(1)).insertAll(rows);
         Mockito.verify(errorHandler, Mockito.times(1)).handle(Mockito.eq(insertErrorsMap), Mockito.any());
         Assert.assertEquals(4, response.getErrors().size());
diff --git a/src/test/java/io/odpf/depot/bigquery/TestOdpfMessageBuilder.java b/src/test/java/org/raystack/depot/bigquery/TestMessageBuilder.java
similarity index 78%
rename from src/test/java/io/odpf/depot/bigquery/TestOdpfMessageBuilder.java
rename to src/test/java/org/raystack/depot/bigquery/TestMessageBuilder.java
index 85f060d9..f1d0ed28 100644
--- a/src/test/java/io/odpf/depot/bigquery/TestOdpfMessageBuilder.java
+++ b/src/test/java/org/raystack/depot/bigquery/TestMessageBuilder.java
@@ -1,29 +1,28 @@
-package io.odpf.depot.bigquery;
+package org.raystack.depot.bigquery;
 
 import com.google.api.client.util.DateTime;
-import io.odpf.depot.TestKeyBQ;
-import io.odpf.depot.TestMessageBQ;
-import io.odpf.depot.common.Tuple;
-import io.odpf.depot.message.OdpfMessage;
+import org.raystack.depot.TestKeyBQ;
+import org.raystack.depot.TestMessageBQ;
+import org.raystack.depot.common.Tuple;
+import org.raystack.depot.message.Message;
 
 import java.sql.Date;
 import java.time.Instant;
 import java.util.HashMap;
 import java.util.Map;
 
-
-public final class TestOdpfMessageBuilder {
+public final class TestMessageBuilder {
     private long timestamp;
     private String topic;
     private int partition;
     private long offset;
     private long loadTime;
 
-    private TestOdpfMessageBuilder() {
+    private TestMessageBuilder() {
     }
 
-    public static TestOdpfMessageBuilder withMetadata(TestMetadata testMetadata) {
-        TestOdpfMessageBuilder builder = new TestOdpfMessageBuilder();
+    public static TestMessageBuilder withMetadata(TestMetadata testMetadata) {
+        TestMessageBuilder builder = new TestMessageBuilder();
         builder.topic = testMetadata.getTopic();
         builder.partition = testMetadata.getPartition();
         builder.offset = testMetadata.getOffset();
@@ -32,7 +31,7 @@ public static TestOdpfMessageBuilder withMetadata(TestMetadata testMetadata) {
         return builder;
     }
 
String orderDetails) { + public Message createConsumerRecord(String orderNumber, String orderUrl, String orderDetails) { TestKeyBQ key = TestKeyBQ.newBuilder() .setOrderNumber(orderNumber) .setOrderUrl(orderUrl) @@ -42,7 +41,7 @@ public OdpfMessage createConsumerRecord(String orderNumber, String orderUrl, Str .setOrderUrl(orderUrl) .setOrderDetails(orderDetails) .build(); - return new OdpfMessage( + return new Message( key.toByteArray(), message.toByteArray(), new Tuple<>("message_topic", topic), @@ -53,12 +52,12 @@ public OdpfMessage createConsumerRecord(String orderNumber, String orderUrl, Str new Tuple<>("should_be_ignored", timestamp)); } - public OdpfMessage createEmptyValueConsumerRecord(String orderNumber, String orderUrl) { + public Message createEmptyValueConsumerRecord(String orderNumber, String orderUrl) { TestKeyBQ key = TestKeyBQ.newBuilder() .setOrderNumber(orderNumber) .setOrderUrl(orderUrl) .build(); - return new OdpfMessage( + return new Message( key.toByteArray(), null, new Tuple<>("message_topic", topic), diff --git a/src/test/java/io/odpf/depot/bigquery/TestMetadata.java b/src/test/java/org/raystack/depot/bigquery/TestMetadata.java similarity index 90% rename from src/test/java/io/odpf/depot/bigquery/TestMetadata.java rename to src/test/java/org/raystack/depot/bigquery/TestMetadata.java index f4d02a85..6d5d802c 100644 --- a/src/test/java/io/odpf/depot/bigquery/TestMetadata.java +++ b/src/test/java/org/raystack/depot/bigquery/TestMetadata.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery; +package org.raystack.depot.bigquery; import lombok.AllArgsConstructor; import lombok.EqualsAndHashCode; diff --git a/src/test/java/io/odpf/depot/bigquery/client/BigQueryClientTest.java b/src/test/java/org/raystack/depot/bigquery/client/BigQueryClientTest.java similarity index 96% rename from src/test/java/io/odpf/depot/bigquery/client/BigQueryClientTest.java rename to src/test/java/org/raystack/depot/bigquery/client/BigQueryClientTest.java index 084894e3..296be0cc 100644 --- a/src/test/java/io/odpf/depot/bigquery/client/BigQueryClientTest.java +++ b/src/test/java/org/raystack/depot/bigquery/client/BigQueryClientTest.java @@ -1,9 +1,9 @@ -package io.odpf.depot.bigquery.client; +package org.raystack.depot.bigquery.client; import com.google.cloud.bigquery.*; -import io.odpf.depot.config.BigQuerySinkConfig; -import io.odpf.depot.metrics.BigQueryMetrics; -import io.odpf.depot.metrics.Instrumentation; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.metrics.BigQueryMetrics; +import org.raystack.depot.metrics.Instrumentation; import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; @@ -209,18 +209,22 @@ public void shouldUpdateTableIfTableNeedsToSetPartitionExpiry() { when(bqConfig.getBigQueryDatasetLocation()).thenReturn("US"); bqClient = new BigQueryClient(bigquery, bqConfig, metrics, instrumentation); - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("test-1", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("partition_column", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("offset", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("topic", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("load_time", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("timestamp", 
LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("partition", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); - }}; + ArrayList bqSchemaFields = new ArrayList() { + { + add(Field.newBuilder("test-1", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); + add(Field.newBuilder("partition_column", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE) + .build()); + add(Field.newBuilder("offset", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); + add(Field.newBuilder("topic", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); + add(Field.newBuilder("load_time", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); + add(Field.newBuilder("timestamp", LegacySQLTypeName.TIMESTAMP).setMode(Field.Mode.NULLABLE).build()); + add(Field.newBuilder("partition", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build()); + } + }; Schema bqSchema = Schema.of(bqSchemaFields); - StandardTableDefinition standardTableDefinition = new BigQueryTableDefinition(bqConfig).getTableDefinition(bqSchema); + StandardTableDefinition standardTableDefinition = new BigQueryTableDefinition(bqConfig) + .getTableDefinition(bqSchema); TableId tableId = TableId.of(bqConfig.getDatasetName(), bqConfig.getTableName()); TableInfo tableInfo = TableInfo.newBuilder(tableId, standardTableDefinition).build(); diff --git a/src/test/java/org/raystack/depot/bigquery/client/BigQueryResponseParserTest.java b/src/test/java/org/raystack/depot/bigquery/client/BigQueryResponseParserTest.java new file mode 100644 index 00000000..ef4e6aa0 --- /dev/null +++ b/src/test/java/org/raystack/depot/bigquery/client/BigQueryResponseParserTest.java @@ -0,0 +1,116 @@ +package org.raystack.depot.bigquery.client; + +import com.google.cloud.bigquery.BigQueryError; +import com.google.cloud.bigquery.InsertAllResponse; +import org.raystack.depot.bigquery.TestMetadata; +import org.raystack.depot.bigquery.TestMessageBuilder; +import org.raystack.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorType; +import org.raystack.depot.metrics.BigQueryMetrics; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.bigquery.exception.BigQuerySinkException; +import org.raystack.depot.bigquery.models.Record; +import org.aeonbits.owner.util.Collections; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.MockitoAnnotations; + +import java.time.Instant; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +public class BigQueryResponseParserTest { + + @Mock + private InsertAllResponse response; + + @Mock + private Instrumentation instrumentation; + + @Mock + private BigQueryMetrics metrics; + + @Before + public void setup() { + MockitoAnnotations.initMocks(this); + } + + @Test + public void shouldParseResponse() { + TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), + Instant.now().toEpochMilli()); + TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(), + Instant.now().toEpochMilli()); + TestMetadata record3Offset = new TestMetadata("topic1", 3, 103, Instant.now().toEpochMilli(), + Instant.now().toEpochMilli()); + TestMetadata record4Offset = new TestMetadata("topic1", 4, 104, Instant.now().toEpochMilli(), + Instant.now().toEpochMilli()); + TestMetadata record5Offset = new TestMetadata("topic1", 5, 104, 
Instant.now().toEpochMilli(), + Instant.now().toEpochMilli()); + TestMetadata record6Offset = new TestMetadata("topic1", 6, 104, Instant.now().toEpochMilli(), + Instant.now().toEpochMilli()); + Record record1 = new Record( + TestMessageBuilder.withMetadata(record1Offset) + .createConsumerRecord("order-1", "order-url-1", "order-details-1").getMetadata(), + new HashMap<>(), 0, null); + Record record2 = new Record( + TestMessageBuilder.withMetadata(record2Offset) + .createConsumerRecord("order-2", "order-url-2", "order-details-2").getMetadata(), + new HashMap<>(), 1, null); + Record record3 = new Record( + TestMessageBuilder.withMetadata(record3Offset) + .createConsumerRecord("order-3", "order-url-3", "order-details-3").getMetadata(), + new HashMap<>(), 2, null); + Record record4 = new Record( + TestMessageBuilder.withMetadata(record4Offset) + .createConsumerRecord("order-4", "order-url-4", "order-details-4").getMetadata(), + new HashMap<>(), 3, null); + Record record5 = new Record( + TestMessageBuilder.withMetadata(record5Offset) + .createConsumerRecord("order-5", "order-url-5", "order-details-5").getMetadata(), + new HashMap<>(), 4, null); + Record record6 = new Record( + TestMessageBuilder.withMetadata(record6Offset) + .createConsumerRecord("order-6", "order-url-6", "order-details-6").getMetadata(), + new HashMap<>(), 5, null); + List<Record> records = Collections.list(record1, record2, record3, record4, record5, record6); + BigQueryError error1 = new BigQueryError("", "US", ""); + BigQueryError error2 = new BigQueryError("invalid", "US", "no such field"); + BigQueryError error3 = new BigQueryError("invalid", "", + "The destination table's partition tmp$20160101 is outside the allowed bounds. You can only stream to partitions within 1825 days in the past and 366 days in the future relative to the current date"); + BigQueryError error4 = new BigQueryError("stopped", "", ""); + + Map<Long, List<BigQueryError>> insertErrorsMap = new HashMap<Long, List<BigQueryError>>() { + { + put(0L, Collections.list(error1)); + put(1L, Collections.list(error2)); + put(2L, Collections.list(error3)); + put(3L, Collections.list(error4)); + } + }; + Mockito.when(response.hasErrors()).thenReturn(true); + Mockito.when(response.getInsertErrors()).thenReturn(insertErrorsMap); + Mockito.when(metrics.getBigqueryTotalErrorsMetrics()).thenReturn("test"); + Map<Long, ErrorInfo> errorInfoMap = BigQueryResponseParser.getErrorsFromBQResponse(records, response, metrics, + instrumentation); + + Assert.assertEquals(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_UNKNOWN_ERROR), + errorInfoMap.get(0L)); + Assert.assertEquals(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_4XX_ERROR), errorInfoMap.get(1L)); + Assert.assertEquals(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_4XX_ERROR), errorInfoMap.get(2L)); + Assert.assertEquals(new ErrorInfo(new BigQuerySinkException(), ErrorType.SINK_5XX_ERROR), errorInfoMap.get(3L)); + + Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter("test", + String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.UNKNOWN_ERROR)); + Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter("test", String + .format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.INVALID_SCHEMA_ERROR)); + Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter("test", + String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.OOB_ERROR)); + Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter("test",
String.format(BigQueryMetrics.BIGQUERY_ERROR_TAG, BigQueryMetrics.BigQueryErrorType.STOPPED_ERROR)); + } +} diff --git a/src/test/java/io/odpf/depot/bigquery/client/BigQueryRowWithInsertIdTest.java b/src/test/java/org/raystack/depot/bigquery/client/BigQueryRowWithInsertIdTest.java similarity index 86% rename from src/test/java/io/odpf/depot/bigquery/client/BigQueryRowWithInsertIdTest.java rename to src/test/java/org/raystack/depot/bigquery/client/BigQueryRowWithInsertIdTest.java index 37a9b604..14aadf12 100644 --- a/src/test/java/io/odpf/depot/bigquery/client/BigQueryRowWithInsertIdTest.java +++ b/src/test/java/org/raystack/depot/bigquery/client/BigQueryRowWithInsertIdTest.java @@ -1,14 +1,13 @@ -package io.odpf.depot.bigquery.client; +package org.raystack.depot.bigquery.client; import com.google.cloud.bigquery.InsertAllRequest; -import io.odpf.depot.bigquery.models.Record; +import org.raystack.depot.bigquery.models.Record; import org.junit.Test; import java.util.HashMap; import static org.junit.Assert.assertEquals; - public class BigQueryRowWithInsertIdTest { @Test diff --git a/src/test/java/io/odpf/depot/bigquery/client/BigQueryRowWithoutInsertIdTest.java b/src/test/java/org/raystack/depot/bigquery/client/BigQueryRowWithoutInsertIdTest.java similarity index 85% rename from src/test/java/io/odpf/depot/bigquery/client/BigQueryRowWithoutInsertIdTest.java rename to src/test/java/org/raystack/depot/bigquery/client/BigQueryRowWithoutInsertIdTest.java index b517cccc..60097e10 100644 --- a/src/test/java/io/odpf/depot/bigquery/client/BigQueryRowWithoutInsertIdTest.java +++ b/src/test/java/org/raystack/depot/bigquery/client/BigQueryRowWithoutInsertIdTest.java @@ -1,7 +1,7 @@ -package io.odpf.depot.bigquery.client; +package org.raystack.depot.bigquery.client; import com.google.cloud.bigquery.InsertAllRequest; -import io.odpf.depot.bigquery.models.Record; +import org.raystack.depot.bigquery.models.Record; import org.junit.Test; import java.util.HashMap; diff --git a/src/test/java/io/odpf/depot/bigquery/client/BigQueryTableDefinitionTest.java b/src/test/java/org/raystack/depot/bigquery/client/BigQueryTableDefinitionTest.java similarity index 73% rename from src/test/java/io/odpf/depot/bigquery/client/BigQueryTableDefinitionTest.java rename to src/test/java/org/raystack/depot/bigquery/client/BigQueryTableDefinitionTest.java index 4558e79a..e0cb4b49 100644 --- a/src/test/java/io/odpf/depot/bigquery/client/BigQueryTableDefinitionTest.java +++ b/src/test/java/org/raystack/depot/bigquery/client/BigQueryTableDefinitionTest.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery.client; +package org.raystack.depot.bigquery.client; import com.google.cloud.bigquery.Clustering; import com.google.cloud.bigquery.Field; @@ -6,8 +6,8 @@ import com.google.cloud.bigquery.Schema; import com.google.cloud.bigquery.StandardTableDefinition; import com.google.cloud.bigquery.TimePartitioning; -import io.odpf.depot.bigquery.exception.BQClusteringKeysException; -import io.odpf.depot.config.BigQuerySinkConfig; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.bigquery.exception.BQClusteringKeysException; import org.junit.Before; import org.junit.Rule; import org.junit.Test; @@ -26,15 +26,15 @@ public class BigQueryTableDefinitionTest { - @Rule - public ExpectedException expectedEx = ExpectedException.none(); - @Mock - private BigQuerySinkConfig bqConfig; + @Rule + public ExpectedException expectedEx = ExpectedException.none(); + @Mock + private BigQuerySinkConfig bqConfig; - @Before - public void 
setup() { - bqConfig = Mockito.mock(BigQuerySinkConfig.class); - } + @Before + public void setup() { + bqConfig = Mockito.mock(BigQuerySinkConfig.class); + } @Test(expected = NullPointerException.class) public void shouldThrowExceptionWhenSchemaIsNull() { @@ -71,22 +71,21 @@ public void shouldThrowErrorIfPartitionFieldNotSet() { bigQueryTableDefinition.getTableDefinition(bqSchema); } - @Test - public void shouldReturnTimePartitioningWithPartitionExpiry() { - long partitionExpiry = 5184000000L; - when(bqConfig.getBigQueryTablePartitionExpiryMS()).thenReturn(partitionExpiry); - when(bqConfig.isTablePartitioningEnabled()).thenReturn(true); - when(bqConfig.getTablePartitionKey()).thenReturn("timestamp_field"); - Schema bqSchema = Schema.of( - Field.newBuilder("timestamp_field", LegacySQLTypeName.TIMESTAMP).build() - ); + @Test + public void shouldReturnTimePartitioningWithPartitionExpiry() { + long partitionExpiry = 5184000000L; + when(bqConfig.getBigQueryTablePartitionExpiryMS()).thenReturn(partitionExpiry); + when(bqConfig.isTablePartitioningEnabled()).thenReturn(true); + when(bqConfig.getTablePartitionKey()).thenReturn("timestamp_field"); + Schema bqSchema = Schema.of( + Field.newBuilder("timestamp_field", LegacySQLTypeName.TIMESTAMP).build()); - BigQueryTableDefinition bigQueryTableDefinition = new BigQueryTableDefinition(bqConfig); - StandardTableDefinition tableDefinition = bigQueryTableDefinition.getTableDefinition(bqSchema); + BigQueryTableDefinition bigQueryTableDefinition = new BigQueryTableDefinition(bqConfig); + StandardTableDefinition tableDefinition = bigQueryTableDefinition.getTableDefinition(bqSchema); - assertEquals("timestamp_field", tableDefinition.getTimePartitioning().getField()); - assertEquals(partitionExpiry, tableDefinition.getTimePartitioning().getExpirationMs().longValue()); - } + assertEquals("timestamp_field", tableDefinition.getTimePartitioning().getField()); + assertEquals(partitionExpiry, tableDefinition.getTimePartitioning().getExpirationMs().longValue()); + } @Test public void shouldReturnClusteringWithSingleColumns() { @@ -123,58 +122,57 @@ public void shouldReturnClusteringWithMultipleColumns() { assertEquals(expectedColumns, tableDefinition.getClustering().getFields()); } - @Test - public void shouldThrowExceptionIfClusteringKeyIsNotSet() { - expectedEx.expect(BQClusteringKeysException.class); - expectedEx.expectMessage("Clustering key not specified for the table: table_name"); - - when(bqConfig.isTableClusteringEnabled()).thenReturn(true); - when(bqConfig.getTableName()).thenReturn("table_name"); - - Schema bqSchema = Schema.of( - Field.of("string_field", LegacySQLTypeName.STRING) - ); - - BigQueryTableDefinition bigQueryTableDefinition = new BigQueryTableDefinition(bqConfig); - bigQueryTableDefinition.getTableDefinition(bqSchema); - } - - @Test - public void shouldThrowExceptionIfClusteringKeyIsSetMoreThanFour() { - expectedEx.expect(BQClusteringKeysException.class); - expectedEx.expectMessage("Max number of columns for clustering is 4"); - - when(bqConfig.isTableClusteringEnabled()).thenReturn(true); - when(bqConfig.getTableClusteringKeys()).thenReturn(Arrays.asList("string_field", "int_field", "bool_field", "timestamp_field", "another_field")); - - Schema bqSchema = Schema.of( - Field.of("string_field", LegacySQLTypeName.STRING), - Field.of("int_field", LegacySQLTypeName.INTEGER), - Field.of("bool_field", LegacySQLTypeName.BOOLEAN), - Field.of("timestamp_field", LegacySQLTypeName.TIMESTAMP), - Field.of("another_field", LegacySQLTypeName.STRING) - ); - - 
BigQueryTableDefinition bigQueryTableDefinition = new BigQueryTableDefinition(bqConfig); - bigQueryTableDefinition.getTableDefinition(bqSchema); - } - - @Test - public void shouldThrowExceptionIfClusteringKeyNotExistInSchema() { - expectedEx.expect(BQClusteringKeysException.class); - expectedEx.expectMessage("One or more column names specified [string_field] not exist on the schema or a nested type which is not supported for clustering"); - - when(bqConfig.isTableClusteringEnabled()).thenReturn(true); - when(bqConfig.getTableClusteringKeys()).thenReturn(Collections.singletonList("string_field")); - - Schema bqSchema = Schema.of( - Field.of("string_field2", LegacySQLTypeName.STRING), - Field.of("int_field2", LegacySQLTypeName.STRING) - ); - - BigQueryTableDefinition bigQueryTableDefinition = new BigQueryTableDefinition(bqConfig); - bigQueryTableDefinition.getTableDefinition(bqSchema); - } + @Test + public void shouldThrowExceptionIfClusteringKeyIsNotSet() { + expectedEx.expect(BQClusteringKeysException.class); + expectedEx.expectMessage("Clustering key not specified for the table: table_name"); + + when(bqConfig.isTableClusteringEnabled()).thenReturn(true); + when(bqConfig.getTableName()).thenReturn("table_name"); + + Schema bqSchema = Schema.of( + Field.of("string_field", LegacySQLTypeName.STRING)); + + BigQueryTableDefinition bigQueryTableDefinition = new BigQueryTableDefinition(bqConfig); + bigQueryTableDefinition.getTableDefinition(bqSchema); + } + + @Test + public void shouldThrowExceptionIfClusteringKeyIsSetMoreThanFour() { + expectedEx.expect(BQClusteringKeysException.class); + expectedEx.expectMessage("Max number of columns for clustering is 4"); + + when(bqConfig.isTableClusteringEnabled()).thenReturn(true); + when(bqConfig.getTableClusteringKeys()).thenReturn(Arrays.asList("string_field", "int_field", + "bool_field", "timestamp_field", "another_field")); + + Schema bqSchema = Schema.of( + Field.of("string_field", LegacySQLTypeName.STRING), + Field.of("int_field", LegacySQLTypeName.INTEGER), + Field.of("bool_field", LegacySQLTypeName.BOOLEAN), + Field.of("timestamp_field", LegacySQLTypeName.TIMESTAMP), + Field.of("another_field", LegacySQLTypeName.STRING)); + + BigQueryTableDefinition bigQueryTableDefinition = new BigQueryTableDefinition(bqConfig); + bigQueryTableDefinition.getTableDefinition(bqSchema); + } + + @Test + public void shouldThrowExceptionIfClusteringKeyNotExistInSchema() { + expectedEx.expect(BQClusteringKeysException.class); + expectedEx.expectMessage( + "One or more column names specified [string_field] not exist on the schema or a nested type which is not supported for clustering"); + + when(bqConfig.isTableClusteringEnabled()).thenReturn(true); + when(bqConfig.getTableClusteringKeys()).thenReturn(Collections.singletonList("string_field")); + + Schema bqSchema = Schema.of( + Field.of("string_field2", LegacySQLTypeName.STRING), + Field.of("int_field2", LegacySQLTypeName.STRING)); + + BigQueryTableDefinition bigQueryTableDefinition = new BigQueryTableDefinition(bqConfig); + bigQueryTableDefinition.getTableDefinition(bqSchema); + } @Test public void shouldReturnPartitionedAndClusteredTableDefinition() { diff --git a/src/test/java/org/raystack/depot/bigquery/converter/MessageRecordConverterForJsonTest.java b/src/test/java/org/raystack/depot/bigquery/converter/MessageRecordConverterForJsonTest.java new file mode 100644 index 00000000..63d3a5eb --- /dev/null +++ b/src/test/java/org/raystack/depot/bigquery/converter/MessageRecordConverterForJsonTest.java @@ -0,0 +1,266 @@ 
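+// Covers the JSON path of MessageRecordConverter: valid JSON payloads become
+// records whose top-level keys map to columns, malformed JSON is returned as an
+// invalid record with DESERIALIZATION_ERROR, nested JSON objects are rejected
+// with INVALID_MESSAGE_ERROR, and an event_timestamp column is injected when
+// SINK_BIGQUERY_ADD_EVENT_TIMESTAMP_ENABLE is set.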
+package org.raystack.depot.bigquery.converter; + +import com.google.common.collect.ImmutableMap; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.config.SinkConfig; +import org.raystack.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorType; +import org.raystack.depot.message.Message; +import org.raystack.depot.message.MessageParser; +import org.raystack.depot.message.MessageSchema; +import org.raystack.depot.message.json.JsonMessageParser; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.metrics.JsonParserMetrics; +import org.raystack.depot.bigquery.models.Record; +import org.raystack.depot.bigquery.models.Records; +import org.aeonbits.owner.ConfigFactory; +import org.junit.Test; + +import java.text.DateFormat; +import java.text.ParseException; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.TimeZone; +import java.util.stream.Collectors; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.collection.IsEmptyCollection.empty; +import static org.hamcrest.collection.IsMapContaining.hasEntry; +import static org.junit.Assert.*; +import static org.mockito.Mockito.mock; + +public class MessageRecordConverterForJsonTest { + + private final SinkConfig defaultConfig = ConfigFactory.create(SinkConfig.class, Collections.emptyMap()); + private final Record.RecordBuilder recordBuilder = Record.builder(); + private final Map<String, Object> emptyMetadata = Collections.emptyMap(); + private final Map<String, Object> emptyColumnsMap = Collections.emptyMap(); + private final ErrorInfo noError = null; + private final Instrumentation instrumentation = mock(Instrumentation.class); + private final JsonParserMetrics jsonParserMetrics = new JsonParserMetrics(defaultConfig); + private static final TimeZone TZ = TimeZone.getTimeZone("UTC"); + private static final DateFormat DF = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm'Z'"); + static { + DF.setTimeZone(TZ); + } + + @Test + public void shouldReturnEmptyRecordsForEmptyList() { + MessageParser parser = new JsonMessageParser(defaultConfig, instrumentation, jsonParserMetrics); + MessageSchema schema = null; + BigQuerySinkConfig bigQuerySinkConfig = null; + MessageRecordConverter converter = new MessageRecordConverter(parser, bigQuerySinkConfig, schema); + List<Message> emptyMessageList = Collections.emptyList(); + + Records records = converter.convert(emptyMessageList); + List<Record> emptyRecordList = Collections.emptyList(); + Records expectedRecords = new Records(emptyRecordList, emptyRecordList); + assertEquals(expectedRecords, records); + } + + @Test + public void shouldConvertJsonMessagesToRecordForLogMessage() { + MessageParser parser = new JsonMessageParser(defaultConfig, instrumentation, jsonParserMetrics); + MessageSchema schema = null; + HashMap<String, String> configMap = new HashMap<>(); + configMap.put("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", "LOG_MESSAGE"); + BigQuerySinkConfig bigQuerySinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, configMap); + MessageRecordConverter converter = new MessageRecordConverter(parser, bigQuerySinkConfig, schema); + List<Message> messages = new ArrayList<>(); + messages.add(getMessageForString("{ \"first_name\": \"john doe\"}")); + messages.add(getMessageForString("{ \"last_name\": \"walker\"}")); + + Records records = converter.convert(messages); + + List<Record> expectedValidRecords = new ArrayList<>(); + + Record validRecord1 = recordBuilder +
.metadata(emptyMetadata) + .columns(ImmutableMap.of("first_name", "john doe")) + .index(0L) + .errorInfo(noError) + .build(); + + Record validRecord2 = recordBuilder + .metadata(emptyMetadata) + .columns(ImmutableMap.of("last_name", "walker")) + .index(1L) + .errorInfo(noError) + .build(); + expectedValidRecords.add(validRecord1); + expectedValidRecords.add(validRecord2); + List<Record> invalidRecords = Collections.emptyList(); + Records expectedRecords = new Records(expectedValidRecords, invalidRecords); + assertEquals(expectedRecords, records); + + } + + @Test + public void shouldConvertJsonMessagesToRecordForLogKey() { + MessageParser parser = new JsonMessageParser(defaultConfig, instrumentation, jsonParserMetrics); + MessageSchema schema = null; + HashMap<String, String> configMap = new HashMap<>(); + configMap.put("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", "LOG_KEY"); + BigQuerySinkConfig bigQuerySinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, configMap); + MessageRecordConverter converter = new MessageRecordConverter(parser, bigQuerySinkConfig, schema); + List<Message> messages = new ArrayList<>(); + messages.add(new Message("{ \"first_name\": \"john doe\"}".getBytes(), null)); + messages.add(new Message("{ \"last_name\": \"walker\"}".getBytes(), null)); + + Records records = converter.convert(messages); + + List<Record> expectedValidRecords = new ArrayList<>(); + + Record validRecord1 = recordBuilder + .metadata(emptyMetadata) + .columns(ImmutableMap.of("first_name", "john doe")) + .index(0L) + .errorInfo(noError) + .build(); + + Record validRecord2 = recordBuilder + .metadata(emptyMetadata) + .columns(ImmutableMap.of("last_name", "walker")) + .index(1L) + .errorInfo(noError) + .build(); + expectedValidRecords.add(validRecord1); + expectedValidRecords.add(validRecord2); + List<Record> invalidRecords = Collections.emptyList(); + Records expectedRecords = new Records(expectedValidRecords, invalidRecords); + assertEquals(expectedRecords, records); + + } + + @Test + public void shouldHandleBothInvalidAndValidJsonMessages() { + MessageParser parser = new JsonMessageParser(defaultConfig, instrumentation, jsonParserMetrics); + MessageSchema schema = null; + HashMap<String, String> configMap = new HashMap<>(); + configMap.put("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", "LOG_MESSAGE"); + BigQuerySinkConfig bigQuerySinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, configMap); + MessageRecordConverter converter = new MessageRecordConverter(parser, bigQuerySinkConfig, schema); + List<Message> messages = new ArrayList<>(); + messages.add(getMessageForString("{ \"first_name\": \"john doe\"}")); + messages.add(getMessageForString("{ invalid json str")); + messages.add(getMessageForString("{ \"last_name\": \"walker\"}")); + messages.add(getMessageForString("another invalid message")); + String nestedJsonStr = "{\n" + + " \"event_value\": {\n" + + " \"CustomerLatitude\": \"-6.166895595817224\",\n" + + " \"fb_content_type\": \"product\"\n" + + " },\n" + + " \"ip\": \"210.210.175.250\",\n" + + " \"oaid\": null,\n" + + " \"event_time\": \"2022-05-06 08:03:43.561\",\n" + + " \"is_receipt_validated\": null,\n" + + " \"contributor_1_campaign\": null\n" + + "}"; + + messages.add(getMessageForString(nestedJsonStr)); + + Records records = converter.convert(messages); + + List<Record> expectedValidRecords = new ArrayList<>(); + Record validRecord1 = recordBuilder + .metadata(emptyMetadata) + .columns(ImmutableMap.of("first_name", "john doe")) + .index(0L) + .errorInfo(noError) + .build(); + + Record validRecord2 = recordBuilder +
.metadata(emptyMetadata) + .columns(ImmutableMap.of("last_name", "walker")) + .index(2L) + .errorInfo(noError) + .build(); + + expectedValidRecords.add(validRecord1); + expectedValidRecords.add(validRecord2); + + ErrorInfo errorInfo = new ErrorInfo(null, ErrorType.DESERIALIZATION_ERROR); + ErrorInfo invalidMessageError = new ErrorInfo(null, ErrorType.INVALID_MESSAGE_ERROR); + List<Record> expectedInvalidRecords = new ArrayList<>(); + Record.RecordBuilder invalidRecordBuilder = recordBuilder.metadata(emptyMetadata) + .columns(emptyColumnsMap); + + Record invalidRecord1 = invalidRecordBuilder + .index(1L) + .errorInfo(errorInfo) + .build(); + + Record invalidRecord3 = invalidRecordBuilder + .index(3L) + .errorInfo(errorInfo) + .build(); + + Record invalidRecord4 = invalidRecordBuilder + .index(4L) + .errorInfo(invalidMessageError) + .build(); + + expectedInvalidRecords.add(invalidRecord1); + expectedInvalidRecords.add(invalidRecord3); + expectedInvalidRecords.add(invalidRecord4); + + assertEquals(expectedValidRecords, records.getValidRecords()); + + assertEquals(expectedInvalidRecords, records.getInvalidRecords()); + + } + + @Test + public void shouldInjectEventTimestamp() throws ParseException { + MessageParser parser = new JsonMessageParser(defaultConfig, instrumentation, jsonParserMetrics); + MessageSchema schema = null; + Map<String, String> configMap = ImmutableMap.of( + "SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", "LOG_MESSAGE", + "SINK_CONNECTOR_SCHEMA_DATA_TYPE", "json", + "SINK_BIGQUERY_ADD_EVENT_TIMESTAMP_ENABLE", "true"); + + BigQuerySinkConfig bigQuerySinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, configMap); + MessageRecordConverter converter = new MessageRecordConverter(parser, bigQuerySinkConfig, schema); + List<Message> messages = new ArrayList<>(); + messages.add(getMessageForString("{ \"first_name\": \"john doe\"}")); + messages.add(getMessageForString("{ \"last_name\": \"walker\"}")); + + Records actualRecords = converter.convert(messages); + + /* + * can't do assertEquals(expectedRecords, records) here because the injected + * timestamp value is not known in advance + */ + assertThat(actualRecords.getInvalidRecords(), empty()); + assertEquals(2, actualRecords.getValidRecords().size()); + Record validRecord1 = actualRecords.getValidRecords().get(0); + assertNull(validRecord1.getErrorInfo()); + assertThat(validRecord1.getColumns(), hasEntry("first_name", "john doe")); + Record validRecord2 = actualRecords.getValidRecords().get(1); + assertNull(validRecord2.getErrorInfo()); + assertThat(validRecord2.getColumns(), hasEntry("last_name", "walker")); + + List<String> dateTimeList = actualRecords + .getValidRecords() + .stream() + .map(k -> (String) k.getColumns().get("event_timestamp")) + .collect(Collectors.toList()); + long currentTimeMillis = System.currentTimeMillis(); + // assert that the injected timestamp is recent by checking that the + // difference from the current time is less than 60 seconds + long timeDifferenceForFirstDate = currentTimeMillis + - DF.parse(dateTimeList.get(0)).getTime(); + long timeDifferenceForSecondDate = currentTimeMillis - DF.parse(dateTimeList.get(1)).getTime(); + assertTrue("the difference is " + timeDifferenceForFirstDate, timeDifferenceForFirstDate < 60000); + assertTrue("the difference is " + timeDifferenceForSecondDate, timeDifferenceForSecondDate < 60000); + } + + private Message getMessageForString(String jsonStr) { + byte[] logMessage = jsonStr.getBytes(); + return new Message(null, logMessage); + } +} diff --git
a/src/test/java/io/odpf/depot/bigquery/converter/MessageRecordConverterTest.java b/src/test/java/org/raystack/depot/bigquery/converter/MessageRecordConverterTest.java similarity index 62% rename from src/test/java/io/odpf/depot/bigquery/converter/MessageRecordConverterTest.java rename to src/test/java/org/raystack/depot/bigquery/converter/MessageRecordConverterTest.java index 39db1637..cd955a1c 100644 --- a/src/test/java/io/odpf/depot/bigquery/converter/MessageRecordConverterTest.java +++ b/src/test/java/org/raystack/depot/bigquery/converter/MessageRecordConverterTest.java @@ -1,22 +1,22 @@ -package io.odpf.depot.bigquery.converter; +package org.raystack.depot.bigquery.converter; import com.google.api.client.util.DateTime; import com.google.protobuf.Descriptors; import com.google.protobuf.DynamicMessage; import com.google.protobuf.UnknownFieldSet; -import io.odpf.depot.TestMessage; -import io.odpf.depot.bigquery.TestMetadata; -import io.odpf.depot.bigquery.TestOdpfMessageBuilder; -import io.odpf.depot.bigquery.models.Record; -import io.odpf.depot.bigquery.models.Records; -import io.odpf.depot.common.Tuple; -import io.odpf.depot.common.TupleString; -import io.odpf.depot.config.BigQuerySinkConfig; -import io.odpf.depot.error.ErrorType; -import io.odpf.depot.message.*; -import io.odpf.depot.message.proto.ProtoOdpfMessageParser; -import io.odpf.depot.message.proto.ProtoOdpfParsedMessage; -import io.odpf.stencil.client.ClassLoadStencilClient; +import org.raystack.depot.TestMessage; +import org.raystack.depot.message.*; +import org.raystack.depot.bigquery.TestMetadata; +import org.raystack.depot.bigquery.TestMessageBuilder; +import org.raystack.depot.common.Tuple; +import org.raystack.depot.common.TupleString; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.error.ErrorType; +import org.raystack.depot.message.proto.ProtoMessageParser; +import org.raystack.depot.message.proto.ProtoParsedMessage; +import org.raystack.depot.bigquery.models.Record; +import org.raystack.depot.bigquery.models.Records; +import org.raystack.stencil.client.ClassLoadStencilClient; import org.aeonbits.owner.ConfigFactory; import org.junit.Before; import org.junit.Test; @@ -36,21 +36,23 @@ public class MessageRecordConverterTest { @Mock private ClassLoadStencilClient stencilClient; private Instant now; - private OdpfMessageSchema schema; + private MessageSchema schema; @Before public void setUp() throws IOException { - System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "io.odpf.depot.TestMessage"); + System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "org.raystack.depot.TestMessage"); System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", ""); System.setProperty("SINK_BIGQUERY_METADATA_COLUMNS_TYPES", "message_offset=integer,message_topic=string,load_time=timestamp,message_timestamp=timestamp,message_partition=integer"); stencilClient = Mockito.mock(ClassLoadStencilClient.class, CALLS_REAL_METHODS); - Map descriptorsMap = new HashMap() {{ - put(String.format("%s", TestMessage.class.getName()), TestMessage.getDescriptor()); - }}; - ProtoOdpfMessageParser protoOdpfMessageParser = new ProtoOdpfMessageParser(stencilClient); - schema = protoOdpfMessageParser.getSchema("io.odpf.depot.TestMessage", descriptorsMap); - recordConverter = new MessageRecordConverter(protoOdpfMessageParser, + Map descriptorsMap = new HashMap() { + { + put(String.format("%s", TestMessage.class.getName()), TestMessage.getDescriptor()); + } + }; + ProtoMessageParser protoMessageParser = new 
ProtoMessageParser(stencilClient); + schema = protoMessageParser.getSchema("org.raystack.depot.TestMessage", descriptorsMap); + recordConverter = new MessageRecordConverter(protoMessageParser, ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties()), schema); now = Instant.now(); @@ -58,25 +60,27 @@ public void setUp() throws IOException { @Test public void shouldGetRecordForBQFromConsumerRecords() { - TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), now.toEpochMilli()); - TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(), now.toEpochMilli()); - OdpfMessage record1 = TestOdpfMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); - OdpfMessage record2 = TestOdpfMessageBuilder.withMetadata(record2Offset).createConsumerRecord("order-2", "order-url-2", "order-details-2"); - + TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), + now.toEpochMilli()); + TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(), + now.toEpochMilli()); + Message record1 = TestMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", + "order-details-1"); + Message record2 = TestMessageBuilder.withMetadata(record2Offset).createConsumerRecord("order-2", "order-url-2", + "order-details-2"); Map record1ExpectedColumns = new HashMap<>(); record1ExpectedColumns.put("order_number", "order-1"); record1ExpectedColumns.put("order_url", "order-url-1"); record1ExpectedColumns.put("order_details", "order-details-1"); - record1ExpectedColumns.putAll(TestOdpfMessageBuilder.metadataColumns(record1Offset, now)); - + record1ExpectedColumns.putAll(TestMessageBuilder.metadataColumns(record1Offset, now)); Map record2ExpectedColumns = new HashMap<>(); record2ExpectedColumns.put("order_number", "order-2"); record2ExpectedColumns.put("order_url", "order-url-2"); record2ExpectedColumns.put("order_details", "order-details-2"); - record2ExpectedColumns.putAll(TestOdpfMessageBuilder.metadataColumns(record2Offset, now)); - List messages = Arrays.asList(record1, record2); + record2ExpectedColumns.putAll(TestMessageBuilder.metadataColumns(record2Offset, now)); + List messages = Arrays.asList(record1, record2); Records records = recordConverter.convert(messages); @@ -91,19 +95,22 @@ public void shouldGetRecordForBQFromConsumerRecords() { @Test public void shouldIgnoreNullRecords() { - TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), now.toEpochMilli()); - TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(), now.toEpochMilli()); - OdpfMessage record1 = TestOdpfMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); - OdpfMessage record2 = TestOdpfMessageBuilder.withMetadata(record2Offset).createEmptyValueConsumerRecord("order-2", "order-url-2"); - + TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), + now.toEpochMilli()); + TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(), + now.toEpochMilli()); + Message record1 = TestMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", + "order-details-1"); + Message record2 = TestMessageBuilder.withMetadata(record2Offset).createEmptyValueConsumerRecord("order-2", + "order-url-2"); Map 
record1ExpectedColumns = new HashMap<>(); record1ExpectedColumns.put("order_number", "order-1"); record1ExpectedColumns.put("order_url", "order-url-1"); record1ExpectedColumns.put("order_details", "order-details-1"); - record1ExpectedColumns.putAll(TestOdpfMessageBuilder.metadataColumns(record1Offset, now)); + record1ExpectedColumns.putAll(TestMessageBuilder.metadataColumns(record1Offset, now)); - List messages = Arrays.asList(record1, record2); + List messages = Arrays.asList(record1, record2); Records records = recordConverter.convert(messages); assertEquals(1, records.getValidRecords().size()); @@ -114,18 +121,22 @@ public void shouldIgnoreNullRecords() { @Test public void shouldReturnInvalidRecordsWhenGivenNullRecords() { - TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), now.toEpochMilli()); - TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(), now.toEpochMilli()); - OdpfMessage record1 = TestOdpfMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); - OdpfMessage record2 = TestOdpfMessageBuilder.withMetadata(record2Offset).createEmptyValueConsumerRecord("order-2", "order-url-2"); + TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), + now.toEpochMilli()); + TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(), + now.toEpochMilli()); + Message record1 = TestMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", + "order-details-1"); + Message record2 = TestMessageBuilder.withMetadata(record2Offset).createEmptyValueConsumerRecord("order-2", + "order-url-2"); Map record1ExpectedColumns = new HashMap<>(); record1ExpectedColumns.put("order_number", "order-1"); record1ExpectedColumns.put("order_url", "order-url-1"); record1ExpectedColumns.put("order_details", "order-details-1"); - record1ExpectedColumns.putAll(TestOdpfMessageBuilder.metadataColumns(record1Offset, now)); + record1ExpectedColumns.putAll(TestMessageBuilder.metadataColumns(record1Offset, now)); - List messages = Arrays.asList(record1, record2); + List messages = Arrays.asList(record1, record2); Records records = recordConverter.convert(messages); assertEquals(1, records.getValidRecords().size()); @@ -137,19 +148,21 @@ public void shouldReturnInvalidRecordsWhenGivenNullRecords() { @Test public void shouldNotNamespaceMetadataFieldWhenNamespaceIsNotProvided() { BigQuerySinkConfig sinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties()); - ProtoOdpfMessageParser protoOdpfMessageParser = new ProtoOdpfMessageParser(stencilClient); - MessageRecordConverter recordConverterTest = new MessageRecordConverter(protoOdpfMessageParser, sinkConfig, schema); + ProtoMessageParser protoMessageParser = new ProtoMessageParser(stencilClient); + MessageRecordConverter recordConverterTest = new MessageRecordConverter(protoMessageParser, sinkConfig, schema); - TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), now.toEpochMilli()); - OdpfMessage record1 = TestOdpfMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); + TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), + now.toEpochMilli()); + Message record1 = TestMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", + "order-details-1"); Map 
record1ExpectedColumns = new HashMap<>(); record1ExpectedColumns.put("order_number", "order-1"); record1ExpectedColumns.put("order_url", "order-url-1"); record1ExpectedColumns.put("order_details", "order-details-1"); - record1ExpectedColumns.putAll(TestOdpfMessageBuilder.metadataColumns(record1Offset, now)); + record1ExpectedColumns.putAll(TestMessageBuilder.metadataColumns(record1Offset, now)); - List messages = Collections.singletonList(record1); + List messages = Collections.singletonList(record1); Records records = recordConverterTest.convert(messages); assertEquals(messages.size(), records.getValidRecords().size()); @@ -163,19 +176,22 @@ public void shouldNotNamespaceMetadataFieldWhenNamespaceIsNotProvided() { public void shouldNamespaceMetadataFieldWhenNamespaceIsProvided() { System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", "metadata_ns"); BigQuerySinkConfig sinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties()); - ProtoOdpfMessageParser protoOdpfMessageParser = new ProtoOdpfMessageParser(stencilClient); - MessageRecordConverter recordConverterTest = new MessageRecordConverter(protoOdpfMessageParser, sinkConfig, schema); + ProtoMessageParser protoMessageParser = new ProtoMessageParser(stencilClient); + MessageRecordConverter recordConverterTest = new MessageRecordConverter(protoMessageParser, sinkConfig, schema); - TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), now.toEpochMilli()); - OdpfMessage record1 = TestOdpfMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); + TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), + now.toEpochMilli()); + Message record1 = TestMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", + "order-details-1"); Map record1ExpectedColumns = new HashMap<>(); record1ExpectedColumns.put("order_number", "order-1"); record1ExpectedColumns.put("order_url", "order-url-1"); record1ExpectedColumns.put("order_details", "order-details-1"); - record1ExpectedColumns.put(sinkConfig.getBqMetadataNamespace(), TestOdpfMessageBuilder.metadataColumns(record1Offset, now)); + record1ExpectedColumns.put(sinkConfig.getBqMetadataNamespace(), + TestMessageBuilder.metadataColumns(record1Offset, now)); - List messages = Collections.singletonList(record1); + List messages = Collections.singletonList(record1); Records records = recordConverterTest.convert(messages); assertEquals(messages.size(), records.getValidRecords().size()); @@ -185,29 +201,32 @@ public void shouldNamespaceMetadataFieldWhenNamespaceIsProvided() { System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", ""); } - @Test public void shouldReturnInvalidRecordsGivenInvalidProtobufMessage() { - TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), now.toEpochMilli()); - TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(), now.toEpochMilli()); - OdpfMessage record1 = TestOdpfMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", + TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), + now.toEpochMilli()); + TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(), + now.toEpochMilli()); + Message record1 = TestMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); - OdpfMessage 
record2 = new OdpfMessage("invalid-key".getBytes(), "invalid-value".getBytes(), + Message record2 = new Message("invalid-key".getBytes(), "invalid-value".getBytes(), new Tuple<>("topic", record2Offset.getTopic()), new Tuple<>("partition", record2Offset.getPartition())); - List messages = Arrays.asList(record1, record2); + List messages = Arrays.asList(record1, record2); Records records = recordConverter.convert(messages); assertEquals(1, records.getInvalidRecords().size()); } @Test public void shouldWriteToErrorWriterInvalidRecords() { - TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), now.toEpochMilli()); - TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(), now.toEpochMilli()); - OdpfMessage record1 = TestOdpfMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", + TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), + now.toEpochMilli()); + TestMetadata record2Offset = new TestMetadata("topic1", 2, 102, Instant.now().toEpochMilli(), + now.toEpochMilli()); + Message record1 = TestMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); - OdpfMessage record2 = new OdpfMessage("invalid-key".getBytes(), "invalid-value".getBytes(), + Message record2 = new Message("invalid-key".getBytes(), "invalid-value".getBytes(), new Tuple<>("message_topic", record2Offset.getTopic()), new Tuple<>("message_partition", record2Offset.getPartition()), new Tuple<>("message_offset", record2Offset.getOffset()), @@ -218,9 +237,9 @@ public void shouldWriteToErrorWriterInvalidRecords() { record1ExpectedColumns.put("order_number", "order-1"); record1ExpectedColumns.put("order_url", "order-url-1"); record1ExpectedColumns.put("order_details", "order-details-1"); - record1ExpectedColumns.putAll(TestOdpfMessageBuilder.metadataColumns(record1Offset, now)); + record1ExpectedColumns.putAll(TestMessageBuilder.metadataColumns(record1Offset, now)); - List messages = Arrays.asList(record1, record2); + List messages = Arrays.asList(record1, record2); Records records = recordConverter.convert(messages); assertEquals(1, records.getValidRecords().size()); @@ -236,10 +255,11 @@ public void shouldWriteToErrorWriterInvalidRecords() { @Test public void shouldReturnInvalidRecordsWhenUnknownFieldsFound() throws IOException { System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_ALLOW_UNKNOWN_FIELDS_ENABLE", "false"); - OdpfMessageParser mockParser = mock(OdpfMessageParser.class); + MessageParser mockParser = mock(MessageParser.class); - TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), Instant.now().toEpochMilli()); - OdpfMessage consumerRecord = TestOdpfMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", + TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), + Instant.now().toEpochMilli()); + Message consumerRecord = TestMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); DynamicMessage dynamicMessage = DynamicMessage.newBuilder(TestMessage.getDescriptor()) @@ -247,12 +267,14 @@ public void shouldReturnInvalidRecordsWhenUnknownFieldsFound() throws IOExceptio .addField(1, UnknownFieldSet.Field.getDefaultInstance()) .build()) .build(); - ParsedOdpfMessage parsedOdpfMessage = new ProtoOdpfParsedMessage(dynamicMessage); - when(mockParser.parse(consumerRecord, 
SinkConnectorSchemaMessageMode.LOG_MESSAGE, "io.odpf.depot.TestMessage")).thenReturn(parsedOdpfMessage); + ParsedMessage parsedMessage = new ProtoParsedMessage(dynamicMessage); + when(mockParser.parse(consumerRecord, SinkConnectorSchemaMessageMode.LOG_MESSAGE, + "org.raystack.depot.TestMessage")).thenReturn(parsedMessage); - recordConverter = new MessageRecordConverter(mockParser, ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties()), schema); + recordConverter = new MessageRecordConverter(mockParser, + ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties()), schema); - List messages = Collections.singletonList(consumerRecord); + List messages = Collections.singletonList(consumerRecord); Records records = recordConverter.convert(messages); assertEquals(0, records.getValidRecords().size()); @@ -264,10 +286,11 @@ public void shouldReturnInvalidRecordsWhenUnknownFieldsFound() throws IOExceptio @Test public void shouldIgnoreUnknownFieldsIfTheConfigIsSet() throws IOException { System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_ALLOW_UNKNOWN_FIELDS_ENABLE", "true"); - OdpfMessageParser mockParser = mock(OdpfMessageParser.class); + MessageParser mockParser = mock(MessageParser.class); - TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), Instant.now().toEpochMilli()); - OdpfMessage consumerRecord = TestOdpfMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", + TestMetadata record1Offset = new TestMetadata("topic1", 1, 101, Instant.now().toEpochMilli(), + Instant.now().toEpochMilli()); + Message consumerRecord = TestMessageBuilder.withMetadata(record1Offset).createConsumerRecord("order-1", "order-url-1", "order-details-1"); DynamicMessage dynamicMessage = DynamicMessage.newBuilder(TestMessage.getDescriptor()) @@ -275,27 +298,29 @@ public void shouldIgnoreUnknownFieldsIfTheConfigIsSet() throws IOException { .addField(1, UnknownFieldSet.Field.getDefaultInstance()) .build()) .build(); - ParsedOdpfMessage parsedOdpfMessage = new ProtoOdpfParsedMessage(dynamicMessage); - when(mockParser.parse(consumerRecord, SinkConnectorSchemaMessageMode.LOG_MESSAGE, "io.odpf.depot.TestMessage")).thenReturn(parsedOdpfMessage); + ParsedMessage parsedMessage = new ProtoParsedMessage(dynamicMessage); + when(mockParser.parse(consumerRecord, SinkConnectorSchemaMessageMode.LOG_MESSAGE, + "org.raystack.depot.TestMessage")).thenReturn(parsedMessage); recordConverter = new MessageRecordConverter(mockParser, ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties()), schema); - List messages = Collections.singletonList(consumerRecord); + List messages = Collections.singletonList(consumerRecord); Records records = recordConverter.convert(messages); BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties()); List metadataColumnsTypes = config.getMetadataColumnsTypes(); Map metadata = consumerRecord.getMetadata(); - Map finalMetadata = metadataColumnsTypes.stream().collect(Collectors.toMap(TupleString::getFirst, t -> { - String key = t.getFirst(); - String dataType = t.getSecond(); - Object value = metadata.get(key); - if (value instanceof Long && dataType.equals("timestamp")) { - value = new DateTime((long) value); - } - return value; - })); + Map finalMetadata = metadataColumnsTypes.stream() + .collect(Collectors.toMap(TupleString::getFirst, t -> { + String key = t.getFirst(); + String dataType = t.getSecond(); + Object value = metadata.get(key); + if (value instanceof Long && 
dataType.equals("timestamp")) { + value = new DateTime((long) value); + } + return value; + })); Record record = new Record(consumerRecord.getMetadata(), finalMetadata, 0, null); assertEquals(1, records.getValidRecords().size()); assertEquals(0, records.getInvalidRecords().size()); diff --git a/src/test/java/org/raystack/depot/bigquery/converter/MessageRecordConverterUtilsTest.java b/src/test/java/org/raystack/depot/bigquery/converter/MessageRecordConverterUtilsTest.java new file mode 100644 index 00000000..dc8d7b07 --- /dev/null +++ b/src/test/java/org/raystack/depot/bigquery/converter/MessageRecordConverterUtilsTest.java @@ -0,0 +1,64 @@ +package org.raystack.depot.bigquery.converter; + +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.message.Message; +import org.aeonbits.owner.ConfigFactory; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; + +import java.util.HashMap; +import java.util.Map; + +public class MessageRecordConverterUtilsTest { + + @Test + public void shouldAddMetaData() { + Map<String, Object> columns = new HashMap<String, Object>() { + { + put("test", 123); + } + }; + Message message = Mockito.mock(Message.class); + Mockito.when(message.getMetadata(Mockito.any())).thenReturn(new HashMap<String, Object>() { + { + put("test2", "value2"); + put("something", 99L); + put("nvm", "nvm"); + } + }); + BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, new HashMap<String, String>() { + { + put("SINK_BIGQUERY_ADD_METADATA_ENABLED", "true"); + put("SINK_BIGQUERY_METADATA_COLUMNS_TYPES", "test2=string,something=long,nvm=string"); + } + }); + MessageRecordConverterUtils.addMetadata(columns, message, config); + Assert.assertEquals(new HashMap<String, Object>() { + { + put("test", 123); + put("test2", "value2"); + put("something", 99L); + put("nvm", "nvm"); + } + }, columns); + } + + @Test + public void shouldAddTimeStampForJson() { + Map<String, Object> columns = new HashMap<String, Object>() { + { + put("test", 123); + } + }; + BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, new HashMap<String, String>() { + { + put("SINK_CONNECTOR_SCHEMA_DATA_TYPE", "json"); + put("SINK_BIGQUERY_ADD_EVENT_TIMESTAMP_ENABLE", "true"); + } + }); + MessageRecordConverterUtils.addTimeStampColumnForJson(columns, config); + Assert.assertEquals(2, columns.size()); + Assert.assertNotNull(columns.get("event_timestamp")); + } +} diff --git a/src/test/java/org/raystack/depot/bigquery/handler/JsonErrorHandlerTest.java b/src/test/java/org/raystack/depot/bigquery/handler/JsonErrorHandlerTest.java new file mode 100644 index 00000000..bef9a1e5 --- /dev/null +++ b/src/test/java/org/raystack/depot/bigquery/handler/JsonErrorHandlerTest.java @@ -0,0 +1,577 @@ +package org.raystack.depot.bigquery.handler; + +import com.google.api.client.util.DateTime; +import com.google.cloud.bigquery.BigQueryError; +import com.google.cloud.bigquery.Field; +import com.google.cloud.bigquery.LegacySQLTypeName; +import com.google.cloud.bigquery.Schema; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.bigquery.client.BigQueryClient; +import org.raystack.depot.bigquery.models.Record; +import org.aeonbits.owner.ConfigFactory; +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentCaptor; +import org.mockito.Captor; +import org.mockito.Mock; +import org.mockito.MockitoAnnotations; + +import java.util.Collections; +import java.util.List; +import java.util.Map; +
+import static java.util.Arrays.asList; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.junit.Assert.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.*; + +public class JsonErrorHandlerTest { + + private final Schema emptyTableSchema = Schema.of(); + + private final BigQuerySinkConfig bigQuerySinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, + Collections.emptyMap()); + + @Mock + private BigQueryClient bigQueryClient; + + @Captor + private ArgumentCaptor<List<Field>> fieldsArgumentCaptor; + + @Mock + private Instrumentation instrumentation; + + @Before + public void setUp() throws Exception { + MockitoAnnotations.initMocks(this); + } + + private Field getField(String name, LegacySQLTypeName type) { + return Field.newBuilder(name, type).setMode(Field.Mode.NULLABLE).build(); + } + + @Test + public void shouldUpdateTableFieldsOnSchemaError() { + when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema); + + BigQueryError bigQueryError = new BigQueryError("invalid", "first_name", "no such field: first_name"); + Map<Long, List<BigQueryError>> insertErrors = ImmutableMap.of(0L, Collections.singletonList(bigQueryError)); + + Record validRecord = Record.builder() + .columns(ImmutableMap.of("first_name", "john doe")) + .build(); + + List<Record> records = ImmutableList.of(validRecord); + + JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, bigQuerySinkConfig, instrumentation); + + jsonErrorHandler.handle(insertErrors, records); + verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture()); + + Field firstName = getField("first_name", LegacySQLTypeName.STRING); + List<Field> actualFields = fieldsArgumentCaptor.getValue(); + assertThat(actualFields, containsInAnyOrder(firstName)); + } + + @Test + public void shouldNotUpdateTableWhenNoSchemaError() { + when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema); + + BigQueryError serverError = new BigQueryError("otherreasons", "planet earth", "server error"); + BigQueryError anotherError = new BigQueryError("otherreasons", "planet earth", "server error"); + Map<Long, List<BigQueryError>> insertErrors = ImmutableMap.of(0L, asList(serverError, anotherError)); + + Record validRecord = Record.builder() + .columns(ImmutableMap.of("first_name", "john doe")) + .build(); + + List<Record> records = ImmutableList.of(validRecord); + + JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, bigQuerySinkConfig, instrumentation); + jsonErrorHandler.handle(insertErrors, records); + + verify(bigQueryClient, never()).upsertTable(any()); + + } + + @Test + public void shouldUpdateTableFieldsForMultipleRecords() { + when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema); + + + BigQueryError firstNameNotFoundError = new BigQueryError("invalid", "first_name", "no such field: first_name"); + BigQueryError anotherError = new BigQueryError("otherreasons", "planet earth", "some error"); + BigQueryError lastNameNotFoundError = new BigQueryError("invalid", "first_name", "no such field: last_name"); + Map<Long, List<BigQueryError>> errorInfoMap = ImmutableMap.of( + 0L, asList(firstNameNotFoundError, anotherError), + 1L, Collections.singletonList(lastNameNotFoundError)); + + + Record validRecordWithFirstName = Record.builder() + .columns(ImmutableMap.of("first_name", "john doe")) + .build(); + + Map<String, Object> columnsMapWithLastName = ImmutableMap.of("last_name", "john carmack"); + Record validRecordWithLastName = Record.builder() + .columns(columnsMapWithLastName) + .build(); + + List<Record> validRecords =
+                ImmutableList.of(validRecordWithFirstName, validRecordWithLastName);
+
+        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, bigQuerySinkConfig, instrumentation);
+        jsonErrorHandler.handle(errorInfoMap, validRecords);
+
+        verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture());
+
+        Field firstName = getField("first_name", LegacySQLTypeName.STRING);
+        Field lastName = getField("last_name", LegacySQLTypeName.STRING);
+        List<Field> actualFields = fieldsArgumentCaptor.getValue();
+        assertThat(actualFields, containsInAnyOrder(firstName, lastName));
+    }
+
+    @Test
+    public void shouldIgnoreRecordsWhichHaveOtherErrors() {
+        when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema);
+
+        BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", "no such field: first_name");
+        BigQueryError otherError = new BigQueryError("otherreasons", "planet earth", "server error");
+        Map<Long, List<BigQueryError>> errorInfoMap = ImmutableMap.of(
+                1L, asList(noSuchFieldError, otherError),
+                0L, Collections.singletonList(otherError));
+
+        Record validRecordWithFirstName = Record.builder()
+                .columns(ImmutableMap.of("first_name", "john doe"))
+                .build();
+
+        Record validRecordWithLastName = Record.builder()
+                .columns(ImmutableMap.of("last_name", "john carmack"))
+                .build();
+
+        List<Record> validRecords = asList(validRecordWithFirstName, validRecordWithLastName);
+
+        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, bigQuerySinkConfig, instrumentation);
+        jsonErrorHandler.handle(errorInfoMap, validRecords);
+
+        verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture());
+
+        Field lastName = getField("last_name", LegacySQLTypeName.STRING);
+        List<Field> actualFields = fieldsArgumentCaptor.getValue();
+        assertThat(actualFields, containsInAnyOrder(lastName));
+    }
+
+    @Test
+    public void shouldIgnoreRecordsWithNoErrors() {
+        when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema);
+
+        BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", "no such field: first_name");
+        Map<Long, List<BigQueryError>> errorInfoMap = ImmutableMap.of(1L, Collections.singletonList(noSuchFieldError));
+
+        Record validRecordWithFirstName = Record.builder()
+                .columns(ImmutableMap.of("first_name", "john doe"))
+                .build();
+
+        Record validRecordWithLastName = Record.builder()
+                .columns(ImmutableMap.of("last_name", "john carmack"))
+                .build();
+
+        List<Record> validRecords = asList(validRecordWithFirstName, validRecordWithLastName);
+
+        JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, bigQuerySinkConfig, instrumentation);
+        jsonErrorHandler.handle(errorInfoMap, validRecords);
+
+        verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture());
+
+        Field lastName = getField("last_name", LegacySQLTypeName.STRING);
+        List<Field> actualFields = fieldsArgumentCaptor.getValue();
+        assertThat(actualFields, containsInAnyOrder(lastName));
+    }
+
+    @Test
+    public void shouldUpdateOnlyUniqueFields() {
+        when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema);
+
+        BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", "no such field: first_name");
+        Map<Long, List<BigQueryError>> errorInfoMap = ImmutableMap.of(
+                0L, Collections.singletonList(noSuchFieldError),
+                1L, Collections.singletonList(noSuchFieldError),
+                2L, Collections.singletonList(noSuchFieldError));
+
+        Record validRecordWithFirstName = Record.builder()
+                .columns(ImmutableMap.of("first_name", "john doe"))
+                .build();
+
+        Map<String, Object> columnsMapWithLastName = ImmutableMap.of("last_name", "john carmack");
+        Record
validRecordWithLastName = Record.builder() + .columns(columnsMapWithLastName) + .build(); + Record anotheRecordWithLastName = Record.builder() + .columns(columnsMapWithLastName) + .build(); + + List validRecords = ImmutableList.of(validRecordWithFirstName, validRecordWithLastName, anotheRecordWithLastName); + + JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, bigQuerySinkConfig, instrumentation); + jsonErrorHandler.handle(errorInfoMap, validRecords); + + verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture()); + + Field lastName = getField("last_name", LegacySQLTypeName.STRING); + Field firstName = getField("first_name", LegacySQLTypeName.STRING); + List actualFields = fieldsArgumentCaptor.getValue(); + assertThat(actualFields, containsInAnyOrder(firstName, lastName)); + } + + @Test + public void shouldUpdatWithBothMissingFieldsAndExistingTableFields() { + // existing table fields + Field lastName = getField("last_name", LegacySQLTypeName.STRING); + Field firstName = getField("first_name", LegacySQLTypeName.STRING); + + Schema nonEmptyTableSchema = Schema.of(firstName, lastName); + when(bigQueryClient.getSchema()).thenReturn(nonEmptyTableSchema); + + BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", + "no such field: first_name"); + Map> errorInfoMap = ImmutableMap.of( + 0L, Collections.singletonList(noSuchFieldError), + 1L, Collections.singletonList(noSuchFieldError), + 2L, Collections.singletonList(noSuchFieldError)); + + Map columnsMapWithFistName = ImmutableMap.of( + "first_name", "john doe", + "newFieldAddress", "planet earth"); + Record validRecordWithFirstName = Record.builder() + .columns(columnsMapWithFistName) + .build(); + + Record validRecordWithLastName = Record.builder() + .columns(ImmutableMap.of("newFieldDog", "golden retriever")) + .build(); + Record anotheRecordWithLastName = Record.builder() + .columns(ImmutableMap.of("newFieldDog", "golden retriever")) + .build(); + + List validRecords = ImmutableList.of(validRecordWithFirstName, validRecordWithLastName, + anotheRecordWithLastName); + + JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, bigQuerySinkConfig, + instrumentation); + jsonErrorHandler.handle(errorInfoMap, validRecords); + + verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture()); + + // missing fields + Field newFieldDog = getField("newFieldDog", LegacySQLTypeName.STRING); + Field newFieldAddress = getField("newFieldAddress", LegacySQLTypeName.STRING); + + List actualFields = fieldsArgumentCaptor.getValue(); + assertThat(actualFields, containsInAnyOrder(firstName, lastName, newFieldDog, newFieldAddress)); + } + + @Test + public void shouldUpsertTableWithPartitionKey() { + when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema); + + + BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", "no such field: first_name"); + Map> errorInfoMap = ImmutableMap.of( + 0L, Collections.singletonList(noSuchFieldError), + 1L, Collections.singletonList(noSuchFieldError)); + + Record validRecordWithFirstName = Record.builder() + .columns(ImmutableMap.of("first_name", "john doe")) + .build(); + + Map columnsMapWithTimestamp = ImmutableMap.of( + "last_name", "john carmack", + "event_timestamp_partition", "today's date"); + Record validRecordWithLastName = Record.builder().columns(columnsMapWithTimestamp).build(); + + List validRecords = ImmutableList.of(validRecordWithFirstName, validRecordWithLastName); + + Map envMap = ImmutableMap.of( + 
"SINK_BIGQUERY_TABLE_PARTITIONING_ENABLE", "true", + "SINK_BIGQUERY_TABLE_PARTITION_KEY", "event_timestamp_partition", + "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp_partition=timestamp"); + BigQuerySinkConfig partitionKeyConfig = ConfigFactory.create(BigQuerySinkConfig.class, envMap); + JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, partitionKeyConfig, instrumentation); + jsonErrorHandler.handle(errorInfoMap, validRecords); + + + verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture()); + + Field firstName = getField("first_name", LegacySQLTypeName.STRING); + Field lastName = getField("last_name", LegacySQLTypeName.STRING); + Field eventTimestamp = getField("event_timestamp_partition", LegacySQLTypeName.TIMESTAMP); + List actualFields = fieldsArgumentCaptor.getValue(); + assertThat(actualFields, containsInAnyOrder(firstName, lastName, eventTimestamp)); + } + + @Test + public void shouldThrowExceptionWhenCastFieldsToStringNotTrue() { + when(bigQueryClient.getSchema()).thenReturn(emptyTableSchema); + + BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", "no such field: first_name"); + Map> errorInfoMap = ImmutableMap.of(0L, Collections.singletonList(noSuchFieldError)); + + Record validRecord = Record.builder() + .columns(ImmutableMap.of("first_name", "john doe")) + .build(); + + List records = Collections.singletonList(validRecord); + BigQuerySinkConfig stringDisableConfig = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of( + "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "false")); + JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, stringDisableConfig, instrumentation); + assertThrows(UnsupportedOperationException.class, () -> jsonErrorHandler.handle(errorInfoMap, records)); + + verify(bigQueryClient, never()).upsertTable(any()); + } + + @Test + public void shouldUpdateMissingMetadataFields() { + // existing table fields + Field lastName = getField("last_name", LegacySQLTypeName.STRING); + Field firstName = getField("first_name", LegacySQLTypeName.STRING); + + Schema nonEmptyTableSchema = Schema.of(firstName, lastName); + when(bigQueryClient.getSchema()).thenReturn(nonEmptyTableSchema); + + BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", + "no such field: first_name"); + Map> errorInfoMap = ImmutableMap.of( + 0L, Collections.singletonList(noSuchFieldError), + 1L, Collections.singletonList(noSuchFieldError), + 2L, Collections.singletonList(noSuchFieldError)); + + Map columnsMapWithFistName = ImmutableMap.of( + "first_name", "john doe", + "newFieldAddress", "planet earth", + "message_offset", 111, + "load_time", new DateTime(System.currentTimeMillis())); + Record validRecordWithFirstName = Record.builder() + .columns(columnsMapWithFistName) + .metadata(ImmutableMap.of( + "message_offset", 111, + "load_time", new DateTime(System.currentTimeMillis()))) + .build(); + + Map columnsMapWithNewFieldDog = ImmutableMap.of( + "newFieldDog", "golden retriever", + "load_time", new DateTime(System.currentTimeMillis()), + "message_offset", 11); + Record validRecordWithLastName = Record.builder() + .columns(columnsMapWithNewFieldDog) + .metadata(ImmutableMap.of( + "load_time", new DateTime(System.currentTimeMillis()), + "message_offset", 11)) + .build(); + Record anotherRecordWithLastName = Record.builder() + .columns(columnsMapWithNewFieldDog) + .build(); + + List validRecords = asList(validRecordWithFirstName, validRecordWithLastName, + anotherRecordWithLastName); 
+ + Map config = ImmutableMap.of("SINK_BIGQUERY_METADATA_COLUMNS_TYPES", + "message_offset=integer,load_time=timestamp"); + BigQuerySinkConfig sinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, config); + JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, sinkConfig, instrumentation); + jsonErrorHandler.handle(errorInfoMap, validRecords); + + verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture()); + + // missing fields + Field newFieldDog = getField("newFieldDog", LegacySQLTypeName.STRING); + Field newFieldAddress = getField("newFieldAddress", LegacySQLTypeName.STRING); + + Field messageOffset = getField("message_offset", LegacySQLTypeName.INTEGER); + Field loadTime = getField("load_time", LegacySQLTypeName.TIMESTAMP); + List actualFields = fieldsArgumentCaptor.getValue(); + assertThat(actualFields, + containsInAnyOrder(messageOffset, loadTime, firstName, lastName, newFieldDog, + newFieldAddress)); + } + + @Test + public void shouldUpdateMissingMetadataFieldsAndDefaultColumns() { + // existing table fields + Field lastName = getField("last_name", LegacySQLTypeName.STRING); + Field firstName = getField("first_name", LegacySQLTypeName.STRING); + + Schema nonEmptyTableSchema = Schema.of(firstName, lastName); + when(bigQueryClient.getSchema()).thenReturn(nonEmptyTableSchema); + + BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", + "no such field: first_name"); + Map> errorInfoMap = ImmutableMap.of( + 0L, Collections.singletonList(noSuchFieldError), + 1L, Collections.singletonList(noSuchFieldError), + 2L, Collections.singletonList(noSuchFieldError)); + + Map columnsMapWithFistName = ImmutableMap.of( + "first_name", "john doe", + "newFieldAddress", "planet earth", + "depot", 123, + "message_offset", 111, + "load_time", new DateTime(System.currentTimeMillis())); + Record validRecordWithFirstName = Record.builder() + .columns(columnsMapWithFistName) + .metadata(ImmutableMap.of( + "message_offset", 111, + "load_time", new DateTime(System.currentTimeMillis()))) + .build(); + + Map columnsMapWithNewFieldDog = ImmutableMap.of( + "newFieldDog", "golden retriever", + "load_time", new DateTime(System.currentTimeMillis()), + "message_offset", 11); + Record validRecordWithLastName = Record.builder() + .columns(columnsMapWithNewFieldDog) + .metadata(ImmutableMap.of( + "load_time", new DateTime(System.currentTimeMillis()), + "message_offset", 11)) + .build(); + Record anotheRecordWithLastName = Record.builder() + .columns(columnsMapWithNewFieldDog) + .build(); + + List validRecords = asList(validRecordWithFirstName, validRecordWithLastName, + anotheRecordWithLastName); + + Map config = ImmutableMap.of("SINK_BIGQUERY_METADATA_COLUMNS_TYPES", + "message_offset=integer,load_time=timestamp", + "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp_partition=timestamp,depot=integer"); + BigQuerySinkConfig sinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, config); + JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, sinkConfig, instrumentation); + jsonErrorHandler.handle(errorInfoMap, validRecords); + + verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture()); + + // missing fields + Field newFieldDog = getField("newFieldDog", LegacySQLTypeName.STRING); + Field newFieldAddress = getField("newFieldAddress", LegacySQLTypeName.STRING); + + Field messageOffset = getField("message_offset", LegacySQLTypeName.INTEGER); + Field loadTime = getField("load_time", LegacySQLTypeName.TIMESTAMP); + Field 
depot = getField("depot", LegacySQLTypeName.INTEGER); + List actualFields = fieldsArgumentCaptor.getValue(); + assertThat(actualFields, + containsInAnyOrder(messageOffset, loadTime, firstName, lastName, newFieldDog, + newFieldAddress, depot)); + } + + @Test + public void shouldNotAddMetadataFieldsWhenDisabled() { + // existing table fields + Field lastName = getField("last_name", LegacySQLTypeName.STRING); + Field firstName = getField("first_name", LegacySQLTypeName.STRING); + + Schema nonEmptyTableSchema = Schema.of(firstName, lastName); + when(bigQueryClient.getSchema()).thenReturn(nonEmptyTableSchema); + + BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", + "no such field: first_name"); + Map> errorInfoMap = ImmutableMap.of( + 0L, Collections.singletonList(noSuchFieldError), + 1L, Collections.singletonList(noSuchFieldError), + 2L, Collections.singletonList(noSuchFieldError)); + + Map columnsMapWithFistName = ImmutableMap.of( + "first_name", "john doe", + "newFieldAddress", "planet earth"); + Record validRecordWithFirstName = Record.builder() + .columns(columnsMapWithFistName) + .metadata(ImmutableMap.of( + "message_offset", 111, + "load_time", new DateTime(System.currentTimeMillis()))) + .build(); + + Record validRecordWithLastName = Record.builder() + .columns(ImmutableMap.of( + "newFieldDog", "golden retriever")) + .metadata(ImmutableMap.of( + "load_time", new DateTime(System.currentTimeMillis()), + "message_offset", 11)) + .build(); + Record anotheRecordWithLastName = Record.builder() + .columns(ImmutableMap.of( + "newFieldDog", "german sheppperd")) + .build(); + + List validRecords = asList(validRecordWithFirstName, validRecordWithLastName, + anotheRecordWithLastName); + + Map config = ImmutableMap + .of("SINK_BIGQUERY_METADATA_COLUMNS_TYPES", + "message_offset=integer,load_time=timestamp", + "SINK_BIGQUERY_ADD_METADATA_ENABLED", "false"); + BigQuerySinkConfig sinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, config); + JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, sinkConfig, instrumentation); + jsonErrorHandler.handle(errorInfoMap, validRecords); + + verify(bigQueryClient, times(1)).upsertTable(fieldsArgumentCaptor.capture()); + + // missing fields + Field newFieldDog = getField("newFieldDog", LegacySQLTypeName.STRING); + Field newFieldAddress = getField("newFieldAddress", LegacySQLTypeName.STRING); + + List actualFields = fieldsArgumentCaptor.getValue(); + assertThat(actualFields, + containsInAnyOrder(firstName, lastName, newFieldDog, newFieldAddress)); + } + + @Test + public void shouldThrowErrorForNamespacedMetadataNotSupported() { + // existing table fields + Field lastName = getField("last_name", LegacySQLTypeName.STRING); + Field firstName = getField("first_name", LegacySQLTypeName.STRING); + + Schema nonEmptyTableSchema = Schema.of(firstName, lastName); + when(bigQueryClient.getSchema()).thenReturn(nonEmptyTableSchema); + + BigQueryError noSuchFieldError = new BigQueryError("invalid", "first_name", + "no such field: first_name"); + Map> errorInfoMap = ImmutableMap.of( + 0L, Collections.singletonList(noSuchFieldError), + 1L, Collections.singletonList(noSuchFieldError), + 2L, Collections.singletonList(noSuchFieldError)); + + Map columnsMapWithFistName = ImmutableMap.of( + "first_name", "john doe", + "newFieldAddress", "planet earth", + "message_offset", 111); + Record validRecordWithFirstName = Record.builder() + .columns(columnsMapWithFistName) + .build(); + + Map columnsMapWithNewFieldDog = ImmutableMap.of( + 
"newFieldDog", "golden retriever", + "load_time", new DateTime(System.currentTimeMillis())); + Record validRecordWithLastName = Record.builder() + .columns(columnsMapWithNewFieldDog) + .build(); + Record anotheRecordWithLastName = Record.builder() + .columns(columnsMapWithNewFieldDog) + .build(); + + List validRecords = asList(validRecordWithFirstName, validRecordWithLastName, + anotheRecordWithLastName); + + Map config = ImmutableMap.of("SINK_BIGQUERY_METADATA_COLUMNS_TYPES", + "message_offset=integer,load_time=timestamp", + "SINK_BIGQUERY_METADATA_NAMESPACE", "hello_world_namespace"); + BigQuerySinkConfig sinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, config); + JsonErrorHandler jsonErrorHandler = new JsonErrorHandler(bigQueryClient, sinkConfig, instrumentation); + assertThrows(UnsupportedOperationException.class, + () -> jsonErrorHandler.handle(errorInfoMap, validRecords)); + verify(bigQueryClient, never()).upsertTable(any()); + } +} diff --git a/src/test/java/org/raystack/depot/bigquery/json/BigqueryJsonUpdateListenerTest.java b/src/test/java/org/raystack/depot/bigquery/json/BigqueryJsonUpdateListenerTest.java new file mode 100644 index 00000000..173694d6 --- /dev/null +++ b/src/test/java/org/raystack/depot/bigquery/json/BigqueryJsonUpdateListenerTest.java @@ -0,0 +1,234 @@ +package org.raystack.depot.bigquery.json; + +import com.google.cloud.bigquery.Field; +import com.google.cloud.bigquery.LegacySQLTypeName; +import com.google.cloud.bigquery.Schema; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableMap; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.bigquery.client.BigQueryClient; +import org.raystack.depot.bigquery.converter.MessageRecordConverter; +import org.raystack.depot.bigquery.converter.MessageRecordConverterCache; +import org.raystack.depot.metrics.Instrumentation; +import org.aeonbits.owner.ConfigFactory; +import org.junit.Before; +import org.junit.Test; +import org.mockito.ArgumentCaptor; + +import java.util.Collections; +import java.util.List; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.junit.Assert.assertThrows; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +public class BigqueryJsonUpdateListenerTest { + + private MessageRecordConverterCache converterCache; + private BigQueryClient mockBqClient; + private Instrumentation instrumentation; + + @Before + public void setUp() throws Exception { + converterCache = mock(MessageRecordConverterCache.class); + mockBqClient = mock(BigQueryClient.class); + Schema emptySchema = Schema.of(); + when(mockBqClient.getSchema()).thenReturn(emptySchema); + instrumentation = mock(Instrumentation.class); + } + + @Test + public void shouldSetMessageRecordConverterAndUpsertTable() { + BigQuerySinkConfig bigQuerySinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, + Collections.emptyMap()); + BigqueryJsonUpdateListener updateListener = new BigqueryJsonUpdateListener(bigQuerySinkConfig, + converterCache, mockBqClient, instrumentation); + updateListener.setMessageParser(null); + updateListener.updateSchema(); + verify(converterCache, times(1)).setMessageRecordConverter(any(MessageRecordConverter.class)); + verify(mockBqClient, times(1)).upsertTable(Collections.emptyList()); + } + + @Test + public 
void shouldCreateTableWithDefaultColumns() { + + BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of( + "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp,first_name=string", + "SINK_CONNECTOR_DEFAULT_DATATYPE_STRING_ENABLE", "false")); + BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, + converterCache, mockBqClient, instrumentation); + bigqueryJsonUpdateListener.updateSchema(); + List bqSchemaFields = ImmutableList.of( + Field.newBuilder("event_timestamp", LegacySQLTypeName.TIMESTAMP) + .setMode(Field.Mode.NULLABLE).build(), + Field.newBuilder("first_name", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE) + .build()); + verify(mockBqClient, times(1)).upsertTable(bqSchemaFields); + } + + @Test + public void shouldCreateTableWithDefaultColumnsAndMetadataFields() { + BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of( + "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp,first_name=string", + "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "false", + "SINK_BIGQUERY_METADATA_COLUMNS_TYPES", + "message_offset=integer,message_topic=string,message_timestamp=timestamp", + "SINK_BIGQUERY_ADD_METADATA_ENABLED", "true")); + BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, + converterCache, mockBqClient, instrumentation); + bigqueryJsonUpdateListener.updateSchema(); + List bqSchemaFields = ImmutableList.of( + Field.newBuilder("event_timestamp", LegacySQLTypeName.TIMESTAMP) + .setMode(Field.Mode.NULLABLE).build(), + Field.newBuilder("first_name", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE) + .build(), + Field.newBuilder("message_offset", LegacySQLTypeName.INTEGER) + .setMode(Field.Mode.NULLABLE).build(), + Field.newBuilder("message_topic", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE) + .build(), + Field.newBuilder("message_timestamp", LegacySQLTypeName.TIMESTAMP) + .setMode(Field.Mode.NULLABLE).build()); + ArgumentCaptor> listArgumentCaptor = ArgumentCaptor.forClass(List.class); + verify(mockBqClient, times(1)).upsertTable(listArgumentCaptor.capture()); + assertThat(listArgumentCaptor.getValue(), containsInAnyOrder(bqSchemaFields.toArray())); + } + + @Test + public void shouldCreateTableWithDefaultColumnsWithDdifferentTypesAndMetadataFields() { + BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of( + "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp,first_name=integer", + "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "true", + "SINK_BIGQUERY_METADATA_COLUMNS_TYPES", + "message_offset=integer,message_topic=string,message_timestamp=timestamp", + "SINK_BIGQUERY_ADD_METADATA_ENABLED", "true")); + BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, + converterCache, mockBqClient, instrumentation); + bigqueryJsonUpdateListener.updateSchema(); + List bqSchemaFields = ImmutableList.of( + Field.newBuilder("event_timestamp", LegacySQLTypeName.TIMESTAMP) + .setMode(Field.Mode.NULLABLE).build(), + Field.newBuilder("first_name", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE) + .build(), + Field.newBuilder("message_offset", LegacySQLTypeName.INTEGER) + .setMode(Field.Mode.NULLABLE).build(), + Field.newBuilder("message_topic", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE) + .build(), + Field.newBuilder("message_timestamp", LegacySQLTypeName.TIMESTAMP) + .setMode(Field.Mode.NULLABLE).build()); + 
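+        // The schema list handed to upsertTable is captured so the assertion
+        // can compare fields order-independently via containsInAnyOrder.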
ArgumentCaptor> listArgumentCaptor = ArgumentCaptor.forClass(List.class); + verify(mockBqClient, times(1)).upsertTable(listArgumentCaptor.capture()); + assertThat(listArgumentCaptor.getValue(), containsInAnyOrder(bqSchemaFields.toArray())); + } + + @Test + public void shouldNotAddMetadataFields() { + BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of( + "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp,first_name=string", + "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "false", + "SINK_BIGQUERY_METADATA_COLUMNS_TYPES", + "message_offset=integer,message_topic=string,message_timestamp=timestamp", + "SINK_BIGQUERY_ADD_METADATA_ENABLED", "false")); + BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, + converterCache, mockBqClient, instrumentation); + bigqueryJsonUpdateListener.updateSchema(); + List bqSchemaFields = ImmutableList.of( + Field.newBuilder("event_timestamp", LegacySQLTypeName.TIMESTAMP) + .setMode(Field.Mode.NULLABLE).build(), + Field.newBuilder("first_name", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE) + .build()); + ArgumentCaptor> listArgumentCaptor = ArgumentCaptor.forClass(List.class); + verify(mockBqClient, times(1)).upsertTable(listArgumentCaptor.capture()); + assertThat(listArgumentCaptor.getValue(), containsInAnyOrder(bqSchemaFields.toArray())); + } + + @Test + public void shouldThrowErrorIfDefaultColumnsAndMetadataFieldsContainSameEntryCalledFirstName() { + BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of( + "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp,first_name=string", + "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "false", + "SINK_BIGQUERY_METADATA_COLUMNS_TYPES", "message_offset=integer,first_name=integer", + "SINK_BIGQUERY_ADD_METADATA_ENABLED", "true")); + BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, + converterCache, mockBqClient, instrumentation); + assertThrows(IllegalArgumentException.class, bigqueryJsonUpdateListener::updateSchema); + } + + @Test + public void shouldThrowErrorIfMetadataNamespaceIsNotEmpty() { + BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of( + "SINK_BIGQUERY_METADATA_COLUMNS_TYPES", "message_offset=integer,first_name=integer", + "SINK_BIGQUERY_ADD_METADATA_ENABLED", "true", + "SINK_BIGQUERY_METADATA_NAMESPACE", "metadata_namespace")); + BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, + converterCache, mockBqClient, instrumentation); + assertThrows(UnsupportedOperationException.class, bigqueryJsonUpdateListener::updateSchema); + } + + @Test + public void shouldCreateTableWithDefaultColumnsAndExistingTableColumns() { + Field existingField1 = Field.of("existing_field1", LegacySQLTypeName.STRING); + Field existingField2 = Field.of("existing_field2", LegacySQLTypeName.STRING); + when(mockBqClient.getSchema()).thenReturn(Schema.of(existingField1, + existingField2)); + BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of( + "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp,first_name=string", + "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "false")); + BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, + converterCache, mockBqClient, instrumentation); + bigqueryJsonUpdateListener.updateSchema(); + ArgumentCaptor> listArgumentCaptor = 
ArgumentCaptor.forClass(List.class); + verify(mockBqClient, times(1)).upsertTable(listArgumentCaptor.capture()); + List actualFields = listArgumentCaptor.getValue(); + Field eventTimestampField = Field.newBuilder("event_timestamp", LegacySQLTypeName.TIMESTAMP) + .setMode(Field.Mode.NULLABLE).build(); + Field firstNameField = Field.newBuilder("first_name", LegacySQLTypeName.STRING) + .setMode(Field.Mode.NULLABLE).build(); + assertThat(actualFields, containsInAnyOrder(eventTimestampField, firstNameField, existingField1, + existingField2)); + } + + @Test + public void shouldNotCastPartitionKeyToString() { + BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of( + "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=timestamp,first_name=string", + "SINK_BIGQUERY_TABLE_PARTITION_KEY", "event_timestamp", + "SINK_BIGQUERY_TABLE_PARTITIONING_ENABLE", "true", + "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "true")); + BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, + converterCache, mockBqClient, instrumentation); + bigqueryJsonUpdateListener.updateSchema(); + List bqSchemaFields = ImmutableList.of( + Field.newBuilder("event_timestamp", LegacySQLTypeName.TIMESTAMP) + .setMode(Field.Mode.NULLABLE).build(), + Field.newBuilder("first_name", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE) + .build()); + verify(mockBqClient, times(1)).upsertTable(bqSchemaFields); + } + + @Test + public void shouldThrowErrorWhenPartitionKeyTypeIsNotCorrect() { + BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, ImmutableMap.of( + "SINK_BIGQUERY_DEFAULT_COLUMNS", "event_timestamp=integer,first_name=string", + "SINK_BIGQUERY_TABLE_PARTITION_KEY", "event_timestamp", + "SINK_BIGQUERY_TABLE_PARTITIONING_ENABLE", "true", + "SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "true")); + BigqueryJsonUpdateListener bigqueryJsonUpdateListener = new BigqueryJsonUpdateListener(config, + converterCache, mockBqClient, instrumentation); + assertThrows(UnsupportedOperationException.class, bigqueryJsonUpdateListener::updateSchema); + } + + @Test + public void shouldThrowExceptionWhenDynamicSchemaNotEnabled() { + BigQuerySinkConfig bigQuerySinkConfig = ConfigFactory.create(BigQuerySinkConfig.class, + ImmutableMap.of("SINK_BIGQUERY_DYNAMIC_SCHEMA_ENABLE", "false")); + assertThrows(UnsupportedOperationException.class, + () -> new BigqueryJsonUpdateListener(bigQuerySinkConfig, converterCache, mockBqClient, + instrumentation)); + + } +} diff --git a/src/test/java/io/odpf/depot/bigquery/models/BQFieldTest.java b/src/test/java/org/raystack/depot/bigquery/models/BQFieldTest.java similarity index 94% rename from src/test/java/io/odpf/depot/bigquery/models/BQFieldTest.java rename to src/test/java/org/raystack/depot/bigquery/models/BQFieldTest.java index 43a2e2e9..b7600049 100644 --- a/src/test/java/io/odpf/depot/bigquery/models/BQFieldTest.java +++ b/src/test/java/org/raystack/depot/bigquery/models/BQFieldTest.java @@ -1,12 +1,12 @@ -package io.odpf.depot.bigquery.models; +package org.raystack.depot.bigquery.models; import com.google.cloud.bigquery.Field; import com.google.cloud.bigquery.FieldList; import com.google.cloud.bigquery.LegacySQLTypeName; import com.google.protobuf.Descriptors; -import io.odpf.depot.TestMessage; -import io.odpf.depot.TestTypesMessage; -import io.odpf.depot.message.proto.ProtoField; +import org.raystack.depot.TestMessage; +import org.raystack.depot.TestTypesMessage; +import 
org.raystack.depot.message.proto.ProtoField; import org.junit.Test; import java.util.ArrayList; @@ -16,7 +16,8 @@ public class BQFieldTest { - private Descriptors.Descriptor testMessageDescriptor = TestTypesMessage.newBuilder().build().getDescriptorForType(); + private final Descriptors.Descriptor testMessageDescriptor = TestTypesMessage.newBuilder().build() + .getDescriptorForType(); @Test public void shouldReturnBigqueryField() { @@ -46,19 +47,22 @@ public void shouldReturnBigqueryFieldWithChildField() { bqField.setSubFields(childFields); Field field = bqField.getField(); - Field expectedOrderNumberBqField = Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build(); - Field expectedOrderNumberBqFieldUrl = Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build(); - Field expectedOrderDetailsBqField1 = Field.newBuilder("order_details", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build(); + Field expectedOrderNumberBqField = Field.newBuilder("order_number", LegacySQLTypeName.STRING) + .setMode(Field.Mode.NULLABLE).build(); + Field expectedOrderNumberBqFieldUrl = Field.newBuilder("order_url", LegacySQLTypeName.STRING) + .setMode(Field.Mode.NULLABLE).build(); + Field expectedOrderDetailsBqField1 = Field.newBuilder("order_details", LegacySQLTypeName.STRING) + .setMode(Field.Mode.NULLABLE).build(); Field expected = Field.newBuilder(fieldName, LegacySQLTypeName.RECORD, FieldList.of(expectedOrderNumberBqField, expectedOrderNumberBqFieldUrl, - expectedOrderDetailsBqField1)).setMode(Field.Mode.NULLABLE).build(); + expectedOrderDetailsBqField1)) + .setMode(Field.Mode.NULLABLE).build(); assertEquals(expected, field); } - @Test public void shouldConvertProtobufTimestampToBigqueryTimestamp() { String fieldName = "timestamp_value"; @@ -91,7 +95,6 @@ public void shouldConvertProtobufDurationToBigqueryRecord() { assertEquals(LegacySQLTypeName.RECORD, bqFieldType); } - @Test public void shouldConvertProtobufDoubleToBigqueryFloat() { String fieldName = "double_value"; diff --git a/src/test/java/io/odpf/depot/bigquery/models/ProtoFieldTest.java b/src/test/java/org/raystack/depot/bigquery/models/ProtoFieldTest.java similarity index 78% rename from src/test/java/io/odpf/depot/bigquery/models/ProtoFieldTest.java rename to src/test/java/org/raystack/depot/bigquery/models/ProtoFieldTest.java index de09aab1..00548408 100644 --- a/src/test/java/io/odpf/depot/bigquery/models/ProtoFieldTest.java +++ b/src/test/java/org/raystack/depot/bigquery/models/ProtoFieldTest.java @@ -1,9 +1,9 @@ -package io.odpf.depot.bigquery.models; +package org.raystack.depot.bigquery.models; import com.google.protobuf.DescriptorProtos; import com.google.protobuf.Descriptors; -import io.odpf.depot.TestTypesMessage; -import io.odpf.depot.message.proto.ProtoField; +import org.raystack.depot.TestTypesMessage; +import org.raystack.depot.message.proto.ProtoField; import org.junit.Test; import java.util.List; @@ -14,7 +14,8 @@ public class ProtoFieldTest { @Test public void shouldReturnNestedAsTrueWhenProtobufFieldTypeIsAMessage() { - DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = TestTypesMessage.getDescriptor().findFieldByName("duration_value").toProto(); + DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = TestTypesMessage.getDescriptor() + .findFieldByName("duration_value").toProto(); ProtoField protoField = new ProtoField(fieldDescriptorProto); assertTrue(protoField.isNested()); @@ -22,7 +23,8 @@ public void 
shouldReturnNestedAsTrueWhenProtobufFieldTypeIsAMessage() { @Test public void shouldReturnNestedAsFalseWhenProtobufFieldTypeIsTimestamp() { - DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = TestTypesMessage.getDescriptor().findFieldByName("timestamp_value").toProto(); + DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = TestTypesMessage.getDescriptor() + .findFieldByName("timestamp_value").toProto(); ProtoField protoField = new ProtoField(fieldDescriptorProto); assertFalse(protoField.isNested()); @@ -30,7 +32,8 @@ public void shouldReturnNestedAsFalseWhenProtobufFieldTypeIsTimestamp() { @Test public void shouldReturnNestedAsFalseWhenProtobufFieldTypeIsStruct() { - DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = TestTypesMessage.getDescriptor().findFieldByName("struct_value").toProto(); + DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = TestTypesMessage.getDescriptor() + .findFieldByName("struct_value").toProto(); ProtoField protoField = new ProtoField(fieldDescriptorProto); assertFalse(protoField.isNested()); @@ -38,7 +41,8 @@ public void shouldReturnNestedAsFalseWhenProtobufFieldTypeIsStruct() { @Test public void shouldReturnNestedAsFalseWhenProtobufFieldIsScalarValueTypes() { - DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = TestTypesMessage.getDescriptor().findFieldByName("timestamp_value").toProto(); + DescriptorProtos.FieldDescriptorProto fieldDescriptorProto = TestTypesMessage.getDescriptor() + .findFieldByName("timestamp_value").toProto(); ProtoField protoField = new ProtoField(fieldDescriptorProto); assertFalse(protoField.isNested()); @@ -51,9 +55,9 @@ public void shouldReturnProtoFieldString() { ProtoField protoField = new ProtoField(fieldDescriptorProto); List childFields = fieldDescriptor.getMessageType().getFields(); - List fieldList = childFields.stream().map(fd -> new ProtoField(fd.toProto())).collect(Collectors.toList()); - fieldList.forEach(pf -> - protoField.addField(pf)); + List fieldList = childFields.stream().map(fd -> new ProtoField(fd.toProto())) + .collect(Collectors.toList()); + fieldList.forEach(pf -> protoField.addField(pf)); String protoString = protoField.toString(); @@ -63,7 +67,6 @@ public void shouldReturnProtoFieldString() { + "{name='order_details', type=TYPE_STRING, len=0, nested=[]}]}", protoString); } - @Test public void shouldReturnEmptyProtoFieldString() { String protoString = new ProtoField().toString(); diff --git a/src/test/java/org/raystack/depot/bigquery/proto/BigqueryFieldsTest.java b/src/test/java/org/raystack/depot/bigquery/proto/BigqueryFieldsTest.java new file mode 100644 index 00000000..a92267fd --- /dev/null +++ b/src/test/java/org/raystack/depot/bigquery/proto/BigqueryFieldsTest.java @@ -0,0 +1,328 @@ +package org.raystack.depot.bigquery.proto; + +import com.google.cloud.bigquery.Field; +import com.google.cloud.bigquery.LegacySQLTypeName; +import com.google.protobuf.DescriptorProtos; +import org.raystack.depot.message.proto.TestProtoUtil; +import org.raystack.depot.message.proto.Constants; +import org.raystack.depot.message.proto.ProtoField; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import java.util.stream.IntStream; + +import static org.junit.Assert.assertEquals; + +public class BigqueryFieldsTest { + + private final Map expectedType = new HashMap() { + { + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_BYTES, LegacySQLTypeName.BYTES); + 
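+                // Expected proto-to-BigQuery legacy SQL type mapping for the
+                // non-integer scalars; the proto integer variants are asserted
+                // separately in shouldTestShouldConvertIntegerDataTypes.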
put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, LegacySQLTypeName.STRING); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_ENUM, LegacySQLTypeName.STRING); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_BOOL, LegacySQLTypeName.BOOLEAN); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_DOUBLE, LegacySQLTypeName.FLOAT); + put(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT, LegacySQLTypeName.FLOAT); + } + }; + + @Test + public void shouldTestConvertToSchemaSuccessful() { + List nestedBQFields = new ArrayList<>(); + nestedBQFields.add( + TestProtoUtil.createProtoField("field0_bytes", DescriptorProtos.FieldDescriptorProto.Type.TYPE_BYTES, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + nestedBQFields.add( + TestProtoUtil.createProtoField("field1_string", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + nestedBQFields + .add(TestProtoUtil.createProtoField("field2_bool", DescriptorProtos.FieldDescriptorProto.Type.TYPE_BOOL, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + nestedBQFields + .add(TestProtoUtil.createProtoField("field3_enum", DescriptorProtos.FieldDescriptorProto.Type.TYPE_ENUM, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + nestedBQFields.add( + TestProtoUtil.createProtoField("field4_double", DescriptorProtos.FieldDescriptorProto.Type.TYPE_DOUBLE, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + nestedBQFields.add( + TestProtoUtil.createProtoField("field5_float", DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + + List fields = BigqueryFields.generateBigquerySchema(TestProtoUtil.createProtoField(nestedBQFields)); + assertEquals(nestedBQFields.size(), fields.size()); + IntStream.range(0, nestedBQFields.size()) + .forEach(index -> { + assertEquals(Field.Mode.NULLABLE, fields.get(index).getMode()); + assertEquals(nestedBQFields.get(index).getName(), fields.get(index).getName()); + assertEquals(expectedType.get(nestedBQFields.get(index).getType()), fields.get(index).getType()); + }); + } + + @Test + public void shouldTestShouldConvertIntegerDataTypes() { + List allIntTypes = new ArrayList() { + { + add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64); + add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_UINT64); + add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32); + add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_UINT32); + add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FIXED64); + add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_FIXED32); + add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SFIXED32); + add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SFIXED64); + add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SINT32); + add(DescriptorProtos.FieldDescriptorProto.Type.TYPE_SINT64); + } + }; + + List nestedBQFields = IntStream.range(0, allIntTypes.size()) + .mapToObj(index -> TestProtoUtil.createProtoField("field-" + index, allIntTypes.get(index), + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)) + .collect(Collectors.toList()); + + List fields = BigqueryFields.generateBigquerySchema(TestProtoUtil.createProtoField(nestedBQFields)); + assertEquals(nestedBQFields.size(), fields.size()); + IntStream.range(0, nestedBQFields.size()) + .forEach(index -> { + assertEquals(Field.Mode.NULLABLE, fields.get(index).getMode()); + assertEquals(nestedBQFields.get(index).getName(), 
fields.get(index).getName()); + assertEquals(LegacySQLTypeName.INTEGER, fields.get(index).getType()); + }); + } + + @Test + public void shouldTestShouldConvertNestedField() { + List nestedBQFields = new ArrayList<>(); + nestedBQFields.add(TestProtoUtil.createProtoField("field1_level2_nested", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + nestedBQFields.add(TestProtoUtil.createProtoField("field2_level2_nested", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + + ProtoField protoField = TestProtoUtil.createProtoField(new ArrayList() { + { + add(TestProtoUtil.createProtoField("field1_level1", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + add(TestProtoUtil.createProtoField("field2_level1_message", + "some.type.name", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, + nestedBQFields)); + } + }); + + List fields = BigqueryFields.generateBigquerySchema(protoField); + + assertEquals(protoField.getFields().size(), fields.size()); + assertEquals(nestedBQFields.size(), fields.get(1).getSubFields().size()); + + assertBqField(protoField.getFields().get(0).getName(), LegacySQLTypeName.STRING, Field.Mode.NULLABLE, + fields.get(0)); + assertBqField(protoField.getFields().get(1).getName(), LegacySQLTypeName.RECORD, Field.Mode.NULLABLE, + fields.get(1)); + assertBqField(nestedBQFields.get(0).getName(), LegacySQLTypeName.STRING, Field.Mode.NULLABLE, + fields.get(1).getSubFields().get(0)); + assertBqField(nestedBQFields.get(1).getName(), LegacySQLTypeName.STRING, Field.Mode.NULLABLE, + fields.get(1).getSubFields().get(1)); + + } + + @Test + public void shouldTestShouldConvertMultiNestedFields() { + List nestedBQFields = new ArrayList() { + { + add(TestProtoUtil.createProtoField("field1_level3_nested", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + add(TestProtoUtil.createProtoField("field2_level3_nested", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + } + }; + + ProtoField protoField = TestProtoUtil.createProtoField(new ArrayList() { + { + add(TestProtoUtil.createProtoField("field1_level1", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + + add(TestProtoUtil.createProtoField( + "field2_level1_message", + "some.type.name", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, + new ArrayList() { + { + add(TestProtoUtil.createProtoField( + "field1_level2", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + add(TestProtoUtil.createProtoField( + "field2_level2_message", + "some.type.name", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, + nestedBQFields)); + add(TestProtoUtil.createProtoField( + "field3_level2", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + add(TestProtoUtil.createProtoField( + "field4_level2_message", + "some.type.name", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, + 
DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, + nestedBQFields)); + } + })); + } + }); + + List fields = BigqueryFields.generateBigquerySchema(protoField); + + assertEquals(protoField.getFields().size(), fields.size()); + assertEquals(4, fields.get(1).getSubFields().size()); + assertEquals(2, fields.get(1).getSubFields().get(1).getSubFields().size()); + assertEquals(2, fields.get(1).getSubFields().get(3).getSubFields().size()); + assertMultipleFields(nestedBQFields, fields.get(1).getSubFields().get(1).getSubFields()); + assertMultipleFields(nestedBQFields, fields.get(1).getSubFields().get(3).getSubFields()); + } + + @Test + public void shouldTestConvertToSchemaForTimestamp() { + ProtoField protoField = TestProtoUtil.createProtoField(new ArrayList() { + { + add(TestProtoUtil.createProtoField("field1_timestamp", + Constants.ProtobufTypeName.TIMESTAMP_PROTOBUF_TYPE_NAME, + DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + } + }); + + List fields = BigqueryFields.generateBigquerySchema(protoField); + + assertEquals(protoField.getFields().size(), fields.size()); + assertBqField(protoField.getFields().get(0).getName(), LegacySQLTypeName.TIMESTAMP, Field.Mode.NULLABLE, + fields.get(0)); + } + + @Test + public void shouldTestConvertToSchemaForSpecialFields() { + ProtoField protoField = TestProtoUtil.createProtoField(new ArrayList() { + { + add(TestProtoUtil.createProtoField("field1_struct", + Constants.ProtobufTypeName.STRUCT_PROTOBUF_TYPE_NAME, + DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + add(TestProtoUtil.createProtoField("field2_bytes", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_BYTES, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + + add(TestProtoUtil.createProtoField("field3_duration", + "." + com.google.protobuf.Duration.getDescriptor().getFullName(), + DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, + new ArrayList() { + { + add(TestProtoUtil.createProtoField("duration_seconds", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + + add(TestProtoUtil.createProtoField("duration_nanos", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + + } + })); + + add(TestProtoUtil.createProtoField("field3_date", + "." 
+ com.google.type.Date.getDescriptor().getFullName(), + DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, + new ArrayList() { + { + add(TestProtoUtil.createProtoField("year", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + + add(TestProtoUtil.createProtoField("month", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + + add(TestProtoUtil.createProtoField("day", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL)); + + } + })); + + } + }); + + List fields = BigqueryFields.generateBigquerySchema(protoField); + + assertEquals(protoField.getFields().size(), fields.size()); + assertBqField(protoField.getFields().get(0).getName(), LegacySQLTypeName.STRING, Field.Mode.NULLABLE, + fields.get(0)); + assertBqField(protoField.getFields().get(1).getName(), LegacySQLTypeName.BYTES, Field.Mode.NULLABLE, + fields.get(1)); + assertBqField(protoField.getFields().get(2).getName(), LegacySQLTypeName.RECORD, Field.Mode.NULLABLE, + fields.get(2)); + assertBqField(protoField.getFields().get(3).getName(), LegacySQLTypeName.RECORD, Field.Mode.NULLABLE, + fields.get(3)); + assertEquals(2, fields.get(2).getSubFields().size()); + assertBqField("duration_seconds", LegacySQLTypeName.INTEGER, Field.Mode.NULLABLE, + fields.get(2).getSubFields().get(0)); + assertBqField("duration_nanos", LegacySQLTypeName.INTEGER, Field.Mode.NULLABLE, + fields.get(2).getSubFields().get(1)); + + assertEquals(3, fields.get(3).getSubFields().size()); + assertBqField("year", LegacySQLTypeName.INTEGER, Field.Mode.NULLABLE, fields.get(3).getSubFields().get(0)); + assertBqField("month", LegacySQLTypeName.INTEGER, Field.Mode.NULLABLE, fields.get(3).getSubFields().get(1)); + assertBqField("day", LegacySQLTypeName.INTEGER, Field.Mode.NULLABLE, fields.get(3).getSubFields().get(2)); + } + + @Test + public void shouldTestConvertToSchemaForRepeatedFields() { + ProtoField protoField = TestProtoUtil.createProtoField(new ArrayList() { + { + add(TestProtoUtil.createProtoField("field1_map", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED)); + add(TestProtoUtil.createProtoField("field2_repeated", + DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING, + DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED)); + + } + }); + + List fields = BigqueryFields.generateBigquerySchema(protoField); + + assertEquals(protoField.getFields().size(), fields.size()); + assertBqField(protoField.getFields().get(0).getName(), LegacySQLTypeName.INTEGER, Field.Mode.REPEATED, + fields.get(0)); + assertBqField(protoField.getFields().get(1).getName(), LegacySQLTypeName.STRING, Field.Mode.REPEATED, + fields.get(1)); + } + + public void assertMultipleFields(List pfields, List bqFields) { + IntStream.range(0, bqFields.size()) + .forEach(index -> { + assertBqField(pfields.get(index).getName(), expectedType.get(pfields.get(index).getType()), + Field.Mode.NULLABLE, bqFields.get(index)); + }); + } + + public void assertBqField(String name, LegacySQLTypeName ftype, Field.Mode mode, Field bqf) { + assertEquals(mode, bqf.getMode()); + assertEquals(name, bqf.getName()); + assertEquals(ftype, bqf.getType()); + } + +} diff --git a/src/test/java/io/odpf/depot/bigquery/proto/BigqueryProtoUpdateListenerTest.java 
b/src/test/java/org/raystack/depot/bigquery/proto/BigqueryProtoUpdateListenerTest.java similarity index 60% rename from src/test/java/io/odpf/depot/bigquery/proto/BigqueryProtoUpdateListenerTest.java rename to src/test/java/org/raystack/depot/bigquery/proto/BigqueryProtoUpdateListenerTest.java index 6caff748..c82fb80d 100644 --- a/src/test/java/io/odpf/depot/bigquery/proto/BigqueryProtoUpdateListenerTest.java +++ b/src/test/java/org/raystack/depot/bigquery/proto/BigqueryProtoUpdateListenerTest.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigquery.proto; +package org.raystack.depot.bigquery.proto; import com.fasterxml.jackson.databind.node.JsonNodeFactory; import com.fasterxml.jackson.databind.node.ObjectNode; @@ -6,19 +6,19 @@ import com.google.cloud.bigquery.LegacySQLTypeName; import com.google.protobuf.Descriptors.Descriptor; import com.google.protobuf.InvalidProtocolBufferException; -import io.odpf.depot.TestKeyBQ; -import io.odpf.depot.common.TupleString; -import io.odpf.depot.message.OdpfMessageParser; -import io.odpf.depot.message.proto.ProtoOdpfMessageParser; -import io.odpf.depot.message.proto.TestProtoUtil; -import io.odpf.depot.bigquery.converter.MessageRecordConverterCache; -import io.odpf.depot.bigquery.client.BigQueryClient; -import io.odpf.depot.message.OdpfMessage; -import io.odpf.depot.message.proto.ProtoField; -import io.odpf.depot.bigquery.models.Records; -import io.odpf.depot.config.BigQuerySinkConfig; -import io.odpf.depot.common.Tuple; -import io.odpf.stencil.client.StencilClient; +import org.raystack.depot.TestKeyBQ; +import org.raystack.depot.message.proto.TestProtoUtil; +import org.raystack.depot.common.Tuple; +import org.raystack.depot.common.TupleString; +import org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.message.MessageParser; +import org.raystack.depot.message.proto.ProtoMessageParser; +import org.raystack.depot.bigquery.converter.MessageRecordConverterCache; +import org.raystack.depot.bigquery.client.BigQueryClient; +import org.raystack.depot.message.Message; +import org.raystack.depot.message.proto.ProtoField; +import org.raystack.depot.bigquery.models.Records; +import org.raystack.stencil.client.StencilClient; import org.aeonbits.owner.ConfigFactory; import org.junit.Assert; import org.junit.Before; @@ -49,7 +49,7 @@ public class BigqueryProtoUpdateListenerTest { @Before public void setUp() throws InvalidProtocolBufferException { - System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "io.odpf.depot.TestKeyBQ"); + System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "org.raystack.depot.TestKeyBQ"); System.setProperty("SINK_BIGQUERY_ENABLE_AUTO_SCHEMA_UPDATE", "false"); System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", ""); System.setProperty("SINK_BIGQUERY_METADATA_COLUMNS_TYPES", "topic=string,partition=integer,offset=integer"); @@ -60,42 +60,50 @@ public void setUp() throws InvalidProtocolBufferException { @Test public void shouldUseNewSchemaIfProtoChanges() { - BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config, bigQueryClient, converterWrapper); + BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config, + bigQueryClient, converterWrapper); ProtoField returnedProtoField = new ProtoField(); returnedProtoField.addField(TestProtoUtil.createProtoField("order_number", 1)); returnedProtoField.addField(TestProtoUtil.createProtoField("order_url", 2)); - HashMap descriptorsMap = new HashMap() {{ - put(String.format("%s", 
TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor()); - }}; + HashMap descriptorsMap = new HashMap() { + { + put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor()); + } + }; when(stencilClient.get(TestKeyBQ.class.getName())).thenReturn(descriptorsMap.get(TestKeyBQ.class.getName())); ObjectNode objNode = JsonNodeFactory.instance.objectNode(); objNode.put("1", "order_number"); objNode.put("2", "order_url"); - ArrayList bqSchemaFields = new ArrayList() {{ - add(Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); - addAll(BigqueryFields.getMetadataFields(new ArrayList() {{ - add(new TupleString("topic", "string")); - add(new TupleString("partition", "integer")); - add(new TupleString("offset", "integer")); - }})); - }}; + ArrayList bqSchemaFields = new ArrayList() { + { + add(Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); + add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build()); + addAll(BigqueryFields.getMetadataFields(new ArrayList() { + { + add(new TupleString("topic", "string")); + add(new TupleString("partition", "integer")); + add(new TupleString("offset", "integer")); + } + })); + } + }; doNothing().when(bigQueryClient).upsertTable(bqSchemaFields); - OdpfMessageParser parser = new ProtoOdpfMessageParser(stencilClient); - bigqueryProtoUpdateListener.setOdpfMessageParser(parser); + MessageParser parser = new ProtoMessageParser(stencilClient); + bigqueryProtoUpdateListener.setMessageParser(parser); bigqueryProtoUpdateListener.onSchemaUpdate(descriptorsMap); TestKeyBQ testKeyBQ = TestKeyBQ.newBuilder().setOrderNumber("order").setOrderUrl("test").build(); - OdpfMessage testMessage = new OdpfMessage( + Message testMessage = new Message( "".getBytes(), testKeyBQ.toByteArray(), new Tuple<>("topic", "topic"), new Tuple<>("partition", 1), new Tuple<>("offset", 1)); - Records convert = bigqueryProtoUpdateListener.getConverterCache().getMessageRecordConverter().convert(Collections.singletonList(testMessage)); + Records convert = bigqueryProtoUpdateListener.getConverterCache().getMessageRecordConverter() + .convert(Collections.singletonList(testMessage)); Assert.assertEquals(1, convert.getValidRecords().size()); Assert.assertEquals("order", convert.getValidRecords().get(0).getColumns().get("order_number")); Assert.assertEquals("test", convert.getValidRecords().get(0).getColumns().get("order_url")); @@ -103,57 +111,66 @@ public void shouldUseNewSchemaIfProtoChanges() { @Test public void shouldUseNewSchemaIfProtoChangesWhenNullDescriptorMapSentInBqSinkFactoryInit() { - BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config, bigQueryClient, converterWrapper); + BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config, + bigQueryClient, converterWrapper); ProtoField returnedProtoField = new ProtoField(); returnedProtoField.addField(TestProtoUtil.createProtoField("order_number", 1)); returnedProtoField.addField(TestProtoUtil.createProtoField("order_url", 2)); - HashMap descriptorsMap = new HashMap() {{ - put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor()); - }}; + HashMap descriptorsMap = new HashMap() { + { + put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor()); + } + }; 
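+        // Calling onSchemaUpdate(null) below exercises the fallback path in
+        // which the listener resolves descriptors via stencilClient.getAll().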
         when(stencilClient.get(TestKeyBQ.class.getName())).thenReturn(descriptorsMap.get(TestKeyBQ.class.getName()));
         when(stencilClient.getAll()).thenReturn(descriptorsMap);
         ObjectNode objNode = JsonNodeFactory.instance.objectNode();
         objNode.put("1", "order_number");
         objNode.put("2", "order_url");
-        ArrayList<Field> bqSchemaFields = new ArrayList<Field>() {{
-            add(Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
-            add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
-            addAll(BigqueryFields.getMetadataFields(new ArrayList<TupleString>() {{
-                add(new TupleString("topic", "string"));
-                add(new TupleString("partition", "integer"));
-                add(new TupleString("offset", "integer"));
-            }}));
-        }};
+        ArrayList<Field> bqSchemaFields = new ArrayList<Field>() {
+            {
+                add(Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
+                add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
+                addAll(BigqueryFields.getMetadataFields(new ArrayList<TupleString>() {
+                    {
+                        add(new TupleString("topic", "string"));
+                        add(new TupleString("partition", "integer"));
+                        add(new TupleString("offset", "integer"));
+                    }
+                }));
+            }
+        };
         doNothing().when(bigQueryClient).upsertTable(bqSchemaFields);
-        OdpfMessageParser parser = new ProtoOdpfMessageParser(stencilClient);
-        bigqueryProtoUpdateListener.setOdpfMessageParser(parser);
+        MessageParser parser = new ProtoMessageParser(stencilClient);
+        bigqueryProtoUpdateListener.setMessageParser(parser);
         bigqueryProtoUpdateListener.onSchemaUpdate(null);
         TestKeyBQ testKeyBQ = TestKeyBQ.newBuilder().setOrderNumber("order").setOrderUrl("test").build();
-        OdpfMessage testMessage = new OdpfMessage(
+        Message testMessage = new Message(
                 "".getBytes(),
                 testKeyBQ.toByteArray(),
                 new Tuple<>("topic", "topic"),
                 new Tuple<>("partition", 1),
                 new Tuple<>("offset", 1));
-        Records convert = bigqueryProtoUpdateListener.getConverterCache().getMessageRecordConverter().convert(Collections.singletonList(testMessage));
+        Records convert = bigqueryProtoUpdateListener.getConverterCache().getMessageRecordConverter()
+                .convert(Collections.singletonList(testMessage));
         Assert.assertEquals(1, convert.getValidRecords().size());
         Assert.assertEquals("order", convert.getValidRecords().get(0).getColumns().get("order_number"));
         Assert.assertEquals("test", convert.getValidRecords().get(0).getColumns().get("order_url"));
     }
-
-
     @Test(expected = RuntimeException.class)
     public void shouldThrowExceptionIfParserFails() {
-        BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config, bigQueryClient, converterWrapper);
-
-        HashMap<String, Descriptor> descriptorsMap = new HashMap<String, Descriptor>() {{
-            put(String.format("%s", TestKeyBQ.class.getName()), null);
-        }};
+        BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config,
+                bigQueryClient, converterWrapper);
+
+        HashMap<String, Descriptor> descriptorsMap = new HashMap<String, Descriptor>() {
+            {
+                put(String.format("%s", TestKeyBQ.class.getName()), null);
+            }
+        };
         ObjectNode objNode = JsonNodeFactory.instance.objectNode();
         objNode.put("1", "order_number");
         objNode.put("2", "order_url");
@@ -163,14 +180,17 @@ public void shouldThrowExceptionIfConverterFails() {
-        BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config, bigQueryClient, converterWrapper);
+        BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config,
+                bigQueryClient, converterWrapper);
 
         ProtoField returnedProtoField = new ProtoField();
         returnedProtoField.addField(TestProtoUtil.createProtoField("order_number", 1));
         returnedProtoField.addField(TestProtoUtil.createProtoField("order_url", 2));
-        HashMap<String, Descriptor> descriptorsMap = new HashMap<String, Descriptor>() {{
-            put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor());
-        }};
+        HashMap<String, Descriptor> descriptorsMap = new HashMap<String, Descriptor>() {
+            {
+                put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor());
+            }
+        };
         ObjectNode objNode = JsonNodeFactory.instance.objectNode();
         objNode.put("1", "order_number");
         objNode.put("2", "order_url");
@@ -180,15 +200,18 @@ public void shouldThrowExceptionIfDatasetLocationIsChanged() throws IOException
-        BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config, bigQueryClient, converterWrapper);
+        BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config,
+                bigQueryClient, converterWrapper);
 
         ProtoField returnedProtoField = new ProtoField();
         returnedProtoField.addField(TestProtoUtil.createProtoField("order_number", 1));
         returnedProtoField.addField(TestProtoUtil.createProtoField("order_url", 2));
-        HashMap<String, Descriptor> descriptorsMap = new HashMap<String, Descriptor>() {{
-            put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor());
-        }};
+        HashMap<String, Descriptor> descriptorsMap = new HashMap<String, Descriptor>() {
+            {
+                put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor());
+            }
+        };
         ObjectNode objNode = JsonNodeFactory.instance.objectNode();
         objNode.put("1", "order_number");
         objNode.put("2", "order_url");
@@ -198,41 +221,49 @@ public void shouldThrowExceptionIfDatasetLocationIsChanged() throws IOException
 
     @Test
     public void shouldNotNamespaceMetadataFieldsWhenNamespaceIsNotProvided() throws IOException {
-        BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config, bigQueryClient, converterWrapper);
+        BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config,
+                bigQueryClient, converterWrapper);
         ProtoField returnedProtoField = new ProtoField();
         returnedProtoField.addField(TestProtoUtil.createProtoField("order_number", 1));
         returnedProtoField.addField(TestProtoUtil.createProtoField("order_url", 2));
-        HashMap<String, Descriptor> descriptorsMap = new HashMap<String, Descriptor>() {{
-            put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor());
-        }};
+        HashMap<String, Descriptor> descriptorsMap = new HashMap<String, Descriptor>() {
+            {
+                put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor());
+            }
+        };
         when(stencilClient.get(TestKeyBQ.class.getName())).thenReturn(descriptorsMap.get(TestKeyBQ.class.getName()));
         ObjectNode objNode = JsonNodeFactory.instance.objectNode();
         objNode.put("1", "order_number");
         objNode.put("2", "order_url");
-        ArrayList<Field> bqSchemaFields = new ArrayList<Field>() {{
-            add(Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
-            add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
-            addAll(BigqueryFields.getMetadataFields(new ArrayList<TupleString>() {{
-                add(new TupleString("topic", "string"));
-                add(new TupleString("partition", "integer"));
-                add(new TupleString("offset", "integer"));
-            }}));
-        }};
+        ArrayList<Field> bqSchemaFields = new ArrayList<Field>() {
+            {
+                add(Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
+                add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
+                addAll(BigqueryFields.getMetadataFields(new ArrayList<TupleString>() {
+                    {
+                        add(new TupleString("topic", "string"));
+                        add(new TupleString("partition", "integer"));
+                        add(new TupleString("offset", "integer"));
+                    }
+                }));
+            }
+        };
         doNothing().when(bigQueryClient).upsertTable(bqSchemaFields);
-        OdpfMessageParser parser = new ProtoOdpfMessageParser(stencilClient);
-        bigqueryProtoUpdateListener.setOdpfMessageParser(parser);
+        MessageParser parser = new ProtoMessageParser(stencilClient);
+        bigqueryProtoUpdateListener.setMessageParser(parser);
         bigqueryProtoUpdateListener.onSchemaUpdate(descriptorsMap);
         TestKeyBQ testKeyBQ = TestKeyBQ.newBuilder().setOrderNumber("order").setOrderUrl("test").build();
-        OdpfMessage testMessage = new OdpfMessage(
+        Message testMessage = new Message(
                 "".getBytes(),
                 testKeyBQ.toByteArray(),
                 new Tuple<>("topic", "topic"),
                 new Tuple<>("partition", 1),
                 new Tuple<>("offset", 1));
-        Records convert = bigqueryProtoUpdateListener.getConverterCache().getMessageRecordConverter().convert(Collections.singletonList(testMessage));
+        Records convert = bigqueryProtoUpdateListener.getConverterCache().getMessageRecordConverter()
+                .convert(Collections.singletonList(testMessage));
         Assert.assertEquals(1, convert.getValidRecords().size());
         Assert.assertEquals("order", convert.getValidRecords().get(0).getColumns().get("order_number"));
         Assert.assertEquals("test", convert.getValidRecords().get(0).getColumns().get("order_url"));
@@ -243,42 +274,51 @@ public void shouldNotNamespaceMetadataFieldsWhenNamespaceIsNotProvided() throws
     public void shouldNamespaceMetadataFieldsWhenNamespaceIsProvided() throws IOException {
         System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", "metadata_ns");
         config = ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties());
-        BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config, bigQueryClient, converterWrapper);
+        BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config,
+                bigQueryClient, converterWrapper);
         ProtoField returnedProtoField = new ProtoField();
         returnedProtoField.addField(TestProtoUtil.createProtoField("order_number", 1));
         returnedProtoField.addField(TestProtoUtil.createProtoField("order_url", 2));
-        HashMap<String, Descriptor> descriptorsMap = new HashMap<String, Descriptor>() {{
-            put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor());
-        }};
+        HashMap<String, Descriptor> descriptorsMap = new HashMap<String, Descriptor>() {
+            {
+                put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor());
+            }
+        };
         when(stencilClient.get(TestKeyBQ.class.getName())).thenReturn(descriptorsMap.get(TestKeyBQ.class.getName()));
         ObjectNode objNode = JsonNodeFactory.instance.objectNode();
         objNode.put("1", "order_number");
         objNode.put("2", "order_url");
-        ArrayList<Field> bqSchemaFields = new ArrayList<Field>() {{
-            add(Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
-            add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
-            add(BigqueryFields.getNamespacedMetadataField(config.getBqMetadataNamespace(), new ArrayList<TupleString>() {{
-                add(new TupleString("topic", "string"));
-                add(new TupleString("partition", "integer"));
-                add(new TupleString("offset", "integer"));
-            }}));
-        }};
+        ArrayList<Field> bqSchemaFields = new ArrayList<Field>() {
+            {
+                add(Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
+                add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
+                add(BigqueryFields.getNamespacedMetadataField(config.getBqMetadataNamespace(),
+                        new ArrayList<TupleString>() {
+                            {
+                                add(new TupleString("topic", "string"));
+                                add(new TupleString("partition", "integer"));
+                                add(new TupleString("offset", "integer"));
+                            }
+                        }));
+            }
+        };
         doNothing().when(bigQueryClient).upsertTable(bqSchemaFields);
-        OdpfMessageParser parser = new ProtoOdpfMessageParser(stencilClient);
-        bigqueryProtoUpdateListener.setOdpfMessageParser(parser);
+        MessageParser parser = new ProtoMessageParser(stencilClient);
+        bigqueryProtoUpdateListener.setMessageParser(parser);
         bigqueryProtoUpdateListener.onSchemaUpdate(descriptorsMap);
         TestKeyBQ testKeyBQ = TestKeyBQ.newBuilder().setOrderNumber("order").setOrderUrl("test").build();
-        OdpfMessage testMessage = new OdpfMessage(
+        Message testMessage = new Message(
                 "".getBytes(),
                 testKeyBQ.toByteArray(),
                 new Tuple<>("topic", "topic"),
                 new Tuple<>("partition", 1),
                 new Tuple<>("offset", 1));
-        Records convert = bigqueryProtoUpdateListener.getConverterCache().getMessageRecordConverter().convert(Collections.singletonList(testMessage));
+        Records convert = bigqueryProtoUpdateListener.getConverterCache().getMessageRecordConverter()
+                .convert(Collections.singletonList(testMessage));
         Assert.assertEquals(1, convert.getValidRecords().size());
         Assert.assertEquals("order", convert.getValidRecords().get(0).getColumns().get("order_number"));
         Assert.assertEquals("test", convert.getValidRecords().get(0).getColumns().get("order_url"));
@@ -289,9 +329,11 @@ public void shouldNamespaceMetadataFieldsWhenNamespaceIsProvided() throws IOExce
 
     @Test
     public void shouldThrowExceptionWhenMetadataNamespaceNameCollidesWithAnyFieldName() throws IOException {
-        System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", "order_number"); // set field name to an existing column name
+        System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", "order_number"); // set field name to an existing column
+                                                                                // name
         config = ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties());
-        BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config, bigQueryClient, converterWrapper);
+        BigqueryProtoUpdateListener bigqueryProtoUpdateListener = new BigqueryProtoUpdateListener(config,
+                bigQueryClient, converterWrapper);
 
         ProtoField returnedProtoField = new ProtoField();
         returnedProtoField.addField(TestProtoUtil.createProtoField("order_number", 1));
@@ -301,26 +343,34 @@ public void shouldThrowExceptionWhenMetadataNamespaceNameCollidesWithAnyFieldNam
         objNode.put("1", "order_number");
         objNode.put("2", "order_url");
 
-        ArrayList<Field> bqSchemaFields = new ArrayList<Field>() {{
-            add(Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
-            add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
-            add(BigqueryFields.getNamespacedMetadataField(config.getBqMetadataNamespace(), new ArrayList<TupleString>() {{
-                add(new TupleString("topic", "string"));
-                add(new TupleString("partition", "integer"));
-                add(new TupleString("offset", "integer"));
-            }}));
-        }};
-
-        HashMap<String, Descriptor> descriptorsMap = new HashMap<String, Descriptor>() {{
-            put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor());
-        }};
-        OdpfMessageParser parser = new ProtoOdpfMessageParser(stencilClient);
-        bigqueryProtoUpdateListener.setOdpfMessageParser(parser);
+        ArrayList<Field> bqSchemaFields = new ArrayList<Field>() {
+            {
+                add(Field.newBuilder("order_number", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
+                add(Field.newBuilder("order_url", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build());
+                add(BigqueryFields.getNamespacedMetadataField(config.getBqMetadataNamespace(),
+                        new ArrayList<TupleString>() {
+                            {
+                                add(new TupleString("topic", "string"));
+                                add(new TupleString("partition", "integer"));
+                                add(new TupleString("offset", "integer"));
+                            }
+                        }));
+            }
+        };
+
+        HashMap<String, Descriptor> descriptorsMap = new HashMap<String, Descriptor>() {
+            {
+                put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor());
+            }
+        };
+        MessageParser parser = new ProtoMessageParser(stencilClient);
+        bigqueryProtoUpdateListener.setMessageParser(parser);
         Exception exception = Assertions.assertThrows(RuntimeException.class, () -> {
             bigqueryProtoUpdateListener.onSchemaUpdate(descriptorsMap);
         });
-        Assert.assertEquals("Metadata field(s) is already present in the schema. fields: [order_number]", exception.getMessage());
+        Assert.assertEquals("Metadata field(s) is already present in the schema. fields: [order_number]",
+                exception.getMessage());
         verify(bigQueryClient, times(0)).upsertTable(bqSchemaFields);
         System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", "");
     }
diff --git a/src/test/java/org/raystack/depot/bigquery/storage/BigQueryStorageResponseParserTest.java b/src/test/java/org/raystack/depot/bigquery/storage/BigQueryStorageResponseParserTest.java
new file mode 100644
index 00000000..d230ae8f
--- /dev/null
+++ b/src/test/java/org/raystack/depot/bigquery/storage/BigQueryStorageResponseParserTest.java
@@ -0,0 +1,271 @@
+package org.raystack.depot.bigquery.storage;
+
+import com.google.cloud.bigquery.storage.v1.AppendRowsResponse;
+import com.google.cloud.bigquery.storage.v1.Exceptions;
+import com.google.cloud.bigquery.storage.v1.RowError;
+import com.google.rpc.Code;
+import org.raystack.depot.SinkResponse;
+import org.raystack.depot.bigquery.storage.proto.BigQueryRecordMeta;
+import org.raystack.depot.config.BigQuerySinkConfig;
+import org.raystack.depot.error.ErrorInfo;
+import org.raystack.depot.error.ErrorType;
+import org.raystack.depot.message.Message;
+import org.raystack.depot.metrics.BigQueryMetrics;
+import org.raystack.depot.metrics.Instrumentation;
+import io.grpc.Status;
+import io.grpc.StatusRuntimeException;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+public class BigQueryStorageResponseParserTest {
+
+    private Instrumentation instrumentation;
+    private BigQueryStorageResponseParser responseParser;
+
+    @Before
+    public void setup() {
+        instrumentation = Mockito.mock(Instrumentation.class);
+        BigQuerySinkConfig sinkConfig = Mockito.mock(BigQuerySinkConfig.class);
+        BigQueryMetrics bigQueryMetrics = new BigQueryMetrics(sinkConfig);
+        responseParser = new BigQueryStorageResponseParser(sinkConfig, instrumentation, bigQueryMetrics);
+    }
+
+    @Test
+    public void shouldReturnErrorFromStatus() {
+        com.google.rpc.Status status = com.google.rpc.Status.newBuilder().setCode(Code.PERMISSION_DENIED_VALUE)
+                .setMessage("test error").build();
+        ErrorInfo error = BigQueryStorageResponseParser.getError(status);
+        assert error != null;
+        Assert.assertEquals(ErrorType.SINK_4XX_ERROR, error.getErrorType());
+        Assert.assertEquals("test error", error.getException().getMessage());
+
+        status = com.google.rpc.Status.newBuilder().setCode(Code.INTERNAL_VALUE).setMessage("test 5xx error").build();
+        error = BigQueryStorageResponseParser.getError(status);
+        assert error != null;
+        Assert.assertEquals(ErrorType.SINK_5XX_ERROR, error.getErrorType());
+        Assert.assertEquals("test 5xx error", error.getException().getMessage());
+    }
+
+    @Test
+    public void shouldReturnRetryBoolean() {
+        Assert.assertTrue(BigQueryStorageResponseParser.shouldRetry(Status.ABORTED));
+        Assert.assertTrue(BigQueryStorageResponseParser.shouldRetry(Status.INTERNAL));
+        Assert.assertTrue(BigQueryStorageResponseParser.shouldRetry(Status.CANCELLED));
+        Assert.assertTrue(BigQueryStorageResponseParser.shouldRetry(Status.FAILED_PRECONDITION));
+        Assert.assertTrue(BigQueryStorageResponseParser.shouldRetry(Status.DEADLINE_EXCEEDED));
+        Assert.assertTrue(BigQueryStorageResponseParser.shouldRetry(Status.UNAVAILABLE));
+        Assert.assertFalse(BigQueryStorageResponseParser.shouldRetry(Status.OK));
+        Assert.assertFalse(BigQueryStorageResponseParser.shouldRetry(Status.UNKNOWN));
+        Assert.assertFalse(BigQueryStorageResponseParser.shouldRetry(Status.INVALID_ARGUMENT));
+        Assert.assertFalse(BigQueryStorageResponseParser.shouldRetry(Status.NOT_FOUND));
+        Assert.assertFalse(BigQueryStorageResponseParser.shouldRetry(Status.ALREADY_EXISTS));
+        Assert.assertFalse(BigQueryStorageResponseParser.shouldRetry(Status.PERMISSION_DENIED));
+        Assert.assertFalse(BigQueryStorageResponseParser.shouldRetry(Status.RESOURCE_EXHAUSTED));
+        Assert.assertFalse(BigQueryStorageResponseParser.shouldRetry(Status.OUT_OF_RANGE));
+        Assert.assertFalse(BigQueryStorageResponseParser.shouldRetry(Status.UNIMPLEMENTED));
+        Assert.assertFalse(BigQueryStorageResponseParser.shouldRetry(Status.DATA_LOSS));
+        Assert.assertFalse(BigQueryStorageResponseParser.shouldRetry(Status.UNAUTHENTICATED));
+    }
+
+    @Test
+    public void shouldReturn4xx() {
+        RowError rowError = Mockito.mock(RowError.class);
+        Mockito.when(rowError.getMessage()).thenReturn("row error");
+        ErrorInfo error = BigQueryStorageResponseParser.get4xxError(rowError);
+        Assert.assertEquals("row error", error.getException().getMessage());
+        Assert.assertEquals(ErrorType.SINK_4XX_ERROR, error.getErrorType());
+    }
+
+    @Test
+    public void shouldSetErrorResponse() {
+        BigQueryPayload payload = new BigQueryPayload();
+        payload.addMetadataRecord(new BigQueryRecordMeta(0, null, true));
+        payload.addMetadataRecord(new BigQueryRecordMeta(1,
+                new ErrorInfo(new Exception("error1"), ErrorType.DESERIALIZATION_ERROR), false));
+        payload.addMetadataRecord(new BigQueryRecordMeta(2, null, true));
+        payload.addMetadataRecord(new BigQueryRecordMeta(3,
+                new ErrorInfo(new Exception("error2"), ErrorType.UNKNOWN_FIELDS_ERROR), false));
+        payload.addMetadataRecord(new BigQueryRecordMeta(4,
+                new ErrorInfo(new Exception("error3"), ErrorType.INVALID_MESSAGE_ERROR), false));
+        List<Message> messages = createMockMessages();
+        Mockito.when(messages.get(1).getMetadataString()).thenReturn("meta1");
+        Mockito.when(messages.get(3).getMetadataString()).thenReturn("meta2");
+        Mockito.when(messages.get(4).getMetadataString()).thenReturn("meta3");
+        SinkResponse response = new SinkResponse();
+        responseParser.setSinkResponseForInvalidMessages(payload, messages, response);
+        Assert.assertEquals(3, response.getErrors().size());
+        Assert.assertEquals(ErrorType.DESERIALIZATION_ERROR, response.getErrors().get(1L).getErrorType());
+        Assert.assertEquals(ErrorType.UNKNOWN_FIELDS_ERROR, response.getErrors().get(3L).getErrorType());
+        Assert.assertEquals(ErrorType.INVALID_MESSAGE_ERROR, response.getErrors().get(4L).getErrorType());
+        Assert.assertEquals("error1", response.getErrors().get(1L).getException().getMessage());
+        Assert.assertEquals("error2", response.getErrors().get(3L).getException().getMessage());
+        Assert.assertEquals("error3", response.getErrors().get(4L).getException().getMessage());
+
+        List<BigQueryRecordMeta> metaList = new ArrayList<>();
+        payload.forEach(metaList::add);
+
+        Mockito.verify(instrumentation, Mockito.times(1)).logError(
+                "Error {} occurred while converting to payload for record {}",
+                metaList.get(1).getErrorInfo(), "meta1");
+        Mockito.verify(instrumentation, Mockito.times(1)).logError(
+                "Error {} occurred while converting to payload for record {}",
+                metaList.get(3).getErrorInfo(), "meta2");
+        Mockito.verify(instrumentation, Mockito.times(1)).logError(
+                "Error {} occurred while converting to payload for record {}",
+                metaList.get(4).getErrorInfo(), "meta3");
+
+    }
+
+    @Test
+    public void shouldSetResponseForError() {
+        BigQueryPayload payload = new BigQueryPayload();
+        payload.putValidIndexToInputIndex(0L, 0L);
+        payload.putValidIndexToInputIndex(1L, 3L);
+        payload.putValidIndexToInputIndex(2L, 4L);
+        List<Message> messages = createMockMessages();
+        AppendRowsResponse appendRowsResponse = AppendRowsResponse.newBuilder().setError(
+                com.google.rpc.Status.newBuilder().setMessage("test error").setCode(Code.UNAVAILABLE_VALUE).build())
+                .build();
+        SinkResponse sinkResponse = new SinkResponse();
+        responseParser.setSinkResponseForErrors(payload, appendRowsResponse, messages, sinkResponse);
+        Assert.assertEquals(3, sinkResponse.getErrors().size());
+        Assert.assertEquals(ErrorType.SINK_5XX_ERROR, sinkResponse.getErrors().get(0L).getErrorType());
+        Assert.assertEquals(ErrorType.SINK_5XX_ERROR, sinkResponse.getErrors().get(3L).getErrorType());
+        Assert.assertEquals(ErrorType.SINK_5XX_ERROR, sinkResponse.getErrors().get(4L).getErrorType());
+
+        Assert.assertEquals("test error", sinkResponse.getErrors().get(0L).getException().getMessage());
+        Assert.assertEquals("test error", sinkResponse.getErrors().get(3L).getException().getMessage());
+        Assert.assertEquals("test error", sinkResponse.getErrors().get(4L).getException().getMessage());
+    }
+
+    @Test
+    public void shouldSetResponseForRowError() {
+        BigQueryPayload payload = new BigQueryPayload();
+        payload.putValidIndexToInputIndex(0L, 0L);
+        payload.putValidIndexToInputIndex(1L, 3L);
+        payload.putValidIndexToInputIndex(2L, 4L);
+        List<Message> messages = createMockMessages();
+        AppendRowsResponse appendRowsResponse = AppendRowsResponse.newBuilder()
+                .setError(com.google.rpc.Status.newBuilder().setMessage("test error").setCode(Code.UNAVAILABLE_VALUE)
+                        .build())
+                .addRowErrors(RowError.newBuilder().setIndex(1L).setMessage("row error1").build())
+                .addRowErrors(RowError.newBuilder().setIndex(2L).setMessage("row error2").build())
+                .build();
+        SinkResponse sinkResponse = new SinkResponse();
+        responseParser.setSinkResponseForErrors(payload, appendRowsResponse, messages, sinkResponse);
+        Assert.assertEquals(3, sinkResponse.getErrors().size());
+        Assert.assertEquals(ErrorType.SINK_5XX_ERROR, sinkResponse.getErrors().get(0L).getErrorType());
+        Assert.assertEquals(ErrorType.SINK_4XX_ERROR, sinkResponse.getErrors().get(3L).getErrorType());
+        Assert.assertEquals(ErrorType.SINK_4XX_ERROR, sinkResponse.getErrors().get(4L).getErrorType());
+
+        Assert.assertEquals("test error", sinkResponse.getErrors().get(0L).getException().getMessage());
+        Assert.assertEquals("row error1", sinkResponse.getErrors().get(3L).getException().getMessage());
+        Assert.assertEquals("row error2", sinkResponse.getErrors().get(4L).getException().getMessage());
+    }
+
+    private List<Message> createMockMessages() {
+        List<Message> messages = new ArrayList<>();
+        Message m1 = Mockito.mock(Message.class);
+        Message m2 = Mockito.mock(Message.class);
+        Message m3 = Mockito.mock(Message.class);
+        Message m4 = Mockito.mock(Message.class);
+        Message m5 = Mockito.mock(Message.class);
+        messages.add(m1);
+        messages.add(m2);
+        messages.add(m3);
+        messages.add(m4);
+        messages.add(m5);
+        return messages;
+    }
+
+    @Test
+    public void shouldSetSinkResponseForException() {
+        Throwable cause = new StatusRuntimeException(Status.INTERNAL);
+        BigQueryPayload payload = new BigQueryPayload();
+        payload.putValidIndexToInputIndex(0L, 0L);
+        payload.putValidIndexToInputIndex(1L, 1L);
+        payload.putValidIndexToInputIndex(2L, 2L);
+        payload.putValidIndexToInputIndex(3L, 3L);
+        payload.putValidIndexToInputIndex(4L, 4L);
+        List<Message> messages = createMockMessages();
+        SinkResponse response = new SinkResponse();
+        responseParser.setSinkResponseForException(cause, payload, messages, response);
+        Assert.assertEquals(5, response.getErrors().size());
+        Assert.assertEquals(ErrorType.SINK_5XX_ERROR, response.getErrors().get(0L).getErrorType());
+        Assert.assertEquals(ErrorType.SINK_5XX_ERROR, response.getErrors().get(1L).getErrorType());
+        Assert.assertEquals(ErrorType.SINK_5XX_ERROR, response.getErrors().get(2L).getErrorType());
+        Assert.assertEquals(ErrorType.SINK_5XX_ERROR, response.getErrors().get(3L).getErrorType());
+        Assert.assertEquals(ErrorType.SINK_5XX_ERROR, response.getErrors().get(4L).getErrorType());
+        Assert.assertEquals("io.grpc.StatusRuntimeException: INTERNAL",
+                response.getErrors().get(0L).getException().getMessage());
+        Assert.assertEquals("io.grpc.StatusRuntimeException: INTERNAL",
+                response.getErrors().get(1L).getException().getMessage());
+        Assert.assertEquals("io.grpc.StatusRuntimeException: INTERNAL",
+                response.getErrors().get(2L).getException().getMessage());
+        Assert.assertEquals("io.grpc.StatusRuntimeException: INTERNAL",
+                response.getErrors().get(3L).getException().getMessage());
+        Assert.assertEquals("io.grpc.StatusRuntimeException: INTERNAL",
+                response.getErrors().get(4L).getException().getMessage());
+    }
+
+    @Test
+    public void shouldSetSinkResponseForExceptionWithNonRetry() {
+        Throwable cause = new StatusRuntimeException(Status.RESOURCE_EXHAUSTED);
+        BigQueryPayload payload = new BigQueryPayload();
+        payload.putValidIndexToInputIndex(0L, 0L);
+        payload.putValidIndexToInputIndex(1L, 1L);
+        payload.putValidIndexToInputIndex(2L, 2L);
+        payload.putValidIndexToInputIndex(3L, 3L);
+        payload.putValidIndexToInputIndex(4L, 4L);
+        List<Message> messages = createMockMessages();
+        SinkResponse response = new SinkResponse();
+        responseParser.setSinkResponseForException(cause, payload, messages, response);
+        Assert.assertEquals(5, response.getErrors().size());
+        Assert.assertEquals(ErrorType.SINK_4XX_ERROR, response.getErrors().get(0L).getErrorType());
+        Assert.assertEquals(ErrorType.SINK_4XX_ERROR, response.getErrors().get(1L).getErrorType());
+        Assert.assertEquals(ErrorType.SINK_4XX_ERROR, response.getErrors().get(2L).getErrorType());
+        Assert.assertEquals(ErrorType.SINK_4XX_ERROR, response.getErrors().get(3L).getErrorType());
+        Assert.assertEquals(ErrorType.SINK_4XX_ERROR, response.getErrors().get(4L).getErrorType());
+        Assert.assertEquals("io.grpc.StatusRuntimeException: RESOURCE_EXHAUSTED",
+                response.getErrors().get(0L).getException().getMessage());
+        Assert.assertEquals("io.grpc.StatusRuntimeException: RESOURCE_EXHAUSTED",
+                response.getErrors().get(1L).getException().getMessage());
+        Assert.assertEquals("io.grpc.StatusRuntimeException: RESOURCE_EXHAUSTED",
+                response.getErrors().get(2L).getException().getMessage());
+        Assert.assertEquals("io.grpc.StatusRuntimeException: RESOURCE_EXHAUSTED",
+                response.getErrors().get(3L).getException().getMessage());
+        Assert.assertEquals("io.grpc.StatusRuntimeException: RESOURCE_EXHAUSTED",
+                response.getErrors().get(4L).getException().getMessage());
+    }
+
+    @Test
+    public void shouldSetSinkResponseForExceptionWithAppendError() {
+        Map<Integer, String> rowsToErrorMessages = new HashMap<>();
+        rowsToErrorMessages.put(0, "message1");
+        rowsToErrorMessages.put(2, "message2");
+        Throwable cause = new Exceptions.AppendSerializationError(404, "test error", "default", rowsToErrorMessages);
+        BigQueryPayload payload = new BigQueryPayload();
+        payload.putValidIndexToInputIndex(0L, 0L);
+        payload.putValidIndexToInputIndex(1L, 3L);
+        payload.putValidIndexToInputIndex(2L, 4L);
+        List<Message> messages = createMockMessages();
+        SinkResponse response = new SinkResponse();
+        responseParser.setSinkResponseForException(cause, payload, messages, response);
+        Assert.assertEquals(3, response.getErrors().size());
+        Assert.assertEquals(ErrorType.SINK_4XX_ERROR, response.getErrors().get(0L).getErrorType());
+        Assert.assertEquals(ErrorType.SINK_5XX_ERROR, response.getErrors().get(3L).getErrorType());
+        Assert.assertEquals(ErrorType.SINK_4XX_ERROR, response.getErrors().get(4L).getErrorType());
+        Assert.assertEquals("message1", response.getErrors().get(0L).getException().getMessage());
+        Assert.assertEquals(
+                "com.google.cloud.bigquery.storage.v1.Exceptions$AppendSerializationError: UNKNOWN: test error",
+                response.getErrors().get(3L).getException().getMessage());
+        Assert.assertEquals("message2", response.getErrors().get(4L).getException().getMessage());
+    }
+}
diff --git a/src/test/java/org/raystack/depot/bigquery/storage/BigQueryWriterUtilsTest.java b/src/test/java/org/raystack/depot/bigquery/storage/BigQueryWriterUtilsTest.java
new file mode 100644
index 00000000..67495ded
--- /dev/null
+++ b/src/test/java/org/raystack/depot/bigquery/storage/BigQueryWriterUtilsTest.java
@@ -0,0 +1,5 @@
+package org.raystack.depot.bigquery.storage;
+
+public class BigQueryWriterUtilsTest {
+
+}
diff --git a/src/test/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoStorageClientTest.java b/src/test/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoStorageClientTest.java
new file mode 100644
index 00000000..1881d67a
--- /dev/null
+++ b/src/test/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoStorageClientTest.java
@@ -0,0 +1,767 @@
+package org.raystack.depot.bigquery.storage.proto;
+
+import com.google.cloud.bigquery.storage.v1.BQTableSchemaToProtoDescriptor;
+import com.google.cloud.bigquery.storage.v1.ProtoRows;
+import com.google.cloud.bigquery.storage.v1.TableFieldSchema;
+import com.google.cloud.bigquery.storage.v1.TableSchema;
+import com.google.protobuf.*;
+import org.raystack.depot.*;
+import org.raystack.depot.bigquery.storage.BigQueryPayload;
+import org.raystack.depot.common.Tuple;
+import org.raystack.depot.config.BigQuerySinkConfig;
+import org.raystack.depot.error.ErrorType;
+import org.raystack.depot.message.Message;
+import org.raystack.depot.message.proto.ProtoMessageParser;
+import org.raystack.depot.message.proto.TestProtoUtil;
+import org.raystack.stencil.client.ClassLoadStencilClient;
+import org.aeonbits.owner.ConfigFactory;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.skyscreamer.jsonassert.JSONAssert;
+import org.threeten.extra.Days;
+
+import java.io.IOException;
+import java.nio.charset.StandardCharsets;
+import java.time.Instant;
+import java.util.*;
+import java.util.concurrent.TimeUnit;
+import java.util.stream.Collectors;
+
+import static org.mockito.Mockito.CALLS_REAL_METHODS;
+
+public class BigQueryProtoStorageClientTest {
+
+    private Descriptors.Descriptor testDescriptor;
+    private BigQueryProtoStorageClient converter;
+    private TableSchema testMessageBQSchema;
+    private ProtoMessageParser protoMessageParser;
+
+    @Before
+    public void setUp() throws IOException, Descriptors.DescriptorValidationException {
+        System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "org.raystack.depot.TestMessageBQ");
+        System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", "");
+        System.setProperty("SINK_BIGQUERY_METADATA_COLUMNS_TYPES",
+                "message_offset=integer,message_topic=string,load_time=timestamp,message_timestamp=timestamp,message_partition=integer");
+        System.setProperty("SINK_BIGQUERY_TABLE_PARTITION_KEY", "created_at");
+        System.setProperty("SINK_BIGQUERY_TABLE_PARTITIONING_ENABLE", "true");
+        ClassLoadStencilClient stencilClient = Mockito.mock(ClassLoadStencilClient.class, CALLS_REAL_METHODS);
+        protoMessageParser = new ProtoMessageParser(stencilClient);
+        testMessageBQSchema = TableSchema.newBuilder()
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("message_offset")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.INT64)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("message_topic")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.STRING)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("load_time")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.TIMESTAMP)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("message_timestamp")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.TIMESTAMP)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("message_partition")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.INT64)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("order_number")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.STRING)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("created_at")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.DATETIME)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("aliases")
+                        .setMode(TableFieldSchema.Mode.REPEATED)
+                        .setType(TableFieldSchema.Type.STRING)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("discount")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.INT64)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("order_url")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.STRING)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("price")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.DOUBLE)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("user_token")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.BYTES)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("counter")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.INT64)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("status")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.STRING)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("properties")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.STRING)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("trip_duration")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.STRUCT)
+                        .addFields(TableFieldSchema.newBuilder()
+                                .setName("seconds")
+                                .setMode(TableFieldSchema.Mode.NULLABLE)
+                                .setType(TableFieldSchema.Type.INT64)
+                                .build())
+                        .addFields(TableFieldSchema.newBuilder()
+                                .setName("nanos")
+                                .setMode(TableFieldSchema.Mode.NULLABLE)
+                                .setType(TableFieldSchema.Type.INT64)
+                                .build())
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("current_state")
+                        .setMode(TableFieldSchema.Mode.REPEATED)
+                        .setType(TableFieldSchema.Type.STRUCT)
+                        .addFields(TableFieldSchema.newBuilder()
+                                .setName("key")
+                                .setMode(TableFieldSchema.Mode.NULLABLE)
+                                .setType(TableFieldSchema.Type.STRING)
+                                .build())
+                        .addFields(TableFieldSchema.newBuilder()
+                                .setName("value")
+                                .setMode(TableFieldSchema.Mode.NULLABLE)
+                                .setType(TableFieldSchema.Type.STRING)
+                                .build())
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("updated_at")
+                        .setMode(TableFieldSchema.Mode.REPEATED)
+                        .setType(TableFieldSchema.Type.DATETIME)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("camelCase")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.STRING)
+                        .build())
+                .build();
+        testDescriptor = BQTableSchemaToProtoDescriptor
+                .convertBQTableSchemaToProtoDescriptor(testMessageBQSchema);
+        BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties());
+        BigQueryProtoWriter writer = Mockito.mock(BigQueryProtoWriter.class);
+        converter = new BigQueryProtoStorageClient(writer, config, protoMessageParser);
+        Mockito.when(writer.getDescriptor()).thenReturn(testDescriptor);
+    }
+
+    @Test
+    public void shouldConvertPrimitiveFields() throws Exception {
+        TestMessageBQ m1 = TestMessageBQ.newBuilder()
+                .setOrderNumber("order-no-112")
+                .setOrderUrl("order-url-1")
+                .setDiscount(1200L)
+                .setCreatedAt(Timestamp.newBuilder().setSeconds(Instant.now().getEpochSecond()))
+                .setPrice(23)
+                .setUserToken(ByteString.copyFrom("test-token".getBytes()))
+                .setCounter(20)
+                .setStatus(StatusBQ.COMPLETED)
+                .addAliases("alias1").addAliases("alias2")
+                .build();
+        List<Message> inputList = new ArrayList<Message>() {
+            {
+                add(new Message(null, m1.toByteArray()));
+            }
+        };
+        BigQueryPayload payload = converter.convert(inputList);
+        Assert.assertEquals(1, payload.getPayloadIndexes().size());
+        ProtoRows protoPayload = (ProtoRows) payload.getPayload();
+        Assert.assertEquals(1, protoPayload.getSerializedRowsCount());
+        ByteString serializedRows = protoPayload.getSerializedRows(0);
+        DynamicMessage convertedMessage = DynamicMessage.parseFrom(testDescriptor, serializedRows);
+        Assert.assertEquals("order-no-112",
+                convertedMessage.getField(testDescriptor.findFieldByName("order_number")));
+        Assert.assertEquals("order-url-1",
+                convertedMessage.getField(testDescriptor.findFieldByName("order_url")));
+        Assert.assertEquals(1200L, convertedMessage.getField(testDescriptor.findFieldByName("discount")));
+        Assert.assertEquals(ByteString.copyFrom("test-token".getBytes()),
+                convertedMessage.getField(testDescriptor.findFieldByName("user_token")));
+        List<String> aliases = (List<String>) convertedMessage.getField(testDescriptor.findFieldByName("aliases"));
+        Assert.assertEquals("alias1", aliases.get(0));
+        Assert.assertEquals("alias2", aliases.get(1));
+        Assert.assertEquals(20L, convertedMessage.getField(testDescriptor.findFieldByName("counter")));
+        Assert.assertEquals("COMPLETED", convertedMessage.getField(testDescriptor.findFieldByName("status")));
+    }
+
+    @Test
+    public void shouldReturnCaseInsensitiveFields() throws InvalidProtocolBufferException {
+        TestMessageBQ m1 = TestMessageBQ.newBuilder()
+                .setCamelCase("testing")
+                .build();
+        List<Message> inputList = new ArrayList<Message>() {
+            {
+                add(new Message(null, m1.toByteArray()));
+            }
+        };
+        BigQueryPayload payload = converter.convert(inputList);
+        ProtoRows protoPayload = (ProtoRows) payload.getPayload();
+        Assert.assertEquals(1, protoPayload.getSerializedRowsCount());
+        ByteString serializedRows = protoPayload.getSerializedRows(0);
+        DynamicMessage convertedMessage = DynamicMessage.parseFrom(testDescriptor, serializedRows);
+        Assert.assertEquals("testing", convertedMessage.getField(testDescriptor.findFieldByName("camelcase")));
+    }
+
+    @Test
+    public void shouldReturnDurationField() throws IOException {
+        TestMessageBQ m1 = TestMessageBQ.newBuilder()
+                .setCreatedAt(Timestamp.newBuilder().setSeconds(Instant.now().getEpochSecond()))
+                .setTripDuration(Duration.newBuilder().setSeconds(1234L).setNanos(231).build())
+                .build();
+        List<Message> inputList = new ArrayList<Message>() {
+            {
+                add(new Message(null, m1.toByteArray()));
+            }
+        };
+        BigQueryPayload payload = converter.convert(inputList);
+        ProtoRows protoPayload = (ProtoRows) payload.getPayload();
+        Assert.assertEquals(1, protoPayload.getSerializedRowsCount());
+        ByteString serializedRows = protoPayload.getSerializedRows(0);
+        DynamicMessage convertedMessage = DynamicMessage.parseFrom(testDescriptor, serializedRows);
+        DynamicMessage tripDuration = ((DynamicMessage) convertedMessage
+                .getField(testDescriptor.findFieldByName("trip_duration")));
+        Assert.assertEquals(1234L,
+                tripDuration.getField(tripDuration.getDescriptorForType().findFieldByName("seconds")));
+        Assert.assertEquals(231L,
+                tripDuration.getField(tripDuration.getDescriptorForType().findFieldByName("nanos")));
+    }
+
+    @Test
+    public void shouldReturnMapField() throws Exception {
+        TestMessageBQ m1 = TestMessageBQ.newBuilder()
+                .putCurrentState("k4", "v4")
+                .setCreatedAt(Timestamp.newBuilder().setSeconds(Instant.now().getEpochSecond()))
+                .putCurrentState("k3", "v3")
+                .putCurrentState("k1", "v1")
+                .putCurrentState("k2", "v2")
+                .build();
+        List<Message> inputList = new ArrayList<Message>() {
+            {
+                add(new Message(null, m1.toByteArray()));
+            }
+        };
+        BigQueryPayload payload = converter.convert(inputList);
+        ProtoRows protoPayload = (ProtoRows) payload.getPayload();
+        Assert.assertEquals(1, protoPayload.getSerializedRowsCount());
+        ByteString serializedRows = protoPayload.getSerializedRows(0);
+        DynamicMessage convertedMessage = DynamicMessage.parseFrom(testDescriptor, serializedRows);
+        List<DynamicMessage> currentState = ((List<DynamicMessage>) convertedMessage
+                .getField(testDescriptor.findFieldByName("current_state")));
+        List<Tuple<String, String>> actual = currentState.stream().map(o -> {
+            Map<String, String> values = ((DynamicMessage) o).getAllFields().entrySet().stream().collect(
+                    Collectors.toMap(s -> s.getKey().getName(), s -> s.getValue().toString()));
+            return new Tuple<>(values.get("key"), values.get("value"));
+        }).collect(Collectors.toList());
+        actual.sort(Comparator.comparing(Tuple::getFirst));
+        List<Tuple<String, String>> expected = new ArrayList<Tuple<String, String>>() {
+            {
+                add(new Tuple<>("k1", "v1"));
+                add(new Tuple<>("k2", "v2"));
+                add(new Tuple<>("k3", "v3"));
+                add(new Tuple<>("k4", "v4"));
+            }
+        };
+        Assert.assertEquals(expected, actual);
+    }
+
+    @Test
+    public void shouldReturnComplexAndNestedType() throws Descriptors.DescriptorValidationException, IOException {
+        TableSchema schema = TableSchema.newBuilder()
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("single_message")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.STRUCT)
+                        .addAllFields(testMessageBQSchema.getFieldsList())
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("repeated_message")
+                        .setMode(TableFieldSchema.Mode.REPEATED)
+                        .setType(TableFieldSchema.Type.STRUCT)
+                        .addAllFields(testMessageBQSchema.getFieldsList())
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("number_field")
+                        .setMode(TableFieldSchema.Mode.NULLABLE)
+                        .setType(TableFieldSchema.Type.INT64)
+                        .build())
+                .addFields(TableFieldSchema.newBuilder()
+                        .setName("repeated_number_field")
+                        .setMode(TableFieldSchema.Mode.REPEATED)
+                        .setType(TableFieldSchema.Type.INT64)
+                        .build())
+                .build();
+        System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS",
+                "org.raystack.depot.TestNestedRepeatedMessageBQ");
+        testDescriptor = BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(schema);
+        BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties());
+        BigQueryProtoWriter writer = Mockito.mock(BigQueryProtoWriter.class);
+        converter = new BigQueryProtoStorageClient(writer, config, protoMessageParser);
+        Mockito.when(writer.getDescriptor()).thenReturn(testDescriptor);
+
+        Instant now = Instant.now();
+        TestMessageBQ singleMessage = TestProtoUtil.generateTestMessage(now);
+        TestMessageBQ nested1 = TestProtoUtil.generateTestMessage(now);
+        TestMessageBQ nested2 = TestProtoUtil.generateTestMessage(now);
+        TestNestedRepeatedMessageBQ message = TestNestedRepeatedMessageBQ.newBuilder()
+                .setNumberField(123)
+                .setSingleMessage(singleMessage)
+                .addRepeatedMessage(nested1)
+                .addRepeatedMessage(nested2)
+                .addRepeatedNumberField(11)
+                .addRepeatedNumberField(12)
+                .addRepeatedNumberField(13)
+                .build();
+        List<Message> inputList = new ArrayList<Message>() {
+            {
+                add(new Message(null, message.toByteArray()));
+            }
+        };
+        BigQueryPayload payload = converter.convert(inputList);
+        ProtoRows protoPayload = (ProtoRows) payload.getPayload();
+        Assert.assertEquals(1, protoPayload.getSerializedRowsCount());
+        ByteString serializedRows = protoPayload.getSerializedRows(0);
+
+        DynamicMessage convertedMessage = DynamicMessage.parseFrom(testDescriptor, serializedRows);
+        DynamicMessage sm1 = (DynamicMessage) convertedMessage
+                .getField(testDescriptor.findFieldByName("single_message"));
+        Assert.assertEquals(singleMessage.getOrderNumber(),
+                sm1.getField(sm1.getDescriptorForType().findFieldByName("order_number")));
+        List<DynamicMessage> nestedMessage = (List<DynamicMessage>) convertedMessage
+                .getField(testDescriptor.findFieldByName("repeated_message"));
+        Assert.assertEquals(2, nestedMessage.size());
+        DynamicMessage nestedMessage1 = nestedMessage.get(0);
+        DynamicMessage nestedMessage2 = nestedMessage.get(1);
+        Assert.assertEquals(nested1.getOrderNumber(),
+                nestedMessage1.getField(sm1.getDescriptorForType().findFieldByName("order_number")));
+        Assert.assertEquals(nested2.getOrderNumber(),
+                nestedMessage2.getField(sm1.getDescriptorForType().findFieldByName("order_number")));
+        Assert.assertEquals(123L,
+                convertedMessage.getField(testDescriptor.findFieldByName("number_field")));
convertedMessage.getField(testDescriptor.findFieldByName("number_field"))); + List repeatedNumbers = (List) convertedMessage + .getField(testDescriptor.findFieldByName("repeated_number_field")); + Assert.assertEquals(3, repeatedNumbers.size()); + Assert.assertEquals(Long.valueOf(11), repeatedNumbers.get(0)); + Assert.assertEquals(Long.valueOf(12), repeatedNumbers.get(1)); + Assert.assertEquals(Long.valueOf(13), repeatedNumbers.get(2)); + } + + @Test + public void shouldConvertTimeStamp() throws IOException { + Instant now = Instant.now(); + TestMessageBQ m1 = TestMessageBQ.newBuilder() + .setCreatedAt(Timestamp.newBuilder().setSeconds(now.getEpochSecond()).build()) + .addUpdatedAt(Timestamp.newBuilder().setSeconds(now.getEpochSecond()).build()) + .addUpdatedAt(Timestamp.newBuilder().setSeconds(now.getEpochSecond()).build()) + .build(); + List inputList = new ArrayList() { + { + add(new Message(null, m1.toByteArray())); + } + }; + BigQueryPayload payload = converter.convert(inputList); + ProtoRows protoPayload = (ProtoRows) payload.getPayload(); + Assert.assertEquals(1, protoPayload.getSerializedRowsCount()); + ByteString serializedRows = protoPayload.getSerializedRows(0); + DynamicMessage convertedMessage = DynamicMessage.parseFrom(testDescriptor, serializedRows); + long createdAt = (long) convertedMessage.getField(testDescriptor.findFieldByName("created_at")); + // Microseconds + Assert.assertEquals(TimeUnit.SECONDS.toMicros(now.getEpochSecond()), createdAt); + List updatedAt = (List) convertedMessage.getField(testDescriptor.findFieldByName("updated_at")); + Assert.assertEquals(TimeUnit.SECONDS.toMicros(now.getEpochSecond()), updatedAt.get(0)); + Assert.assertEquals(TimeUnit.SECONDS.toMicros(now.getEpochSecond()), updatedAt.get(1)); + } + + @Test + public void shouldConvertStruct() throws IOException { + ListValue.Builder builder = ListValue.newBuilder(); + ListValue listValue = builder + .addValues(Value.newBuilder().setNumberValue(1).build()) + .addValues(Value.newBuilder().setNumberValue(2).build()) + .addValues(Value.newBuilder().setNumberValue(3).build()) + .build(); + Struct value = Struct.newBuilder() + .putFields("string", Value.newBuilder().setStringValue("string_val").build()) + .putFields("list", Value.newBuilder().setListValue(listValue).build()) + .putFields("boolean", Value.newBuilder().setBoolValue(true).build()) + .putFields("number", Value.newBuilder().setNumberValue(123.45).build()) + .build(); + + TestMessageBQ m1 = TestMessageBQ.newBuilder() + .setOrderNumber("order-1") + .setCreatedAt(Timestamp.newBuilder().setSeconds(Instant.now().getEpochSecond())) + .setProperties(value) + .build(); + List inputList = new ArrayList() { + { + add(new Message(null, m1.toByteArray())); + } + }; + BigQueryPayload payload = converter.convert(inputList); + ProtoRows protoPayload = (ProtoRows) payload.getPayload(); + Assert.assertEquals(1, protoPayload.getSerializedRowsCount()); + ByteString serializedRows = protoPayload.getSerializedRows(0); + DynamicMessage convertedMessage = DynamicMessage.parseFrom(testDescriptor, serializedRows); + String properties = (String) (convertedMessage.getField(testDescriptor.findFieldByName("properties"))); + String expected = "{\n" + + " \"number\": 123.45,\n" + + " \"string\": \"string_val\",\n" + + " \"list\": [\n" + + " 1,\n" + + " 2,\n" + + " 3\n" + + " ],\n" + + " \"boolean\": true\n" + + "}\n"; + JSONAssert.assertEquals(expected, properties, true); + } + + @Test + public void shouldHaveMetadataOnPayload() throws InvalidProtocolBufferException { + 
Instant now = Instant.now(); + TestMessageBQ m1 = TestMessageBQ.newBuilder() + .setCreatedAt(Timestamp.newBuilder().setSeconds(now.getEpochSecond()).build()) + .build(); + List inputList = new ArrayList() { + { + add(new Message( + null, + m1.toByteArray(), + new Tuple<>("message_partition", 10), + new Tuple<>("message_topic", "test-topic"), + new Tuple<>("message_offset", 143), + new Tuple<>("load_time", now.toEpochMilli()), + new Tuple<>("message_timestamp", now.toEpochMilli()))); + add(new Message( + null, + m1.toByteArray(), + new Tuple<>("message_partition", 10), + new Tuple<>("message_topic", "test-topic"), + new Tuple<>("message_offset", 144L), + new Tuple<>("load_time", now.toEpochMilli()), + new Tuple<>("message_timestamp", now.toEpochMilli()))); + } + }; + BigQueryPayload payload = converter.convert(inputList); + ProtoRows protoPayload = (ProtoRows) payload.getPayload(); + Assert.assertEquals(2, protoPayload.getSerializedRowsCount()); + ByteString serializedRows = protoPayload.getSerializedRows(0); + DynamicMessage convertedMessage = DynamicMessage.parseFrom(testDescriptor, serializedRows); + Assert.assertEquals(10L, + convertedMessage.getField(testDescriptor.findFieldByName("message_partition"))); + Assert.assertEquals("test-topic", + convertedMessage.getField(testDescriptor.findFieldByName("message_topic"))); + Assert.assertEquals(143L, convertedMessage.getField(testDescriptor.findFieldByName("message_offset"))); + Assert.assertEquals(TimeUnit.MILLISECONDS.toMicros(now.toEpochMilli()), + convertedMessage.getField(testDescriptor.findFieldByName("load_time"))); + Assert.assertEquals(TimeUnit.MILLISECONDS.toMicros(now.toEpochMilli()), + convertedMessage.getField(testDescriptor.findFieldByName("message_timestamp"))); + + serializedRows = protoPayload.getSerializedRows(1); + convertedMessage = DynamicMessage.parseFrom(testDescriptor, serializedRows); + Assert.assertEquals(10L, + convertedMessage.getField(testDescriptor.findFieldByName("message_partition"))); + Assert.assertEquals("test-topic", + convertedMessage.getField(testDescriptor.findFieldByName("message_topic"))); + Assert.assertEquals(144L, convertedMessage.getField(testDescriptor.findFieldByName("message_offset"))); + Assert.assertEquals(TimeUnit.MILLISECONDS.toMicros(now.toEpochMilli()), + convertedMessage.getField(testDescriptor.findFieldByName("load_time"))); + Assert.assertEquals(TimeUnit.MILLISECONDS.toMicros(now.toEpochMilli()), + convertedMessage.getField(testDescriptor.findFieldByName("message_timestamp"))); + } + + @Test + public void shouldHaveMetadataOnPayloadWithNameSpace() + throws InvalidProtocolBufferException, Descriptors.DescriptorValidationException { + System.setProperty("SINK_BIGQUERY_METADATA_NAMESPACE", "__kafka_metadata"); + TableSchema schema = TableSchema.newBuilder() + .addFields(TableFieldSchema.newBuilder() + .setName("created_at") + .setMode(TableFieldSchema.Mode.NULLABLE) + .setType(TableFieldSchema.Type.DATETIME) + .build()) + .addFields(TableFieldSchema.newBuilder() + .setName("__kafka_metadata") + .setMode(TableFieldSchema.Mode.NULLABLE) + .setType(TableFieldSchema.Type.STRUCT) + .addFields(TableFieldSchema.newBuilder() + .setName("message_offset") + .setMode(TableFieldSchema.Mode.NULLABLE) + .setType(TableFieldSchema.Type.INT64) + .build()) + .addFields(TableFieldSchema.newBuilder() + .setName("message_topic") + .setMode(TableFieldSchema.Mode.NULLABLE) + .setType(TableFieldSchema.Type.STRING) + .build()) + .addFields(TableFieldSchema.newBuilder() + .setName("load_time") + 
.setMode(TableFieldSchema.Mode.NULLABLE) + .setType(TableFieldSchema.Type.TIMESTAMP) + .build()) + .addFields(TableFieldSchema.newBuilder() + .setName("message_timestamp") + .setMode(TableFieldSchema.Mode.NULLABLE) + .setType(TableFieldSchema.Type.TIMESTAMP) + .build()) + .addFields(TableFieldSchema.newBuilder() + .setName("message_partition") + .setMode(TableFieldSchema.Mode.NULLABLE) + .setType(TableFieldSchema.Type.INT64) + .build()) + .build()) + .build(); + testDescriptor = BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(schema); + BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties()); + BigQueryProtoWriter writer = Mockito.mock(BigQueryProtoWriter.class); + converter = new BigQueryProtoStorageClient(writer, config, protoMessageParser); + Mockito.when(writer.getDescriptor()).thenReturn(testDescriptor); + + Instant now = Instant.now(); + TestMessageBQ m1 = TestMessageBQ.newBuilder() + .setCreatedAt(Timestamp.newBuilder().setSeconds(now.getEpochSecond()).build()) + .build(); + + List inputList = new ArrayList() { + { + add(new Message( + null, + m1.toByteArray(), + new Tuple<>("message_partition", 10), + new Tuple<>("message_topic", "test-topic"), + new Tuple<>("message_offset", 143), + new Tuple<>("load_time", now.toEpochMilli()), + new Tuple<>("message_timestamp", now.toEpochMilli()))); + } + }; + BigQueryPayload payload = converter.convert(inputList); + ProtoRows protoPayload = (ProtoRows) payload.getPayload(); + Assert.assertEquals(1, protoPayload.getSerializedRowsCount()); + ByteString serializedRows = protoPayload.getSerializedRows(0); + DynamicMessage convertedMessage = DynamicMessage.parseFrom(testDescriptor, serializedRows); + DynamicMessage metadata = (DynamicMessage) convertedMessage + .getField(testDescriptor.findFieldByName("__kafka_metadata")); + Assert.assertEquals(10L, metadata + .getField(metadata.getDescriptorForType().findFieldByName("message_partition"))); + Assert.assertEquals("test-topic", + metadata.getField(metadata.getDescriptorForType().findFieldByName("message_topic"))); + Assert.assertEquals(143L, + metadata.getField(metadata.getDescriptorForType().findFieldByName("message_offset"))); + Assert.assertEquals(TimeUnit.MILLISECONDS.toMicros(now.toEpochMilli()), + metadata.getField(metadata.getDescriptorForType().findFieldByName("load_time"))); + Assert.assertEquals(TimeUnit.MILLISECONDS.toMicros(now.toEpochMilli()), metadata + .getField(metadata.getDescriptorForType().findFieldByName("message_timestamp"))); + } + + @Test + public void shouldReturnInvalidRecords() throws InvalidProtocolBufferException { + Instant now = Instant.now(); + TestMessageBQ m1 = TestMessageBQ.newBuilder() + .setCreatedAt(Timestamp.newBuilder().setSeconds(now.getEpochSecond()).build()) + .build(); + List inputList = new ArrayList() { + { + add(new Message(null, m1.toByteArray(), new Tuple<>("message_offset", 11))); + add(new Message(null, "invalid".getBytes(StandardCharsets.UTF_8), + new Tuple<>("message_offset", 12))); + add(new Message(null, m1.toByteArray(), new Tuple<>("message_offset", 13))); + } + }; + BigQueryPayload payload = converter.convert(inputList); + ProtoRows protoPayload = (ProtoRows) payload.getPayload(); + Assert.assertEquals(2, protoPayload.getSerializedRowsCount()); + Assert.assertEquals(2, payload.getPayloadIndexes().size()); + Assert.assertTrue(payload.getPayloadIndexes().contains(0L)); + Assert.assertTrue(payload.getPayloadIndexes().contains(1L)); + 
+        Assert.assertFalse(payload.getPayloadIndexes().contains(2L));
+        Assert.assertEquals(0L, payload.getInputIndex(0L));
+        Assert.assertEquals(2L, payload.getInputIndex(1L));
+        ByteString serializedRows = protoPayload.getSerializedRows(0);
+        DynamicMessage convertedMessage = DynamicMessage.parseFrom(testDescriptor, serializedRows);
+        long createdAt = (long) convertedMessage.getField(testDescriptor.findFieldByName("created_at"));
+        // Microseconds
+        Assert.assertEquals(TimeUnit.SECONDS.toMicros(now.getEpochSecond()), createdAt);
+
+        List<BigQueryRecordMeta> metas = new ArrayList<>();
+        for (BigQueryRecordMeta r : payload) {
+            metas.add(r);
+        }
+        BigQueryRecordMeta validRecord = metas.get(0);
+        BigQueryRecordMeta invalidRecord = metas.get(1);
+        Assert.assertTrue(validRecord.isValid());
+        Assert.assertFalse(invalidRecord.isValid());
+        Assert.assertNull(validRecord.getErrorInfo());
+        Assert.assertNotNull(invalidRecord.getErrorInfo());
+        Assert.assertEquals(0, validRecord.getInputIndex());
+        Assert.assertEquals(1, invalidRecord.getInputIndex());
+        Assert.assertEquals(ErrorType.DESERIALIZATION_ERROR, invalidRecord.getErrorInfo().getErrorType());
+        Assert.assertEquals(
+                "While parsing a protocol message, the input ended unexpectedly in the middle of a field. This could mean either that the input has been truncated or that an embedded message misreported its own length.",
+                invalidRecord.getErrorInfo().getException().getMessage());
+    }
+
+    @Test
+    public void shouldNotConvertFiveYearsOldTimeStamp() throws IOException {
+        Instant moreThanFiveYears = Instant.now().minus(Days.of(1826));
+        TestMessageBQ m1 = TestMessageBQ.newBuilder()
+                .setCreatedAt(Timestamp.newBuilder().setSeconds(moreThanFiveYears.getEpochSecond())
+                        .build())
+                .addUpdatedAt(Timestamp.newBuilder().setSeconds(moreThanFiveYears.getEpochSecond())
+                        .build())
+                .addUpdatedAt(Timestamp.newBuilder().setSeconds(moreThanFiveYears.getEpochSecond())
+                        .build())
+                .build();
+        List<Message> inputList = new ArrayList<Message>() {
+            {
+                add(new Message(null, m1.toByteArray()));
+            }
+        };
+        BigQueryPayload payload = converter.convert(inputList);
+        ProtoRows protoPayload = (ProtoRows) payload.getPayload();
+        Assert.assertEquals(0, protoPayload.getSerializedRowsCount());
+        List<BigQueryRecordMeta> metas = new ArrayList<>();
+        for (BigQueryRecordMeta r : payload) {
+            metas.add(r);
+        }
+        Assert.assertEquals(1, metas.size());
+        Assert.assertEquals(ErrorType.INVALID_MESSAGE_ERROR, metas.get(0).getErrorInfo().getErrorType());
+        Assert.assertTrue(metas.get(0).getErrorInfo().getException().getMessage()
+                .contains("is outside the allowed bounds. You can only stream to date range within 1825 days in the past and 366 days in the future relative to the current date."));
+    }
+
+    @Test
+    public void shouldConvertAnyTimeStampIfNotPartitionColumn() throws IOException {
+        Instant moreThanFiveYears = Instant.now().minus(Days.of(18216));
+        Instant lessThanFiveYears = Instant.now().minus(Days.of(100));
+        TestMessageBQ m1 = TestMessageBQ.newBuilder()
+                .setCreatedAt(Timestamp.newBuilder().setSeconds(lessThanFiveYears.getEpochSecond())
+                        .build())
+                .addUpdatedAt(Timestamp.newBuilder().setSeconds(moreThanFiveYears.getEpochSecond())
+                        .build())
+                .addUpdatedAt(Timestamp.newBuilder().setSeconds(moreThanFiveYears.getEpochSecond())
+                        .build())
+                .build();
+        List<Message> inputList = new ArrayList<Message>() {
+            {
+                add(new Message(null, m1.toByteArray()));
+            }
+        };
+        BigQueryPayload payload = converter.convert(inputList);
+        ProtoRows protoPayload = (ProtoRows) payload.getPayload();
+        Assert.assertEquals(1, protoPayload.getSerializedRowsCount());
+        List<BigQueryRecordMeta> metas = new ArrayList<>();
+        for (BigQueryRecordMeta r : payload) {
+            metas.add(r);
+        }
+        Assert.assertEquals(1, metas.size());
+        Assert.assertNull(metas.get(0).getErrorInfo());
+    }
+
+    @Test
+    public void shouldNotConvertMoreThanOneYearFutureTimeStamp() throws IOException {
+        Instant moreThanOneYear = Instant.now().plus(Days.of(10000));
+        TestMessageBQ m1 = TestMessageBQ.newBuilder()
+                .setCreatedAt(Timestamp.newBuilder().setSeconds(moreThanOneYear.getEpochSecond())
+                        .build())
+                .addUpdatedAt(Timestamp.newBuilder().setSeconds(moreThanOneYear.getEpochSecond())
+                        .build())
+                .addUpdatedAt(Timestamp.newBuilder().setSeconds(moreThanOneYear.getEpochSecond())
+                        .build())
+                .build();
+        List<Message> inputList = new ArrayList<Message>() {
+            {
+                add(new Message(null, m1.toByteArray()));
+            }
+        };
+        BigQueryPayload payload = converter.convert(inputList);
+        ProtoRows protoPayload = (ProtoRows) payload.getPayload();
+        Assert.assertEquals(0, protoPayload.getSerializedRowsCount());
+        List<BigQueryRecordMeta> metas = new ArrayList<>();
+        for (BigQueryRecordMeta r : payload) {
+            metas.add(r);
+        }
+        Assert.assertEquals(1, metas.size());
+        Assert.assertEquals(ErrorType.INVALID_MESSAGE_ERROR, metas.get(0).getErrorInfo().getErrorType());
+        Assert.assertTrue(metas.get(0).getErrorInfo().getException().getMessage()
+                .contains("is outside the allowed bounds. You can only stream to date range within 1825 days in the past and 366 days in the future relative to the current date."));
+    }
+
+    @Test
+    public void shouldNotConvertIfInvalidTimeStamp() throws IOException {
+        Instant now = Instant.now();
+        Instant invalid = Instant.ofEpochSecond(1111111111111111L);
+        TestMessageBQ m1 = TestMessageBQ.newBuilder()
+                .setCreatedAt(Timestamp.newBuilder().setSeconds(now.getEpochSecond()).build())
+                .addUpdatedAt(Timestamp.newBuilder().setSeconds(invalid.getEpochSecond()).build())
+                .addUpdatedAt(Timestamp.newBuilder().setSeconds(now.getEpochSecond()).build())
+                .build();
+        List<Message> inputList = new ArrayList<Message>() {
+            {
+                add(new Message(null, m1.toByteArray()));
+            }
+        };
+        BigQueryPayload payload = converter.convert(inputList);
+        ProtoRows protoPayload = (ProtoRows) payload.getPayload();
+        Assert.assertEquals(0, protoPayload.getSerializedRowsCount());
+        List<BigQueryRecordMeta> metas = new ArrayList<>();
+        for (BigQueryRecordMeta r : payload) {
+            metas.add(r);
+        }
+        Assert.assertEquals(1, metas.size());
+        Assert.assertEquals(ErrorType.INVALID_MESSAGE_ERROR, metas.get(0).getErrorInfo().getErrorType());
+        Assert.assertTrue(metas.get(0).getErrorInfo().getException().getMessage()
+                .contains("is outside the allowed bounds in BQ"));
+    }
+
+    @Test
+    public void shouldConvertTimeStampCloseToLimits() throws IOException {
+        Instant past = Instant.now().minus(Days.of(1824));
+        Instant future = Instant.now().plus(Days.of(365));
+        TestMessageBQ m1 = TestMessageBQ.newBuilder()
+                .setCreatedAt(Timestamp.newBuilder().setSeconds(past.getEpochSecond()).build())
+                .addUpdatedAt(Timestamp.newBuilder().setSeconds(future.getEpochSecond()).build())
+                .addUpdatedAt(Timestamp.newBuilder().setSeconds(past.getEpochSecond()).build())
+                .build();
+        List<Message> inputList = new ArrayList<Message>() {
+            {
+                add(new Message(null, m1.toByteArray()));
+            }
+        };
+        BigQueryPayload payload = converter.convert(inputList);
+        ProtoRows protoPayload = (ProtoRows) payload.getPayload();
+        Assert.assertEquals(1, protoPayload.getSerializedRowsCount());
+        ByteString serializedRows = protoPayload.getSerializedRows(0);
+        DynamicMessage convertedMessage = DynamicMessage.parseFrom(testDescriptor, serializedRows);
+        long createdAt = (long) convertedMessage.getField(testDescriptor.findFieldByName("created_at"));
+        // Microseconds
+        Assert.assertEquals(TimeUnit.SECONDS.toMicros(past.getEpochSecond()), createdAt);
+        List updatedAt = (List) convertedMessage.getField(testDescriptor.findFieldByName("updated_at"));
+        Assert.assertEquals(TimeUnit.SECONDS.toMicros(future.getEpochSecond()), updatedAt.get(0));
+        Assert.assertEquals(TimeUnit.SECONDS.toMicros(past.getEpochSecond()), updatedAt.get(1));
+    }
+}
diff --git a/src/test/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoWriterTest.java b/src/test/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoWriterTest.java
new file mode 100644
index 00000000..f012e566
--- /dev/null
+++ b/src/test/java/org/raystack/depot/bigquery/storage/proto/BigQueryProtoWriterTest.java
@@ -0,0 +1,367 @@
+package org.raystack.depot.bigquery.storage.proto;
+
+import com.google.api.core.ApiFuture;
+import com.google.api.gax.core.CredentialsProvider;
+import com.google.cloud.bigquery.storage.v1.*;
+import com.google.protobuf.Descriptors;
+import org.raystack.depot.bigquery.storage.BigQueryPayload;
+import org.raystack.depot.bigquery.storage.BigQueryStream;
+import org.raystack.depot.bigquery.storage.BigQueryWriter;
+import org.raystack.depot.bigquery.storage.BigQueryWriterFactory;
+import 
org.raystack.depot.config.BigQuerySinkConfig; +import org.raystack.depot.config.enums.SinkConnectorSchemaDataType; +import org.raystack.depot.metrics.BigQueryMetrics; +import org.raystack.depot.metrics.Instrumentation; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import java.time.Instant; +import java.util.concurrent.ExecutionException; + +public class BigQueryProtoWriterTest { + private final StreamWriter writer = Mockito.mock(StreamWriter.class); + private final Instrumentation instrumentation = Mockito.mock(Instrumentation.class); + private final BigQuerySinkConfig config = Mockito.mock(BigQuerySinkConfig.class); + private final BigQueryMetrics metrics = Mockito.mock(BigQueryMetrics.class); + private BigQueryWriter bigQueryWriter; + + @Before + public void setup() { + Mockito.when(config.getSinkConnectorSchemaDataType()).thenReturn(SinkConnectorSchemaDataType.PROTOBUF); + Mockito.when(config.getGCloudProjectID()).thenReturn("test-project"); + Mockito.when(config.getDatasetName()).thenReturn("dataset"); + Mockito.when(config.getTableName()).thenReturn("table"); + Mockito.when(metrics.getBigqueryOperationTotalMetric()) + .thenReturn("application_sink_bigquery_operation_total"); + Mockito.when(metrics.getBigqueryOperationLatencyMetric()) + .thenReturn("application_sink_bigquery_operation_latency_milliseconds"); + BigQueryWriteClient bqwc = Mockito.mock(BigQueryWriteClient.class); + CredentialsProvider cp = Mockito.mock(CredentialsProvider.class); + BigQueryStream bqs = new BigQueryProtoStream(writer); + WriteStream ws = Mockito.mock(WriteStream.class); + TableSchema schema = TableSchema.newBuilder() + .addFields(TableFieldSchema.newBuilder() + .setName("field1") + .setMode(TableFieldSchema.Mode.NULLABLE) + .setType(TableFieldSchema.Type.STRING) + .build()) + .addFields(TableFieldSchema.newBuilder() + .setName("field2") + .setMode(TableFieldSchema.Mode.REPEATED) + .setType(TableFieldSchema.Type.INT64) + .build()) + .build(); + Mockito.when(ws.getTableSchema()).thenReturn(schema); + Mockito.when(bqwc.getWriteStream(Mockito.any(GetWriteStreamRequest.class))).thenReturn(ws); + bigQueryWriter = BigQueryWriterFactory.createBigQueryWriter(config, c -> bqwc, c -> cp, + (c, cr, p) -> bqs, instrumentation, metrics); + bigQueryWriter.init(); + } + + @Test + public void shouldInitStreamWriter() { + Descriptors.Descriptor descriptor = ((BigQueryProtoWriter) bigQueryWriter).getDescriptor(); + Assert.assertEquals(writer, ((BigQueryProtoWriter) bigQueryWriter).getStreamWriter()); + Assert.assertEquals("field1", descriptor.getFields().get(0).getName()); + Assert.assertEquals(Descriptors.FieldDescriptor.Type.STRING, descriptor.getFields().get(0).getType()); + Assert.assertFalse(descriptor.getFields().get(0).isRepeated()); + Assert.assertEquals("field2", descriptor.getFields().get(1).getName()); + Assert.assertEquals(Descriptors.FieldDescriptor.Type.INT64, descriptor.getFields().get(1).getType()); + Assert.assertTrue(descriptor.getFields().get(1).isRepeated()); + } + + @Test + public void shouldAppendAndGet() throws Exception { + ProtoRows rows = Mockito.mock(ProtoRows.class); + org.raystack.depot.bigquery.storage.BigQueryPayload payload = new BigQueryPayload(); + payload.setPayload(rows); + ApiFuture future = Mockito.mock(ApiFuture.class); + AppendRowsResponse apiResponse = Mockito.mock(AppendRowsResponse.class); + Mockito.when(future.get()).thenReturn(apiResponse); + Mockito.when(writer.append(rows)).thenReturn(future); + AppendRowsResponse 
appendRowsResponse = bigQueryWriter.appendAndGet(payload); + Assert.assertEquals(apiResponse, appendRowsResponse); + } + + @Test + public void shouldRecreateStreamWriter() throws ExecutionException, InterruptedException { + // check previous schema + Descriptors.Descriptor descriptor = ((BigQueryProtoWriter) bigQueryWriter).getDescriptor(); + Assert.assertEquals(2, descriptor.getFields().size()); + TableSchema newSchema = TableSchema.newBuilder() + .addFields(TableFieldSchema.newBuilder() + .setName("field1") + .setMode(TableFieldSchema.Mode.NULLABLE) + .setType(TableFieldSchema.Type.STRING) + .build()) + .addFields(TableFieldSchema.newBuilder() + .setName("field2") + .setMode(TableFieldSchema.Mode.REPEATED) + .setType(TableFieldSchema.Type.INT64) + .build()) + .addFields(TableFieldSchema.newBuilder() + .setName("field3") + .setMode(TableFieldSchema.Mode.NULLABLE) + .setType(TableFieldSchema.Type.STRING) + .build()) + .build(); + + Mockito.when(writer.getUpdatedSchema()).thenReturn(newSchema); + org.raystack.depot.bigquery.storage.BigQueryPayload payload = new BigQueryPayload(); + ProtoRows rows = Mockito.mock(ProtoRows.class); + payload.setPayload(rows); + ApiFuture future = Mockito.mock(ApiFuture.class); + AppendRowsResponse apiResponse = Mockito.mock(AppendRowsResponse.class); + Mockito.when(future.get()).thenReturn(apiResponse); + Mockito.when(writer.append(rows)).thenReturn(future); + AppendRowsResponse appendRowsResponse = bigQueryWriter.appendAndGet(payload); + Mockito.verify(writer, Mockito.times(1)).close(); + Assert.assertEquals(apiResponse, appendRowsResponse); + descriptor = ((BigQueryProtoWriter) bigQueryWriter).getDescriptor(); + Assert.assertEquals(3, descriptor.getFields().size()); + Assert.assertEquals(writer, ((BigQueryProtoWriter) bigQueryWriter).getStreamWriter()); + Assert.assertEquals("field1", descriptor.getFields().get(0).getName()); + Assert.assertEquals(Descriptors.FieldDescriptor.Type.STRING, descriptor.getFields().get(0).getType()); + Assert.assertFalse(descriptor.getFields().get(0).isRepeated()); + Assert.assertEquals("field2", descriptor.getFields().get(1).getName()); + Assert.assertEquals(Descriptors.FieldDescriptor.Type.INT64, descriptor.getFields().get(1).getType()); + Assert.assertTrue(descriptor.getFields().get(1).isRepeated()); + Assert.assertEquals("field3", descriptor.getFields().get(2).getName()); + Assert.assertEquals(Descriptors.FieldDescriptor.Type.STRING, descriptor.getFields().get(2).getType()); + Assert.assertFalse(descriptor.getFields().get(2).isRepeated()); + Mockito.verify(instrumentation, Mockito.times(1)) + .logInfo("Updated table schema detected, recreating stream writer"); + } + + @Test + public void shouldCaptureMetricsForStreamWriterAppend() throws Exception { + ProtoRows rows = Mockito.mock(ProtoRows.class); + org.raystack.depot.bigquery.storage.BigQueryPayload payload = new BigQueryPayload(); + payload.setPayload(rows); + ApiFuture future = Mockito.mock(ApiFuture.class); + AppendRowsResponse apiResponse = Mockito.mock(AppendRowsResponse.class); + Mockito.when(future.get()).thenReturn(apiResponse); + Mockito.when(writer.append(rows)).thenReturn(future); + bigQueryWriter.appendAndGet(payload); + + String tableName = String.format(BigQueryMetrics.BIGQUERY_TABLE_TAG, config.getTableName()); + String datasetName = String.format(BigQueryMetrics.BIGQUERY_DATASET_TAG, config.getDatasetName()); + String projectId = String.format(BigQueryMetrics.BIGQUERY_PROJECT_TAG, config.getGCloudProjectID()); + String apiTag = 
String.format(BigQueryMetrics.BIGQUERY_API_TAG, + BigQueryMetrics.BigQueryStorageAPIType.STREAM_WRITER_APPEND); + + Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter( + metrics.getBigqueryOperationTotalMetric(), + tableName, + datasetName, + projectId, + apiTag); + + Mockito.verify(instrumentation, Mockito.times(1)).captureDurationSince( + Mockito.eq(metrics.getBigqueryOperationLatencyMetric()), + Mockito.any(Instant.class), + Mockito.eq(tableName), + Mockito.eq(datasetName), + Mockito.eq(projectId), + Mockito.eq(apiTag)); + } + + @Test + public void shouldCaptureMetricsForStreamWriterCreatedOnceWhenUpdatedSchemaIsNotAvailable() throws Exception { + ProtoRows rows = Mockito.mock(ProtoRows.class); + org.raystack.depot.bigquery.storage.BigQueryPayload payload = new BigQueryPayload(); + payload.setPayload(rows); + ApiFuture future = Mockito.mock(ApiFuture.class); + AppendRowsResponse apiResponse = Mockito.mock(AppendRowsResponse.class); + Mockito.when(future.get()).thenReturn(apiResponse); + Mockito.when(writer.append(rows)).thenReturn(future); + bigQueryWriter.appendAndGet(payload); + + String tableName = String.format(BigQueryMetrics.BIGQUERY_TABLE_TAG, config.getTableName()); + String datasetName = String.format(BigQueryMetrics.BIGQUERY_DATASET_TAG, config.getDatasetName()); + String projectId = String.format(BigQueryMetrics.BIGQUERY_PROJECT_TAG, config.getGCloudProjectID()); + String apiTag = String.format(BigQueryMetrics.BIGQUERY_API_TAG, + BigQueryMetrics.BigQueryStorageAPIType.STREAM_WRITER_CREATED); + + Mockito.verify(instrumentation, Mockito.times(0)) + .logInfo("Updated table schema detected, recreating stream writer"); + Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter( + metrics.getBigqueryOperationTotalMetric(), + tableName, + datasetName, + projectId, + apiTag); + Mockito.verify(instrumentation, Mockito.times(1)).captureDurationSince( + Mockito.eq(metrics.getBigqueryOperationLatencyMetric()), + Mockito.any(), + Mockito.eq(tableName), + Mockito.eq(datasetName), + Mockito.eq(projectId), + Mockito.eq(apiTag)); + } + + @Test + public void shouldCaptureMetricsForStreamWriterCreatedTwiceWhenUpdatedSchemaIsAvailable() throws Exception { + TableSchema newSchema = TableSchema.newBuilder() + .addFields(TableFieldSchema.newBuilder() + .setName("field1") + .setMode(TableFieldSchema.Mode.NULLABLE) + .setType(TableFieldSchema.Type.STRING) + .build()) + .addFields(TableFieldSchema.newBuilder() + .setName("field2") + .setMode(TableFieldSchema.Mode.REPEATED) + .setType(TableFieldSchema.Type.INT64) + .build()) + .addFields(TableFieldSchema.newBuilder() + .setName("field3") + .setMode(TableFieldSchema.Mode.NULLABLE) + .setType(TableFieldSchema.Type.STRING) + .build()) + .build(); + + ProtoRows rows = Mockito.mock(ProtoRows.class); + org.raystack.depot.bigquery.storage.BigQueryPayload payload = new BigQueryPayload(); + payload.setPayload(rows); + ApiFuture future = Mockito.mock(ApiFuture.class); + AppendRowsResponse apiResponse = Mockito.mock(AppendRowsResponse.class); + Mockito.when(future.get()).thenReturn(apiResponse); + Mockito.when(writer.append(rows)).thenReturn(future); + Mockito.when(writer.getUpdatedSchema()).thenReturn(newSchema); + bigQueryWriter.appendAndGet(payload); + + String tableName = String.format(BigQueryMetrics.BIGQUERY_TABLE_TAG, config.getTableName()); + String datasetName = String.format(BigQueryMetrics.BIGQUERY_DATASET_TAG, config.getDatasetName()); + String projectId = String.format(BigQueryMetrics.BIGQUERY_PROJECT_TAG, 
config.getGCloudProjectID()); + String apiTag = String.format(BigQueryMetrics.BIGQUERY_API_TAG, + BigQueryMetrics.BigQueryStorageAPIType.STREAM_WRITER_CREATED); + + Mockito.verify(instrumentation, Mockito.times(1)) + .logInfo("Updated table schema detected, recreating stream writer"); + Mockito.verify(instrumentation, Mockito.times(2)).incrementCounter( + metrics.getBigqueryOperationTotalMetric(), + tableName, + datasetName, + projectId, + apiTag); + Mockito.verify(instrumentation, Mockito.times(2)).captureDurationSince( + Mockito.eq(metrics.getBigqueryOperationLatencyMetric()), + Mockito.any(), + Mockito.eq(tableName), + Mockito.eq(datasetName), + Mockito.eq(projectId), + Mockito.eq(apiTag)); + } + + @Test + public void shouldCaptureMetricsForStreamWriterClosedWhenUpdatedSchemaIsAvailable() throws Exception { + TableSchema newSchema = TableSchema.newBuilder() + .addFields(TableFieldSchema.newBuilder() + .setName("field1") + .setMode(TableFieldSchema.Mode.NULLABLE) + .setType(TableFieldSchema.Type.STRING) + .build()) + .addFields(TableFieldSchema.newBuilder() + .setName("field2") + .setMode(TableFieldSchema.Mode.REPEATED) + .setType(TableFieldSchema.Type.INT64) + .build()) + .addFields(TableFieldSchema.newBuilder() + .setName("field3") + .setMode(TableFieldSchema.Mode.NULLABLE) + .setType(TableFieldSchema.Type.STRING) + .build()) + .build(); + + ProtoRows rows = Mockito.mock(ProtoRows.class); + org.raystack.depot.bigquery.storage.BigQueryPayload payload = new BigQueryPayload(); + payload.setPayload(rows); + ApiFuture future = Mockito.mock(ApiFuture.class); + AppendRowsResponse apiResponse = Mockito.mock(AppendRowsResponse.class); + Mockito.when(future.get()).thenReturn(apiResponse); + Mockito.when(writer.append(rows)).thenReturn(future); + Mockito.when(writer.getUpdatedSchema()).thenReturn(newSchema); + + bigQueryWriter.appendAndGet(payload); + + String tableName = String.format(BigQueryMetrics.BIGQUERY_TABLE_TAG, config.getTableName()); + String datasetName = String.format(BigQueryMetrics.BIGQUERY_DATASET_TAG, config.getDatasetName()); + String projectId = String.format(BigQueryMetrics.BIGQUERY_PROJECT_TAG, config.getGCloudProjectID()); + String apiTag = String.format(BigQueryMetrics.BIGQUERY_API_TAG, + BigQueryMetrics.BigQueryStorageAPIType.STREAM_WRITER_CLOSED); + + Mockito.verify(instrumentation, Mockito.times(1)) + .logInfo("Updated table schema detected, recreating stream writer"); + Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter( + metrics.getBigqueryOperationTotalMetric(), + tableName, + datasetName, + projectId, + apiTag); + Mockito.verify(instrumentation, Mockito.times(1)).captureDurationSince( + Mockito.eq(metrics.getBigqueryOperationLatencyMetric()), + Mockito.any(), + Mockito.eq(tableName), + Mockito.eq(datasetName), + Mockito.eq(projectId), + Mockito.eq(apiTag)); + } + + @Test + public void shouldCaptureBigqueryPayloadSizeMetrics() throws Exception { + ProtoRows rows = Mockito.mock(ProtoRows.class); + org.raystack.depot.bigquery.storage.BigQueryPayload payload = new BigQueryPayload(); + payload.setPayload(rows); + ApiFuture future = Mockito.mock(ApiFuture.class); + AppendRowsResponse apiResponse = Mockito.mock(AppendRowsResponse.class); + Mockito.when(future.get()).thenReturn(apiResponse); + Mockito.when(writer.append(rows)).thenReturn(future); + bigQueryWriter.appendAndGet(payload); + + String tableName = String.format(BigQueryMetrics.BIGQUERY_TABLE_TAG, config.getTableName()); + String datasetName = String.format(BigQueryMetrics.BIGQUERY_DATASET_TAG, 
config.getDatasetName());
+        String projectId = String.format(BigQueryMetrics.BIGQUERY_PROJECT_TAG, config.getGCloudProjectID());
+
+        Mockito.verify(instrumentation, Mockito.times(1)).captureCount(
+                Mockito.eq(metrics.getBigqueryPayloadSizeMetrics()),
+                Mockito.anyLong(),
+                Mockito.eq(tableName),
+                Mockito.eq(datasetName),
+                Mockito.eq(projectId));
+    }
+
+    @Test
+    public void shouldRecreateUnRecoverableStreamWriter() throws Exception {
+        Mockito.when(writer.isClosed()).thenReturn(true);
+        ProtoRows rows = Mockito.mock(ProtoRows.class);
+        org.raystack.depot.bigquery.storage.BigQueryPayload payload = new BigQueryPayload();
+        payload.setPayload(rows);
+        ApiFuture future = Mockito.mock(ApiFuture.class);
+        AppendRowsResponse apiResponse = Mockito.mock(AppendRowsResponse.class);
+        Mockito.when(future.get()).thenReturn(apiResponse);
+        Mockito.when(writer.append(rows)).thenReturn(future);
+        bigQueryWriter.appendAndGet(payload);
+
+        String tableName = String.format(BigQueryMetrics.BIGQUERY_TABLE_TAG, config.getTableName());
+        String datasetName = String.format(BigQueryMetrics.BIGQUERY_DATASET_TAG, config.getDatasetName());
+        String projectId = String.format(BigQueryMetrics.BIGQUERY_PROJECT_TAG, config.getGCloudProjectID());
+        String apiTag = String.format(BigQueryMetrics.BIGQUERY_API_TAG,
+                BigQueryMetrics.BigQueryStorageAPIType.STREAM_WRITER_CREATED);
+        // The stream writer is created twice: once during init(), and once more
+        // because the existing writer reports isClosed() and must be recreated
+        Mockito.verify(instrumentation, Mockito.times(2)).incrementCounter(
+                metrics.getBigqueryOperationTotalMetric(),
+                tableName,
+                datasetName,
+                projectId,
+                apiTag);
+        Mockito.verify(instrumentation, Mockito.times(2)).captureDurationSince(
+                Mockito.eq(metrics.getBigqueryOperationLatencyMetric()),
+                Mockito.any(),
+                Mockito.eq(tableName),
+                Mockito.eq(datasetName),
+                Mockito.eq(projectId),
+                Mockito.eq(apiTag));
+    }
+}
diff --git a/src/test/java/io/odpf/depot/bigtable/BigTableSinkTest.java b/src/test/java/org/raystack/depot/bigtable/BigTableSinkTest.java
similarity index 62%
rename from src/test/java/io/odpf/depot/bigtable/BigTableSinkTest.java
rename to src/test/java/org/raystack/depot/bigtable/BigTableSinkTest.java
index b7442882..5c017c86 100644
--- a/src/test/java/io/odpf/depot/bigtable/BigTableSinkTest.java
+++ b/src/test/java/org/raystack/depot/bigtable/BigTableSinkTest.java
@@ -1,19 +1,19 @@
-package io.odpf.depot.bigtable;
+package org.raystack.depot.bigtable;
 import com.google.cloud.bigtable.data.v2.models.RowMutationEntry;
-import io.odpf.depot.OdpfSinkResponse;
-import io.odpf.depot.TestBookingLogKey;
-import io.odpf.depot.TestBookingLogMessage;
-import io.odpf.depot.TestServiceType;
-import io.odpf.depot.bigtable.client.BigTableClient;
-import io.odpf.depot.bigtable.model.BigTableRecord;
-import io.odpf.depot.bigtable.parser.BigTableRecordParser;
-import io.odpf.depot.error.ErrorInfo;
-import io.odpf.depot.error.ErrorType;
-import io.odpf.depot.message.OdpfMessage;
-import io.odpf.depot.metrics.BigTableMetrics;
-import io.odpf.depot.metrics.Instrumentation;
-import io.odpf.depot.metrics.StatsDReporter;
+import org.raystack.depot.bigtable.client.BigTableClient;
+import org.raystack.depot.bigtable.model.BigTableRecord;
+import org.raystack.depot.bigtable.parser.BigTableRecordParser;
+import org.raystack.depot.SinkResponse;
+import org.raystack.depot.TestBookingLogKey;
+import org.raystack.depot.TestBookingLogMessage;
+import org.raystack.depot.TestServiceType;
+import org.raystack.depot.error.ErrorInfo;
+import org.raystack.depot.error.ErrorType;
+import 
org.raystack.depot.message.Message; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.metrics.StatsDReporter; +import org.raystack.depot.metrics.BigTableMetrics; import org.aeonbits.owner.util.Collections; import org.junit.Assert; import org.junit.Before; @@ -36,7 +36,7 @@ public class BigTableSinkTest { private BigTableMetrics bigtableMetrics; private BigTableSink bigTableSink; - private List messages; + private List messages; private List validRecords; private List invalidRecords; private ErrorInfo errorInfo; @@ -44,13 +44,17 @@ public class BigTableSinkTest { @Before public void setUp() { MockitoAnnotations.openMocks(this); - TestBookingLogKey bookingLogKey1 = TestBookingLogKey.newBuilder().setOrderNumber("order#1").setOrderUrl("order-url#1").build(); - TestBookingLogMessage bookingLogMessage1 = TestBookingLogMessage.newBuilder().setOrderNumber("order#1").setOrderUrl("order-url#1").setServiceType(TestServiceType.Enum.GO_SEND).build(); - TestBookingLogKey bookingLogKey2 = TestBookingLogKey.newBuilder().setOrderNumber("order#2").setOrderUrl("order-url#2").build(); - TestBookingLogMessage bookingLogMessage2 = TestBookingLogMessage.newBuilder().setOrderNumber("order#2").setOrderUrl("order-url#2").setServiceType(TestServiceType.Enum.GO_SHOP).build(); - - OdpfMessage message1 = new OdpfMessage(bookingLogKey1.toByteArray(), bookingLogMessage1.toByteArray()); - OdpfMessage message2 = new OdpfMessage(bookingLogKey2.toByteArray(), bookingLogMessage2.toByteArray()); + TestBookingLogKey bookingLogKey1 = TestBookingLogKey.newBuilder().setOrderNumber("order#1") + .setOrderUrl("order-url#1").build(); + TestBookingLogMessage bookingLogMessage1 = TestBookingLogMessage.newBuilder().setOrderNumber("order#1") + .setOrderUrl("order-url#1").setServiceType(TestServiceType.Enum.GO_SEND).build(); + TestBookingLogKey bookingLogKey2 = TestBookingLogKey.newBuilder().setOrderNumber("order#2") + .setOrderUrl("order-url#2").build(); + TestBookingLogMessage bookingLogMessage2 = TestBookingLogMessage.newBuilder().setOrderNumber("order#2") + .setOrderUrl("order-url#2").setServiceType(TestServiceType.Enum.GO_SHOP).build(); + + Message message1 = new Message(bookingLogKey1.toByteArray(), bookingLogMessage1.toByteArray()); + Message message2 = new Message(bookingLogKey2.toByteArray(), bookingLogMessage2.toByteArray()); messages = Collections.list(message1, message2); RowMutationEntry rowMutationEntry = RowMutationEntry.create("rowKey").setCell("family", "qualifier", "value"); @@ -63,7 +67,8 @@ public void setUp() { BigTableRecord bigTableRecord4 = new BigTableRecord(null, 4, errorInfo, message2.getMetadata()); invalidRecords = Collections.list(bigTableRecord3, bigTableRecord4); - bigTableSink = new BigTableSink(bigTableClient, bigTableRecordParser, bigtableMetrics, new Instrumentation(statsDReporter, BigTableSink.class)); + bigTableSink = new BigTableSink(bigTableClient, bigTableRecordParser, bigtableMetrics, + new Instrumentation(statsDReporter, BigTableSink.class)); } @Test @@ -71,17 +76,17 @@ public void shouldSendValidBigTableRecordsToBigTableSink() { Mockito.when(bigTableRecordParser.convert(messages)).thenReturn(validRecords); Mockito.when(bigTableClient.send(validRecords)).thenReturn(null); - OdpfSinkResponse response = bigTableSink.pushToSink(messages); + SinkResponse response = bigTableSink.pushToSink(messages); Mockito.verify(bigTableClient, Mockito.times(1)).send(validRecords); Assert.assertEquals(0, response.getErrors().size()); } @Test - public void 
shouldAddErrorsFromInvalidRecordsToOdpfResponse() { + public void shouldAddErrorsFromInvalidRecordsToResponse() { Mockito.when(bigTableRecordParser.convert(messages)).thenReturn(invalidRecords); - OdpfSinkResponse response = bigTableSink.pushToSink(messages); + SinkResponse response = bigTableSink.pushToSink(messages); Mockito.verify(bigTableClient, Mockito.times(0)).send(validRecords); Assert.assertTrue(response.hasErrors()); diff --git a/src/test/java/io/odpf/depot/bigtable/client/BigTableClientTest.java b/src/test/java/org/raystack/depot/bigtable/client/BigTableClientTest.java similarity index 75% rename from src/test/java/io/odpf/depot/bigtable/client/BigTableClientTest.java rename to src/test/java/org/raystack/depot/bigtable/client/BigTableClientTest.java index d39b74b6..302ddfff 100644 --- a/src/test/java/io/odpf/depot/bigtable/client/BigTableClientTest.java +++ b/src/test/java/org/raystack/depot/bigtable/client/BigTableClientTest.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigtable.client; +package org.raystack.depot.bigtable.client; import com.google.api.gax.rpc.ApiException; import com.google.bigtable.admin.v2.ColumnFamily; @@ -8,18 +8,18 @@ import com.google.cloud.bigtable.data.v2.models.BulkMutation; import com.google.cloud.bigtable.data.v2.models.MutateRowsException; import com.google.cloud.bigtable.data.v2.models.RowMutationEntry; -import io.odpf.depot.TestBookingLogKey; -import io.odpf.depot.TestBookingLogMessage; -import io.odpf.depot.TestServiceType; -import io.odpf.depot.bigtable.exception.BigTableInvalidSchemaException; -import io.odpf.depot.bigtable.model.BigTableRecord; -import io.odpf.depot.bigtable.model.BigTableSchema; -import io.odpf.depot.bigtable.response.BigTableResponse; -import io.odpf.depot.config.BigTableSinkConfig; -import io.odpf.depot.message.OdpfMessage; -import io.odpf.depot.message.SinkConnectorSchemaMessageMode; -import io.odpf.depot.metrics.BigTableMetrics; -import io.odpf.depot.metrics.Instrumentation; +import org.raystack.depot.TestBookingLogKey; +import org.raystack.depot.TestBookingLogMessage; +import org.raystack.depot.TestServiceType; +import org.raystack.depot.config.BigTableSinkConfig; +import org.raystack.depot.message.Message; +import org.raystack.depot.message.SinkConnectorSchemaMessageMode; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.bigtable.exception.BigTableInvalidSchemaException; +import org.raystack.depot.bigtable.model.BigTableRecord; +import org.raystack.depot.bigtable.model.BigTableSchema; +import org.raystack.depot.bigtable.response.BigTableResponse; +import org.raystack.depot.metrics.BigTableMetrics; import org.aeonbits.owner.ConfigFactory; import org.aeonbits.owner.util.Collections; import org.junit.Assert; @@ -60,22 +60,28 @@ public class BigTableClientTest { @Before public void setUp() throws IOException { MockitoAnnotations.openMocks(this); - System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "io.odpf.depot.TestBookingLogMessage"); - System.setProperty("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", String.valueOf(SinkConnectorSchemaMessageMode.LOG_MESSAGE)); + System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "org.raystack.depot.TestBookingLogMessage"); + System.setProperty("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", + String.valueOf(SinkConnectorSchemaMessageMode.LOG_MESSAGE)); System.setProperty("SINK_BIGTABLE_GOOGLE_CLOUD_PROJECT_ID", "test-gcloud-project"); System.setProperty("SINK_BIGTABLE_INSTANCE_ID", "test-instance"); System.setProperty("SINK_BIGTABLE_TABLE_ID", 
"test-table"); System.setProperty("SINK_BIGTABLE_CREDENTIAL_PATH", "Users/github/bigtable/test-credential"); - System.setProperty("SINK_BIGTABLE_COLUMN_FAMILY_MAPPING", "{ \"family-test\" : { \"qualifier_name1\" : \"input_field1\", \"qualifier_name2\" : \"input_field2\"} }"); + System.setProperty("SINK_BIGTABLE_COLUMN_FAMILY_MAPPING", + "{ \"family-test\" : { \"qualifier_name1\" : \"input_field1\", \"qualifier_name2\" : \"input_field2\"} }"); System.setProperty("SINK_BIGTABLE_ROW_KEY_TEMPLATE", "row-key-constant-string"); - TestBookingLogKey bookingLogKey1 = TestBookingLogKey.newBuilder().setOrderNumber("order#1").setOrderUrl("order-url#1").build(); - TestBookingLogMessage bookingLogMessage1 = TestBookingLogMessage.newBuilder().setOrderNumber("order#1").setOrderUrl("order-url#1").setServiceType(TestServiceType.Enum.GO_SEND).build(); - TestBookingLogKey bookingLogKey2 = TestBookingLogKey.newBuilder().setOrderNumber("order#2").setOrderUrl("order-url#2").build(); - TestBookingLogMessage bookingLogMessage2 = TestBookingLogMessage.newBuilder().setOrderNumber("order#2").setOrderUrl("order-url#2").setServiceType(TestServiceType.Enum.GO_SHOP).build(); + TestBookingLogKey bookingLogKey1 = TestBookingLogKey.newBuilder().setOrderNumber("order#1") + .setOrderUrl("order-url#1").build(); + TestBookingLogMessage bookingLogMessage1 = TestBookingLogMessage.newBuilder().setOrderNumber("order#1") + .setOrderUrl("order-url#1").setServiceType(TestServiceType.Enum.GO_SEND).build(); + TestBookingLogKey bookingLogKey2 = TestBookingLogKey.newBuilder().setOrderNumber("order#2") + .setOrderUrl("order-url#2").build(); + TestBookingLogMessage bookingLogMessage2 = TestBookingLogMessage.newBuilder().setOrderNumber("order#2") + .setOrderUrl("order-url#2").setServiceType(TestServiceType.Enum.GO_SHOP).build(); - OdpfMessage message1 = new OdpfMessage(bookingLogKey1.toByteArray(), bookingLogMessage1.toByteArray()); - OdpfMessage message2 = new OdpfMessage(bookingLogKey2.toByteArray(), bookingLogMessage2.toByteArray()); + Message message1 = new Message(bookingLogKey1.toByteArray(), bookingLogMessage1.toByteArray()); + Message message2 = new Message(bookingLogKey2.toByteArray(), bookingLogMessage2.toByteArray()); RowMutationEntry rowMutationEntry = RowMutationEntry.create("rowKey").setCell("family", "qualifier", "value"); BigTableRecord bigTableRecord1 = new BigTableRecord(rowMutationEntry, 1, null, message1.getMetadata()); @@ -83,7 +89,8 @@ public void setUp() throws IOException { validRecords = Collections.list(bigTableRecord1, bigTableRecord2); sinkConfig = ConfigFactory.create(BigTableSinkConfig.class, System.getProperties()); BigTableSchema schema = new BigTableSchema(sinkConfig.getColumnFamilyMapping()); - bigTableClient = new BigTableClient(sinkConfig, bigTableDataClient, bigtableTableAdminClient, schema, bigtableMetrics, instrumentation); + bigTableClient = new BigTableClient(sinkConfig, bigTableDataClient, bigtableTableAdminClient, schema, + bigtableMetrics, instrumentation); } @Test @@ -108,7 +115,8 @@ public void shouldReturnBigTableResponseWithFailedMutationsWhenBulkMutateRowsThr Assert.assertTrue(bigTableResponse.hasErrors()); Assert.assertEquals(2, bigTableResponse.getFailedMutations().size()); - Mockito.verify(instrumentation, Mockito.times(1)).logError("Some entries failed to be applied. {}", mutateRowsException.getCause()); + Mockito.verify(instrumentation, Mockito.times(1)).logError("Some entries failed to be applied. 
{}", + mutateRowsException.getCause()); } @Test @@ -126,7 +134,8 @@ public void shouldThrowInvalidSchemaExceptionIfTableDoesNotExist() { @Test public void shouldThrowInvalidSchemaExceptionIfColumnFamilyDoesNotExist() { Table testTable = Table.fromProto(com.google.bigtable.admin.v2.Table.newBuilder() - .setName("projects/" + sinkConfig.getGCloudProjectID() + "/instances/" + sinkConfig.getInstanceId() + "/tables/" + sinkConfig.getTableId()) + .setName("projects/" + sinkConfig.getGCloudProjectID() + "/instances/" + sinkConfig.getInstanceId() + + "/tables/" + sinkConfig.getTableId()) .putColumnFamilies("existing-family-test", ColumnFamily.newBuilder().build()) .build()); @@ -145,11 +154,13 @@ public void shouldCaptureBigtableMetricsWhenBulkMutateRowsDoesNotThrowAnExceptio bigTableClient.send(validRecords); - Mockito.verify(instrumentation, Mockito.times(1)).captureDurationSince(eq(bigtableMetrics.getBigtableOperationLatencyMetric()), + Mockito.verify(instrumentation, Mockito.times(1)).captureDurationSince( + eq(bigtableMetrics.getBigtableOperationLatencyMetric()), any(), eq(String.format(BigTableMetrics.BIGTABLE_INSTANCE_TAG, sinkConfig.getInstanceId())), eq(String.format(BigTableMetrics.BIGTABLE_TABLE_TAG, sinkConfig.getTableId()))); - Mockito.verify(instrumentation, Mockito.times(1)).captureCount(eq(bigtableMetrics.getBigtableOperationTotalMetric()), + Mockito.verify(instrumentation, Mockito.times(1)).captureCount( + eq(bigtableMetrics.getBigtableOperationTotalMetric()), any(), eq(String.format(BigTableMetrics.BIGTABLE_INSTANCE_TAG, sinkConfig.getInstanceId())), eq(String.format(BigTableMetrics.BIGTABLE_TABLE_TAG, sinkConfig.getTableId()))); diff --git a/src/test/java/io/odpf/depot/bigtable/model/BigTableSchemaTest.java b/src/test/java/org/raystack/depot/bigtable/model/BigTableSchemaTest.java similarity index 84% rename from src/test/java/io/odpf/depot/bigtable/model/BigTableSchemaTest.java rename to src/test/java/org/raystack/depot/bigtable/model/BigTableSchemaTest.java index 811dbd34..97fc49e9 100644 --- a/src/test/java/io/odpf/depot/bigtable/model/BigTableSchemaTest.java +++ b/src/test/java/org/raystack/depot/bigtable/model/BigTableSchemaTest.java @@ -1,7 +1,7 @@ -package io.odpf.depot.bigtable.model; +package org.raystack.depot.bigtable.model; -import io.odpf.depot.config.BigTableSinkConfig; -import io.odpf.depot.exception.ConfigurationException; +import org.raystack.depot.config.BigTableSinkConfig; +import org.raystack.depot.exception.ConfigurationException; import org.aeonbits.owner.ConfigFactory; import org.json.JSONException; import org.junit.Assert; @@ -64,7 +64,8 @@ public void shouldGetColumnsForGivenColumnFamily() { public void shouldThrowConfigurationExceptionWhenColumnMappingIsEmpty() { System.setProperty("SINK_BIGTABLE_COLUMN_FAMILY_MAPPING", ""); BigTableSinkConfig sinkConfig = ConfigFactory.create(BigTableSinkConfig.class, System.getProperties()); - ConfigurationException configurationException = assertThrows(ConfigurationException.class, () -> new BigTableSchema(sinkConfig.getColumnFamilyMapping())); + ConfigurationException configurationException = assertThrows(ConfigurationException.class, + () -> new BigTableSchema(sinkConfig.getColumnFamilyMapping())); Assert.assertEquals("Column Mapping should not be empty or null", configurationException.getMessage()); } @@ -107,31 +108,39 @@ public void shouldThrowJsonException() { bigtableSchema = new BigTableSchema(sinkConfig.getColumnFamilyMapping()); Set columnFamilies = bigtableSchema.getColumnFamilies(); Assert.assertEquals(2, 
columnFamilies.size()); - JSONException jsonException = assertThrows(JSONException.class, () -> bigtableSchema.getColumns("family_name3")); + JSONException jsonException = assertThrows(JSONException.class, + () -> bigtableSchema.getColumns("family_name3")); Assert.assertEquals("JSONObject[\"family_name3\"] not found.", jsonException.getMessage()); - jsonException = assertThrows(JSONException.class, () -> bigtableSchema.getField("family_name1", "qualifier_name3")); + jsonException = assertThrows(JSONException.class, + () -> bigtableSchema.getField("family_name1", "qualifier_name3")); Assert.assertEquals("JSONObject[\"qualifier_name3\"] not found.", jsonException.getMessage()); } @Test public void shouldReturnEmptySetOfMissingColumnFamilies() { - Set missingColumnFamilies = bigtableSchema.getMissingColumnFamilies(new HashSet() {{ - add("family_name1"); - add("family_name2"); - }}); + Set missingColumnFamilies = bigtableSchema.getMissingColumnFamilies(new HashSet() { + { + add("family_name1"); + add("family_name2"); + } + }); Assert.assertEquals(0, missingColumnFamilies.size()); } @Test public void shouldReturnMissingColumnFamilies() { - Set missingColumnFamilies = bigtableSchema.getMissingColumnFamilies(new HashSet() {{ - add("family_name3"); - add("family_name2"); - add("family_name4"); - }}); - Assert.assertEquals(new HashSet() {{ - add("family_name1"); - }}, missingColumnFamilies); + Set missingColumnFamilies = bigtableSchema.getMissingColumnFamilies(new HashSet() { + { + add("family_name3"); + add("family_name2"); + add("family_name4"); + } + }); + Assert.assertEquals(new HashSet() { + { + add("family_name1"); + } + }, missingColumnFamilies); } } diff --git a/src/test/java/io/odpf/depot/bigtable/parser/BigTableRecordParserTest.java b/src/test/java/org/raystack/depot/bigtable/parser/BigTableRecordParserTest.java similarity index 58% rename from src/test/java/io/odpf/depot/bigtable/parser/BigTableRecordParserTest.java rename to src/test/java/org/raystack/depot/bigtable/parser/BigTableRecordParserTest.java index 8c0b47e9..09efb256 100644 --- a/src/test/java/io/odpf/depot/bigtable/parser/BigTableRecordParserTest.java +++ b/src/test/java/org/raystack/depot/bigtable/parser/BigTableRecordParserTest.java @@ -1,27 +1,27 @@ -package io.odpf.depot.bigtable.parser; +package org.raystack.depot.bigtable.parser; import com.google.protobuf.Timestamp; -import io.odpf.depot.TestBookingLogKey; -import io.odpf.depot.TestBookingLogMessage; -import io.odpf.depot.TestLocation; -import io.odpf.depot.TestServiceType; -import io.odpf.depot.bigtable.model.BigTableRecord; -import io.odpf.depot.bigtable.model.BigTableSchema; -import io.odpf.depot.common.Template; -import io.odpf.depot.common.Tuple; -import io.odpf.depot.config.BigTableSinkConfig; -import io.odpf.depot.error.ErrorType; -import io.odpf.depot.exception.ConfigurationException; -import io.odpf.depot.exception.EmptyMessageException; -import io.odpf.depot.exception.InvalidTemplateException; -import io.odpf.depot.message.OdpfMessage; -import io.odpf.depot.message.OdpfMessageSchema; -import io.odpf.depot.message.ParsedOdpfMessage; -import io.odpf.depot.message.OdpfMessageParser; -import io.odpf.depot.message.SinkConnectorSchemaMessageMode; -import io.odpf.depot.message.proto.ProtoOdpfMessageParser; -import io.odpf.depot.utils.MessageConfigUtils; -import io.odpf.stencil.client.ClassLoadStencilClient; +import org.raystack.depot.TestBookingLogKey; +import org.raystack.depot.TestBookingLogMessage; +import org.raystack.depot.TestLocation; +import 
org.raystack.depot.TestServiceType; +import org.raystack.depot.common.Template; +import org.raystack.depot.common.Tuple; +import org.raystack.depot.config.BigTableSinkConfig; +import org.raystack.depot.error.ErrorType; +import org.raystack.depot.message.proto.ProtoMessageParser; +import org.raystack.depot.utils.MessageConfigUtils; +import org.raystack.depot.bigtable.model.BigTableRecord; +import org.raystack.depot.bigtable.model.BigTableSchema; +import org.raystack.depot.exception.ConfigurationException; +import org.raystack.depot.exception.EmptyMessageException; +import org.raystack.depot.exception.InvalidTemplateException; +import org.raystack.depot.message.Message; +import org.raystack.depot.message.MessageSchema; +import org.raystack.depot.message.ParsedMessage; +import org.raystack.depot.message.MessageParser; +import org.raystack.depot.message.SinkConnectorSchemaMessageMode; +import org.raystack.stencil.client.ClassLoadStencilClient; import org.aeonbits.owner.ConfigFactory; import org.aeonbits.owner.util.Collections; import org.junit.Before; @@ -46,55 +46,62 @@ public class BigTableRecordParserTest { @Mock private ClassLoadStencilClient stencilClient; @Mock - private OdpfMessageSchema schema; + private MessageSchema schema; @Mock - private OdpfMessageParser mockOdpfMessageParser; + private MessageParser mockMessageParser; @Mock private BigTableRowKeyParser mockBigTableRowKeyParser; @Mock - private ParsedOdpfMessage mockParsedOdpfMessage; + private ParsedMessage mockParsedMessage; private BigTableRecordParser bigTableRecordParser; - private List messages; + private List messages; private BigTableSinkConfig sinkConfig; @Before public void setUp() throws IOException, InvalidTemplateException { MockitoAnnotations.openMocks(this); - System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "io.odpf.depot.TestBookingLogMessage"); - System.setProperty("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", String.valueOf(SinkConnectorSchemaMessageMode.LOG_MESSAGE)); - System.setProperty("SINK_BIGTABLE_COLUMN_FAMILY_MAPPING", "{ \"cf1\" : { \"q1\" : \"order_number\", \"q2\" : \"service_type\"} }"); + System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "org.raystack.depot.TestBookingLogMessage"); + System.setProperty("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", + String.valueOf(SinkConnectorSchemaMessageMode.LOG_MESSAGE)); + System.setProperty("SINK_BIGTABLE_COLUMN_FAMILY_MAPPING", + "{ \"cf1\" : { \"q1\" : \"order_number\", \"q2\" : \"service_type\"} }"); System.setProperty("SINK_BIGTABLE_ROW_KEY_TEMPLATE", "row-key-constant-string"); - - TestBookingLogKey bookingLogKey1 = TestBookingLogKey.newBuilder().setOrderNumber("order#1").setOrderUrl("order-url#1").build(); - TestBookingLogMessage bookingLogMessage1 = TestBookingLogMessage.newBuilder().setOrderNumber("order#1").setOrderUrl("order-url#1") + TestBookingLogKey bookingLogKey1 = TestBookingLogKey.newBuilder().setOrderNumber("order#1") + .setOrderUrl("order-url#1").build(); + TestBookingLogMessage bookingLogMessage1 = TestBookingLogMessage.newBuilder().setOrderNumber("order#1") + .setOrderUrl("order-url#1") .setEventTimestamp(Timestamp.newBuilder().setSeconds(100L).setNanos(200).build()) .setServiceType(TestServiceType.Enum.GO_SEND) .setDriverPickupLocation(TestLocation.newBuilder().setLatitude(100D).setLongitude(200D).build()) .build(); - TestBookingLogKey bookingLogKey2 = TestBookingLogKey.newBuilder().setOrderNumber("order#2").setOrderUrl("order-url#2").build(); - TestBookingLogMessage bookingLogMessage2 = 
TestBookingLogMessage.newBuilder().setOrderNumber("order#2").setOrderUrl("order-url#2") + TestBookingLogKey bookingLogKey2 = TestBookingLogKey.newBuilder().setOrderNumber("order#2") + .setOrderUrl("order-url#2").build(); + TestBookingLogMessage bookingLogMessage2 = TestBookingLogMessage.newBuilder().setOrderNumber("order#2") + .setOrderUrl("order-url#2") .setEventTimestamp(Timestamp.newBuilder().setSeconds(101L).setNanos(202).build()) .setServiceType(TestServiceType.Enum.GO_SHOP) .setDriverPickupLocation(TestLocation.newBuilder().setLatitude(300D).setLongitude(400D).build()) .build(); - OdpfMessage message1 = new OdpfMessage(bookingLogKey1.toByteArray(), bookingLogMessage1.toByteArray()); - OdpfMessage message2 = new OdpfMessage(bookingLogKey2.toByteArray(), bookingLogMessage2.toByteArray()); + Message message1 = new Message(bookingLogKey1.toByteArray(), bookingLogMessage1.toByteArray()); + Message message2 = new Message(bookingLogKey2.toByteArray(), bookingLogMessage2.toByteArray()); messages = Collections.list(message1, message2); stencilClient = Mockito.mock(ClassLoadStencilClient.class, CALLS_REAL_METHODS); - ProtoOdpfMessageParser protoOdpfMessageParser = new ProtoOdpfMessageParser(stencilClient); + ProtoMessageParser protoMessageParser = new ProtoMessageParser(stencilClient); sinkConfig = ConfigFactory.create(BigTableSinkConfig.class, System.getProperties()); Tuple modeAndSchema = MessageConfigUtils.getModeAndSchema(sinkConfig); BigTableSchema bigtableSchema = new BigTableSchema(sinkConfig.getColumnFamilyMapping()); - BigTableRowKeyParser bigTableRowKeyParser = new BigTableRowKeyParser(new Template(sinkConfig.getRowKeyTemplate()), schema); + BigTableRowKeyParser bigTableRowKeyParser = new BigTableRowKeyParser( + new Template(sinkConfig.getRowKeyTemplate()), schema); - bigTableRecordParser = new BigTableRecordParser(protoOdpfMessageParser, bigTableRowKeyParser, modeAndSchema, schema, bigtableSchema); + bigTableRecordParser = new BigTableRecordParser(protoMessageParser, bigTableRowKeyParser, modeAndSchema, schema, + bigtableSchema); } @Test - public void shouldReturnValidRecordsForListOfValidOdpfMessages() { + public void shouldReturnValidRecordsForListOfValidMessages() { List records = bigTableRecordParser.convert(messages); assertTrue(records.get(0).isValid()); assertTrue(records.get(1).isValid()); @@ -103,14 +110,18 @@ public void shouldReturnValidRecordsForListOfValidOdpfMessages() { } @Test - public void shouldReturnValidRecordsForListOfValidOdpfMessagesForComplexFieldsInColumnsMapping() throws InvalidTemplateException { - System.setProperty("SINK_BIGTABLE_COLUMN_FAMILY_MAPPING", "{ \"cf1\" : { \"q1\" : \"order_number\", \"q2\" : \"service_type\", \"q3\" : \"driver_pickup_location\"} }"); - ProtoOdpfMessageParser protoOdpfMessageParser = new ProtoOdpfMessageParser(stencilClient); + public void shouldReturnValidRecordsForListOfValidMessagesForComplexFieldsInColumnsMapping() + throws InvalidTemplateException { + System.setProperty("SINK_BIGTABLE_COLUMN_FAMILY_MAPPING", + "{ \"cf1\" : { \"q1\" : \"order_number\", \"q2\" : \"service_type\", \"q3\" : \"driver_pickup_location\"} }"); + ProtoMessageParser protoMessageParser = new ProtoMessageParser(stencilClient); sinkConfig = ConfigFactory.create(BigTableSinkConfig.class, System.getProperties()); Tuple modeAndSchema = MessageConfigUtils.getModeAndSchema(sinkConfig); - BigTableRowKeyParser bigTableRowKeyParser = new BigTableRowKeyParser(new Template(sinkConfig.getRowKeyTemplate()), schema); + BigTableRowKeyParser bigTableRowKeyParser = 
new BigTableRowKeyParser( + new Template(sinkConfig.getRowKeyTemplate()), schema); BigTableSchema bigtableSchema = new BigTableSchema(sinkConfig.getColumnFamilyMapping()); - bigTableRecordParser = new BigTableRecordParser(protoOdpfMessageParser, bigTableRowKeyParser, modeAndSchema, schema, bigtableSchema); + bigTableRecordParser = new BigTableRecordParser(protoMessageParser, bigTableRowKeyParser, modeAndSchema, schema, + bigtableSchema); List records = bigTableRecordParser.convert(messages); assertTrue(records.get(0).isValid()); @@ -120,14 +131,18 @@ public void shouldReturnValidRecordsForListOfValidOdpfMessagesForComplexFieldsIn } @Test - public void shouldReturnValidRecordsForListOfValidOdpfMessagesForNestedTimestampFieldsInColumnsMapping() throws InvalidTemplateException { - System.setProperty("SINK_BIGTABLE_COLUMN_FAMILY_MAPPING", "{ \"cf1\" : { \"q1\" : \"order_number\", \"q2\" : \"service_type\", \"q3\" : \"event_timestamp.nanos\"} }"); - ProtoOdpfMessageParser protoOdpfMessageParser = new ProtoOdpfMessageParser(stencilClient); + public void shouldReturnValidRecordsForListOfValidMessagesForNestedTimestampFieldsInColumnsMapping() + throws InvalidTemplateException { + System.setProperty("SINK_BIGTABLE_COLUMN_FAMILY_MAPPING", + "{ \"cf1\" : { \"q1\" : \"order_number\", \"q2\" : \"service_type\", \"q3\" : \"event_timestamp.nanos\"} }"); + ProtoMessageParser protoMessageParser = new ProtoMessageParser(stencilClient); sinkConfig = ConfigFactory.create(BigTableSinkConfig.class, System.getProperties()); Tuple modeAndSchema = MessageConfigUtils.getModeAndSchema(sinkConfig); - BigTableRowKeyParser bigTableRowKeyParser = new BigTableRowKeyParser(new Template(sinkConfig.getRowKeyTemplate()), schema); + BigTableRowKeyParser bigTableRowKeyParser = new BigTableRowKeyParser( + new Template(sinkConfig.getRowKeyTemplate()), schema); BigTableSchema bigtableSchema = new BigTableSchema(sinkConfig.getColumnFamilyMapping()); - bigTableRecordParser = new BigTableRecordParser(protoOdpfMessageParser, bigTableRowKeyParser, modeAndSchema, schema, bigtableSchema); + bigTableRecordParser = new BigTableRecordParser(protoMessageParser, bigTableRowKeyParser, modeAndSchema, schema, + bigtableSchema); List records = bigTableRecordParser.convert(messages); assertTrue(records.get(0).isValid()); @@ -137,14 +152,18 @@ public void shouldReturnValidRecordsForListOfValidOdpfMessagesForNestedTimestamp } @Test - public void shouldReturnValidRecordsForListOfValidOdpfMessagesForNestedFieldsInColumnsMapping() throws InvalidTemplateException { - System.setProperty("SINK_BIGTABLE_COLUMN_FAMILY_MAPPING", "{ \"cf1\" : { \"q1\" : \"order_number\", \"q2\" : \"service_type\", \"q3\" : \"driver_pickup_location.latitude\"} }"); - ProtoOdpfMessageParser protoOdpfMessageParser = new ProtoOdpfMessageParser(stencilClient); + public void shouldReturnValidRecordsForListOfValidMessagesForNestedFieldsInColumnsMapping() + throws InvalidTemplateException { + System.setProperty("SINK_BIGTABLE_COLUMN_FAMILY_MAPPING", + "{ \"cf1\" : { \"q1\" : \"order_number\", \"q2\" : \"service_type\", \"q3\" : \"driver_pickup_location.latitude\"} }"); + ProtoMessageParser protoMessageParser = new ProtoMessageParser(stencilClient); sinkConfig = ConfigFactory.create(BigTableSinkConfig.class, System.getProperties()); Tuple modeAndSchema = MessageConfigUtils.getModeAndSchema(sinkConfig); - BigTableRowKeyParser bigTableRowKeyParser = new BigTableRowKeyParser(new Template(sinkConfig.getRowKeyTemplate()), schema); + BigTableRowKeyParser bigTableRowKeyParser = new 
BigTableRowKeyParser( + new Template(sinkConfig.getRowKeyTemplate()), schema); BigTableSchema bigtableSchema = new BigTableSchema(sinkConfig.getColumnFamilyMapping()); - bigTableRecordParser = new BigTableRecordParser(protoOdpfMessageParser, bigTableRowKeyParser, modeAndSchema, schema, bigtableSchema); + bigTableRecordParser = new BigTableRecordParser(protoMessageParser, bigTableRowKeyParser, modeAndSchema, schema, + bigtableSchema); List records = bigTableRecordParser.convert(messages); assertTrue(records.get(0).isValid()); @@ -154,21 +173,21 @@ public void shouldReturnValidRecordsForListOfValidOdpfMessagesForNestedFieldsInC } @Test - public void shouldReturnInvalidRecordForAnyNullOdpfMessage() { - List records = bigTableRecordParser.convert(Collections.list(new OdpfMessage(null, null))); + public void shouldReturnInvalidRecordForAnyNullMessage() { + List records = bigTableRecordParser.convert(Collections.list(new Message(null, null))); assertFalse(records.get(0).isValid()); assertNotNull(records.get(0).getErrorInfo()); } @Test - public void shouldCatchEmptyMessageExceptionAndReturnAnInvalidBigtableRecordWithErrorTypeAsInvalidMessageError() throws IOException { - bigTableRecordParser = new BigTableRecordParser(mockOdpfMessageParser, + public void shouldCatchEmptyMessageExceptionAndReturnAnInvalidBigtableRecordWithErrorTypeAsInvalidMessageError() + throws IOException { + bigTableRecordParser = new BigTableRecordParser(mockMessageParser, mockBigTableRowKeyParser, MessageConfigUtils.getModeAndSchema(sinkConfig), schema, - new BigTableSchema(sinkConfig.getColumnFamilyMapping()) - ); - when(mockOdpfMessageParser.parse(any(), any(), any())).thenThrow(EmptyMessageException.class); + new BigTableSchema(sinkConfig.getColumnFamilyMapping())); + when(mockMessageParser.parse(any(), any(), any())).thenThrow(EmptyMessageException.class); List bigTableRecords = bigTableRecordParser.convert(messages); @@ -179,14 +198,14 @@ public void shouldCatchEmptyMessageExceptionAndReturnAnInvalidBigtableRecordWith } @Test - public void shouldCatchConfigurationExceptionAndReturnAnInvalidBigtableRecordWithErrorTypeAsUnknownFieldsError() throws IOException { - bigTableRecordParser = new BigTableRecordParser(mockOdpfMessageParser, + public void shouldCatchConfigurationExceptionAndReturnAnInvalidBigtableRecordWithErrorTypeAsUnknownFieldsError() + throws IOException { + bigTableRecordParser = new BigTableRecordParser(mockMessageParser, mockBigTableRowKeyParser, MessageConfigUtils.getModeAndSchema(sinkConfig), schema, - new BigTableSchema(sinkConfig.getColumnFamilyMapping()) - ); - when(mockOdpfMessageParser.parse(any(), any(), any())).thenThrow(ConfigurationException.class); + new BigTableSchema(sinkConfig.getColumnFamilyMapping())); + when(mockMessageParser.parse(any(), any(), any())).thenThrow(ConfigurationException.class); List bigTableRecords = bigTableRecordParser.convert(messages); @@ -197,14 +216,14 @@ public void shouldCatchConfigurationExceptionAndReturnAnInvalidBigtableRecordWit } @Test - public void shouldCatchIOExceptionAndReturnAnInvalidBigtableRecordWithErrorTypeAsDeserializationError() throws IOException { - bigTableRecordParser = new BigTableRecordParser(mockOdpfMessageParser, + public void shouldCatchIOExceptionAndReturnAnInvalidBigtableRecordWithErrorTypeAsDeserializationError() + throws IOException { + bigTableRecordParser = new BigTableRecordParser(mockMessageParser, mockBigTableRowKeyParser, MessageConfigUtils.getModeAndSchema(sinkConfig), schema, - new 
BigTableSchema(sinkConfig.getColumnFamilyMapping()) - ); - when(mockOdpfMessageParser.parse(any(), any(), any())).thenThrow(IOException.class); + new BigTableSchema(sinkConfig.getColumnFamilyMapping())); + when(mockMessageParser.parse(any(), any(), any())).thenThrow(IOException.class); List bigTableRecords = bigTableRecordParser.convert(messages); @@ -215,15 +234,15 @@ public void shouldCatchIOExceptionAndReturnAnInvalidBigtableRecordWithErrorTypeA } @Test - public void shouldCatchIllegalArgumentExceptionAndReturnAnInvalidBigtableRecordWithErrorTypeAsUnknownFieldsError() throws IOException { - bigTableRecordParser = new BigTableRecordParser(mockOdpfMessageParser, + public void shouldCatchIllegalArgumentExceptionAndReturnAnInvalidBigtableRecordWithErrorTypeAsUnknownFieldsError() + throws IOException { + bigTableRecordParser = new BigTableRecordParser(mockMessageParser, mockBigTableRowKeyParser, MessageConfigUtils.getModeAndSchema(sinkConfig), schema, - new BigTableSchema(sinkConfig.getColumnFamilyMapping()) - ); - when(mockOdpfMessageParser.parse(any(), any(), any())).thenReturn(mockParsedOdpfMessage); - when(mockBigTableRowKeyParser.parse(mockParsedOdpfMessage)).thenThrow(IllegalArgumentException.class); + new BigTableSchema(sinkConfig.getColumnFamilyMapping())); + when(mockMessageParser.parse(any(), any(), any())).thenReturn(mockParsedMessage); + when(mockBigTableRowKeyParser.parse(mockParsedMessage)).thenThrow(IllegalArgumentException.class); List bigTableRecords = bigTableRecordParser.convert(messages); diff --git a/src/test/java/io/odpf/depot/bigtable/parser/BigTableResponseParserTest.java b/src/test/java/org/raystack/depot/bigtable/parser/BigTableResponseParserTest.java similarity index 75% rename from src/test/java/io/odpf/depot/bigtable/parser/BigTableResponseParserTest.java rename to src/test/java/org/raystack/depot/bigtable/parser/BigTableResponseParserTest.java index 7c5df022..52d3adfa 100644 --- a/src/test/java/io/odpf/depot/bigtable/parser/BigTableResponseParserTest.java +++ b/src/test/java/org/raystack/depot/bigtable/parser/BigTableResponseParserTest.java @@ -1,4 +1,4 @@ -package io.odpf.depot.bigtable.parser; +package org.raystack.depot.bigtable.parser; import com.google.api.gax.rpc.ApiException; import com.google.api.gax.rpc.ErrorDetails; @@ -8,16 +8,16 @@ import com.google.rpc.BadRequest; import com.google.rpc.PreconditionFailure; import com.google.rpc.QuotaFailure; -import io.odpf.depot.TestBookingLogKey; -import io.odpf.depot.TestBookingLogMessage; -import io.odpf.depot.TestServiceType; -import io.odpf.depot.bigtable.model.BigTableRecord; -import io.odpf.depot.bigtable.response.BigTableResponse; -import io.odpf.depot.error.ErrorInfo; -import io.odpf.depot.error.ErrorType; -import io.odpf.depot.message.OdpfMessage; -import io.odpf.depot.metrics.BigTableMetrics; -import io.odpf.depot.metrics.Instrumentation; +import org.raystack.depot.TestBookingLogKey; +import org.raystack.depot.TestBookingLogMessage; +import org.raystack.depot.TestServiceType; +import org.raystack.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorType; +import org.raystack.depot.message.Message; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.bigtable.model.BigTableRecord; +import org.raystack.depot.bigtable.response.BigTableResponse; +import org.raystack.depot.metrics.BigTableMetrics; import org.aeonbits.owner.util.Collections; import org.junit.Before; import org.junit.Test; @@ -58,16 +58,22 @@ public void setUp() { 
         Mockito.when(errorDetails.getQuotaFailure()).thenReturn(null);
         Mockito.when(errorDetails.getPreconditionFailure()).thenReturn(null);
 
-        TestBookingLogKey bookingLogKey1 = TestBookingLogKey.newBuilder().setOrderNumber("order#1").setOrderUrl("order-url#1").build();
-        TestBookingLogMessage bookingLogMessage1 = TestBookingLogMessage.newBuilder().setOrderNumber("order#1").setOrderUrl("order-url#1").setServiceType(TestServiceType.Enum.GO_SEND).build();
-        TestBookingLogKey bookingLogKey2 = TestBookingLogKey.newBuilder().setOrderNumber("order#2").setOrderUrl("order-url#2").build();
-        TestBookingLogMessage bookingLogMessage2 = TestBookingLogMessage.newBuilder().setOrderNumber("order#2").setOrderUrl("order-url#2").setServiceType(TestServiceType.Enum.GO_SHOP).build();
-
-        OdpfMessage message1 = new OdpfMessage(bookingLogKey1.toByteArray(), bookingLogMessage1.toByteArray());
-        OdpfMessage message2 = new OdpfMessage(bookingLogKey2.toByteArray(), bookingLogMessage2.toByteArray());
-
-        RowMutationEntry rowMutationEntry1 = RowMutationEntry.create("rowKey1").setCell("family1", "qualifier1", "value1");
-        RowMutationEntry rowMutationEntry2 = RowMutationEntry.create("rowKey2").setCell("family2", "qualifier2", "value2");
+        TestBookingLogKey bookingLogKey1 = TestBookingLogKey.newBuilder().setOrderNumber("order#1")
+                .setOrderUrl("order-url#1").build();
+        TestBookingLogMessage bookingLogMessage1 = TestBookingLogMessage.newBuilder().setOrderNumber("order#1")
+                .setOrderUrl("order-url#1").setServiceType(TestServiceType.Enum.GO_SEND).build();
+        TestBookingLogKey bookingLogKey2 = TestBookingLogKey.newBuilder().setOrderNumber("order#2")
+                .setOrderUrl("order-url#2").build();
+        TestBookingLogMessage bookingLogMessage2 = TestBookingLogMessage.newBuilder().setOrderNumber("order#2")
+                .setOrderUrl("order-url#2").setServiceType(TestServiceType.Enum.GO_SHOP).build();
+
+        Message message1 = new Message(bookingLogKey1.toByteArray(), bookingLogMessage1.toByteArray());
+        Message message2 = new Message(bookingLogKey2.toByteArray(), bookingLogMessage2.toByteArray());
+
+        RowMutationEntry rowMutationEntry1 = RowMutationEntry.create("rowKey1").setCell("family1", "qualifier1",
+                "value1");
+        RowMutationEntry rowMutationEntry2 = RowMutationEntry.create("rowKey2").setCell("family2", "qualifier2",
+                "value2");
 
         BigTableRecord bigTableRecord1 = new BigTableRecord(rowMutationEntry1, 0, null, message1.getMetadata());
         BigTableRecord bigTableRecord2 = new BigTableRecord(rowMutationEntry2, 1, null, message2.getMetadata());
         validRecords = Collections.list(bigTableRecord1, bigTableRecord2);
@@ -83,7 +89,8 @@ public void shouldReturnErrorInfoMapWithRetryableError() {
         Mockito.when(code.getHttpStatusCode()).thenReturn(400);
         Mockito.when(apiException.isRetryable()).thenReturn(Boolean.TRUE);
 
-        Map<Long, ErrorInfo> errorsFromSinkResponse = BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics, instrumentation);
+        Map<Long, ErrorInfo> errorsFromSinkResponse = BigTableResponseParser.getErrorsFromSinkResponse(validRecords,
+                bigtableResponse, bigtableMetrics, instrumentation);
 
         Assertions.assertEquals(ErrorType.SINK_RETRYABLE_ERROR, errorsFromSinkResponse.get(1L).getErrorType());
         Assertions.assertEquals(apiException, errorsFromSinkResponse.get(1L).getException());
@@ -98,7 +105,8 @@ public void shouldReturnErrorInfoMapWith4XXError() {
 
         Mockito.when(code.getHttpStatusCode()).thenReturn(400);
 
-        Map<Long, ErrorInfo> errorsFromSinkResponse = BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics, instrumentation);
+        Map<Long, ErrorInfo> errorsFromSinkResponse = BigTableResponseParser.getErrorsFromSinkResponse(validRecords,
+                bigtableResponse, bigtableMetrics, instrumentation);
 
         Assertions.assertEquals(ErrorType.SINK_4XX_ERROR, errorsFromSinkResponse.get(1L).getErrorType());
         Assertions.assertEquals(apiException, errorsFromSinkResponse.get(1L).getException());
@@ -113,7 +121,8 @@ public void shouldReturnErrorInfoMapWith5XXError() {
 
         Mockito.when(code.getHttpStatusCode()).thenReturn(500);
 
-        Map<Long, ErrorInfo> errorsFromSinkResponse = BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics, instrumentation);
+        Map<Long, ErrorInfo> errorsFromSinkResponse = BigTableResponseParser.getErrorsFromSinkResponse(validRecords,
+                bigtableResponse, bigtableMetrics, instrumentation);
 
         Assertions.assertEquals(ErrorType.SINK_5XX_ERROR, errorsFromSinkResponse.get(1L).getErrorType());
         Assertions.assertEquals(apiException, errorsFromSinkResponse.get(1L).getException());
@@ -128,7 +137,8 @@ public void shouldReturnErrorInfoMapWithUnknownError() {
 
         Mockito.when(code.getHttpStatusCode()).thenReturn(0);
 
-        Map<Long, ErrorInfo> errorsFromSinkResponse = BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics, instrumentation);
+        Map<Long, ErrorInfo> errorsFromSinkResponse = BigTableResponseParser.getErrorsFromSinkResponse(validRecords,
+                bigtableResponse, bigtableMetrics, instrumentation);
 
         Assertions.assertEquals(ErrorType.SINK_UNKNOWN_ERROR, errorsFromSinkResponse.get(1L).getErrorType());
         Assertions.assertEquals(apiException, errorsFromSinkResponse.get(1L).getException());
@@ -144,9 +154,12 @@ public void shouldCaptureMetricBigtableErrorTypeBadRequest() {
         Mockito.when(code.getHttpStatusCode()).thenReturn(0);
         Mockito.when(errorDetails.getBadRequest()).thenReturn(BadRequest.getDefaultInstance());
 
-        BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics, instrumentation);
+        BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics,
+                instrumentation);
 
-        Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter(bigtableMetrics.getBigtableTotalErrorsMetrics(), String.format(BigTableMetrics.BIGTABLE_ERROR_TAG, BigTableMetrics.BigTableErrorType.BAD_REQUEST));
+        Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter(
+                bigtableMetrics.getBigtableTotalErrorsMetrics(),
+                String.format(BigTableMetrics.BIGTABLE_ERROR_TAG, BigTableMetrics.BigTableErrorType.BAD_REQUEST));
     }
 
     @Test
@@ -159,9 +172,12 @@ public void shouldCaptureMetricBigtableErrorTypeQuotaFailure() {
         Mockito.when(code.getHttpStatusCode()).thenReturn(0);
         Mockito.when(errorDetails.getQuotaFailure()).thenReturn(QuotaFailure.getDefaultInstance());
 
-        BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics, instrumentation);
+        BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics,
+                instrumentation);
 
-        Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter(bigtableMetrics.getBigtableTotalErrorsMetrics(), String.format(BigTableMetrics.BIGTABLE_ERROR_TAG, BigTableMetrics.BigTableErrorType.QUOTA_FAILURE));
+        Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter(
+                bigtableMetrics.getBigtableTotalErrorsMetrics(),
+                String.format(BigTableMetrics.BIGTABLE_ERROR_TAG, BigTableMetrics.BigTableErrorType.QUOTA_FAILURE));
     }
 
     @Test
@@ -174,9 +190,12 @@ public void shouldCaptureMetricBigtableErrorTypePreconditionFailure() {
         Mockito.when(code.getHttpStatusCode()).thenReturn(0);
         Mockito.when(errorDetails.getPreconditionFailure()).thenReturn(PreconditionFailure.getDefaultInstance());
 
-        BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics, instrumentation);
+        BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics,
+                instrumentation);
 
-        Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter(bigtableMetrics.getBigtableTotalErrorsMetrics(), String.format(BigTableMetrics.BIGTABLE_ERROR_TAG, BigTableMetrics.BigTableErrorType.PRECONDITION_FAILURE));
+        Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter(
+                bigtableMetrics.getBigtableTotalErrorsMetrics(), String.format(BigTableMetrics.BIGTABLE_ERROR_TAG,
+                        BigTableMetrics.BigTableErrorType.PRECONDITION_FAILURE));
     }
 
     @Test
@@ -188,9 +207,12 @@ public void shouldCaptureMetricBigtableErrorTypeRpcFailureByDefault() {
 
         Mockito.when(code.getHttpStatusCode()).thenReturn(0);
 
-        BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics, instrumentation);
+        BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics,
+                instrumentation);
 
-        Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter(bigtableMetrics.getBigtableTotalErrorsMetrics(), String.format(BigTableMetrics.BIGTABLE_ERROR_TAG, BigTableMetrics.BigTableErrorType.RPC_FAILURE));
+        Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter(
+                bigtableMetrics.getBigtableTotalErrorsMetrics(),
+                String.format(BigTableMetrics.BIGTABLE_ERROR_TAG, BigTableMetrics.BigTableErrorType.RPC_FAILURE));
     }
 
     @Test
@@ -203,9 +225,12 @@ public void shouldCaptureMetricBigtableErrorTypeRpcFailureIfErrorDetailsIsNull()
         Mockito.when(apiException.getErrorDetails()).thenReturn(null);
         Mockito.when(code.getHttpStatusCode()).thenReturn(0);
 
-        BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics, instrumentation);
+        BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics,
+                instrumentation);
 
-        Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter(bigtableMetrics.getBigtableTotalErrorsMetrics(), String.format(BigTableMetrics.BIGTABLE_ERROR_TAG, BigTableMetrics.BigTableErrorType.RPC_FAILURE));
+        Mockito.verify(instrumentation, Mockito.times(1)).incrementCounter(
+                bigtableMetrics.getBigtableTotalErrorsMetrics(),
+                String.format(BigTableMetrics.BIGTABLE_ERROR_TAG, BigTableMetrics.BigTableErrorType.RPC_FAILURE));
     }
 
     @Test
@@ -218,9 +243,11 @@ public void shouldLogErrorRecordWithReasonAndStatusCode() {
         Mockito.when(code.getHttpStatusCode()).thenReturn(0);
         Mockito.when(errorDetails.getPreconditionFailure()).thenReturn(PreconditionFailure.getDefaultInstance());
 
-        BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics, instrumentation);
+        BigTableResponseParser.getErrorsFromSinkResponse(validRecords, bigtableResponse, bigtableMetrics,
+                instrumentation);
 
-        Mockito.verify(instrumentation, Mockito.times(1)).logError("Error while inserting to Bigtable. Record Metadata: {}, Cause: {}, Reason: {}, StatusCode: {}, HttpCode: {}",
+        Mockito.verify(instrumentation, Mockito.times(1)).logError(
+                "Error while inserting to Bigtable. Record Metadata: {}, Cause: {}, Reason: {}, StatusCode: {}, HttpCode: {}",
                 validRecords.get(1).getMetadata(),
                 failedMutations.get(0).getError().getCause(),
                 failedMutations.get(0).getError().getReason(),
diff --git a/src/test/java/org/raystack/depot/bigtable/parser/BigTableRowKeyParserTest.java b/src/test/java/org/raystack/depot/bigtable/parser/BigTableRowKeyParserTest.java
new file mode 100644
index 00000000..5f01d73d
--- /dev/null
+++ b/src/test/java/org/raystack/depot/bigtable/parser/BigTableRowKeyParserTest.java
@@ -0,0 +1,110 @@
+package org.raystack.depot.bigtable.parser;
+
+import com.google.protobuf.Descriptors;
+import com.timgroup.statsd.NoOpStatsDClient;
+import org.raystack.depot.TestKey;
+import org.raystack.depot.TestMessage;
+import org.raystack.depot.TestNestedMessage;
+import org.raystack.depot.TestNestedRepeatedMessage;
+import org.raystack.depot.common.Template;
+import org.raystack.depot.config.BigTableSinkConfig;
+import org.raystack.depot.message.Message;
+import org.raystack.depot.message.MessageSchema;
+import org.raystack.depot.message.ParsedMessage;
+import org.raystack.depot.message.SinkConnectorSchemaMessageMode;
+import org.raystack.depot.message.proto.ProtoMessageParser;
+import org.raystack.depot.metrics.StatsDReporter;
+import org.raystack.depot.exception.InvalidTemplateException;
+import org.aeonbits.owner.ConfigFactory;
+import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+
+public class BigTableRowKeyParserTest {
+
+    private final Map<String, Descriptors.Descriptor> descriptorsMap = new HashMap<String, Descriptors.Descriptor>() {
+        {
+            put(String.format("%s", TestKey.class.getName()), TestKey.getDescriptor());
+            put(String.format("%s", TestMessage.class.getName()), TestMessage.getDescriptor());
+            put(String.format("%s", TestNestedMessage.class.getName()), TestNestedMessage.getDescriptor());
+            put(String.format("%s", TestNestedRepeatedMessage.class.getName()),
+                    TestNestedRepeatedMessage.getDescriptor());
+        }
+    };
+
+    @Test
+    public void shouldReturnParsedRowKeyForValidParameterisedTemplate() throws IOException, InvalidTemplateException {
+        System.setProperty("SINK_BIGTABLE_ROW_KEY_TEMPLATE", "row-%s$key#%s*test,order_number,order_details");
+        System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "org.raystack.depot.TestMessage");
+        BigTableSinkConfig sinkConfig = ConfigFactory.create(BigTableSinkConfig.class, System.getProperties());
+
+        ProtoMessageParser messageParser = new ProtoMessageParser(sinkConfig,
+                new StatsDReporter(new NoOpStatsDClient()), null);
+        MessageSchema schema = messageParser.getSchema(sinkConfig.getSinkConnectorSchemaProtoMessageClass(),
+                descriptorsMap);
+
+        byte[] logMessage = TestMessage.newBuilder()
+                .setOrderNumber("xyz-order")
+                .setOrderDetails("eureka")
+                .build()
+                .toByteArray();
+        Message message = new Message(null, logMessage);
+        ParsedMessage parsedMessage = messageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_MESSAGE,
+                sinkConfig.getSinkConnectorSchemaProtoMessageClass());
+
+        BigTableRowKeyParser bigTableRowKeyParser = new BigTableRowKeyParser(
+                new Template(sinkConfig.getRowKeyTemplate()), schema);
+        String parsedRowKey = bigTableRowKeyParser.parse(parsedMessage);
+        assertEquals("row-xyz-order$key#eureka*test", parsedRowKey);
+    }
+
+    @Test
+    public void shouldReturnTheRowKeySameAsTemplateWhenTemplateIsValidAndContainsOnlyConstantStrings()
+            throws IOException, InvalidTemplateException {
+        System.setProperty("SINK_BIGTABLE_ROW_KEY_TEMPLATE", "row-key#constant$String");
+        System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "org.raystack.depot.TestMessage");
+        BigTableSinkConfig sinkConfig = ConfigFactory.create(BigTableSinkConfig.class, System.getProperties());
+
+        ProtoMessageParser messageParser = new ProtoMessageParser(sinkConfig,
+                new StatsDReporter(new NoOpStatsDClient()), null);
+        MessageSchema schema = messageParser.getSchema(sinkConfig.getSinkConnectorSchemaProtoMessageClass(),
+                descriptorsMap);
+
+        byte[] logMessage = TestMessage.newBuilder()
+                .setOrderNumber("xyz-order")
+                .setOrderDetails("eureka")
+                .build()
+                .toByteArray();
+        Message message = new Message(null, logMessage);
+        ParsedMessage parsedMessage = messageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_MESSAGE,
+                sinkConfig.getSinkConnectorSchemaProtoMessageClass());
+
+        BigTableRowKeyParser bigTableRowKeyParser = new BigTableRowKeyParser(
+                new Template(sinkConfig.getRowKeyTemplate()), schema);
+        String parsedRowKey = bigTableRowKeyParser.parse(parsedMessage);
+        assertEquals("row-key#constant$String", parsedRowKey);
+    }
+
+    @Test
+    public void shouldThrowErrorForInvalidTemplate() throws IOException {
+        System.setProperty("SINK_BIGTABLE_ROW_KEY_TEMPLATE", "row-key%s");
+        System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "org.raystack.depot.TestMessage");
+        BigTableSinkConfig sinkConfig = ConfigFactory.create(BigTableSinkConfig.class, System.getProperties());
+
+        ProtoMessageParser messageParser = new ProtoMessageParser(sinkConfig,
+                new StatsDReporter(new NoOpStatsDClient()), null);
+        MessageSchema schema = messageParser.getSchema(sinkConfig.getSinkConnectorSchemaProtoMessageClass(),
+                descriptorsMap);
+
+        InvalidTemplateException illegalArgumentException = Assertions.assertThrows(InvalidTemplateException.class,
+                () -> new BigTableRowKeyParser(new Template(sinkConfig.getRowKeyTemplate()), schema));
+        assertEquals("Template is not valid, variables=1, validArgs=1, values=0",
+                illegalArgumentException.getMessage());
+    }
+
+}
diff --git a/src/test/java/io/odpf/depot/common/TemplateTest.java b/src/test/java/org/raystack/depot/common/TemplateTest.java
similarity index 64%
rename from src/test/java/io/odpf/depot/common/TemplateTest.java
rename to src/test/java/org/raystack/depot/common/TemplateTest.java
index f64bfe51..d547429b 100644
--- a/src/test/java/io/odpf/depot/common/TemplateTest.java
+++ b/src/test/java/org/raystack/depot/common/TemplateTest.java
@@ -1,22 +1,22 @@
-package io.odpf.depot.common;
+package org.raystack.depot.common;
 
 import com.google.protobuf.Descriptors;
-import io.odpf.depot.TestBookingLogMessage;
-import io.odpf.depot.TestKey;
-import io.odpf.depot.TestLocation;
-import io.odpf.depot.TestMessage;
-import io.odpf.depot.config.OdpfSinkConfig;
-import io.odpf.depot.config.enums.SinkConnectorSchemaDataType;
-import io.odpf.depot.exception.InvalidTemplateException;
-import io.odpf.depot.message.OdpfMessage;
-import io.odpf.depot.message.OdpfMessageParserFactory;
-import io.odpf.depot.message.OdpfMessageSchema;
-import io.odpf.depot.message.ParsedOdpfMessage;
-import io.odpf.depot.message.proto.ProtoOdpfMessageParser;
-import io.odpf.depot.message.proto.ProtoOdpfParsedMessage;
-import io.odpf.depot.metrics.StatsDReporter;
-import io.odpf.stencil.Parser;
-import io.odpf.stencil.StencilClientFactory;
+import org.raystack.depot.TestBookingLogMessage;
+import org.raystack.depot.TestKey;
+import org.raystack.depot.TestLocation;
+import org.raystack.depot.TestMessage;
+import org.raystack.depot.config.enums.SinkConnectorSchemaDataType;
+import org.raystack.depot.message.proto.ProtoMessageParser;
+import org.raystack.depot.message.proto.ProtoParsedMessage;
+import org.raystack.stencil.Parser;
+import org.raystack.stencil.StencilClientFactory;
+import org.raystack.depot.config.SinkConfig;
+import org.raystack.depot.exception.InvalidTemplateException;
+import org.raystack.depot.message.Message;
+import org.raystack.depot.message.MessageParserFactory;
+import org.raystack.depot.message.MessageSchema;
+import org.raystack.depot.message.ParsedMessage;
+import org.raystack.depot.metrics.StatsDReporter;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -35,13 +35,13 @@
 @RunWith(MockitoJUnitRunner.class)
 public class TemplateTest {
     @Mock
-    private OdpfSinkConfig sinkConfig;
+    private SinkConfig sinkConfig;
     @Mock
     private StatsDReporter statsDReporter;
-    private ParsedOdpfMessage parsedTestMessage;
-    private ParsedOdpfMessage parsedBookingMessage;
-    private OdpfMessageSchema schemaTest;
-    private OdpfMessageSchema schemaBooking;
+    private ParsedMessage parsedTestMessage;
+    private ParsedMessage parsedBookingMessage;
+    private MessageSchema schemaTest;
+    private MessageSchema schemaBooking;
 
     @Before
     public void setUp() throws Exception {
@@ -52,24 +52,30 @@ public void setUp() throws Exception {
                 .setAmountPaidByCash(12.3F)
                 .setDriverPickupLocation(TestLocation.newBuilder().setLongitude(10.0).setLatitude(23.9).build())
                 .build();
-        TestMessage testMessage = TestMessage.newBuilder().setOrderNumber("test-order").setOrderDetails("ORDER-DETAILS").build();
-        OdpfMessage message = new OdpfMessage(testKey.toByteArray(), testMessage.toByteArray());
-        OdpfMessage bookingMessage = new OdpfMessage(testKey.toByteArray(), testBookingLogMessage.toByteArray());
-        Map<String, Descriptors.Descriptor> descriptorsMap = new HashMap<String, Descriptors.Descriptor>() {{
-            put(String.format("%s", TestKey.class.getName()), TestKey.getDescriptor());
-            put(String.format("%s", TestMessage.class.getName()), TestMessage.getDescriptor());
-            put(String.format("%s", TestBookingLogMessage.class.getName()), TestBookingLogMessage.getDescriptor());
-            put(String.format("%s", TestBookingLogMessage.TopicMetadata.class.getName()), TestBookingLogMessage.TopicMetadata.getDescriptor());
-            put(String.format("%s", TestLocation.class.getName()), TestLocation.getDescriptor());
-        }};
+        TestMessage testMessage = TestMessage.newBuilder().setOrderNumber("test-order").setOrderDetails("ORDER-DETAILS")
+                .build();
+        Message message = new Message(testKey.toByteArray(), testMessage.toByteArray());
+        Message bookingMessage = new Message(testKey.toByteArray(), testBookingLogMessage.toByteArray());
+        Map<String, Descriptors.Descriptor> descriptorsMap = new HashMap<String, Descriptors.Descriptor>() {
+            {
+                put(String.format("%s", TestKey.class.getName()), TestKey.getDescriptor());
+                put(String.format("%s", TestMessage.class.getName()), TestMessage.getDescriptor());
+                put(String.format("%s", TestBookingLogMessage.class.getName()), TestBookingLogMessage.getDescriptor());
+                put(String.format("%s", TestBookingLogMessage.TopicMetadata.class.getName()),
+                        TestBookingLogMessage.TopicMetadata.getDescriptor());
+                put(String.format("%s", TestLocation.class.getName()), TestLocation.getDescriptor());
+            }
+        };
         Parser protoParserTest = StencilClientFactory.getClient().getParser(TestMessage.class.getName());
-        parsedTestMessage = new ProtoOdpfParsedMessage(protoParserTest.parse((byte[]) message.getLogMessage()));
+        parsedTestMessage = new ProtoParsedMessage(protoParserTest.parse((byte[]) message.getLogMessage()));
         Parser protoParserBooking = StencilClientFactory.getClient().getParser(TestBookingLogMessage.class.getName());
-        parsedBookingMessage = new ProtoOdpfParsedMessage(protoParserBooking.parse((byte[]) bookingMessage.getLogMessage()));
+        parsedBookingMessage = new ProtoParsedMessage(
+                protoParserBooking.parse((byte[]) bookingMessage.getLogMessage()));
         when(sinkConfig.getSinkConnectorSchemaDataType()).thenReturn(SinkConnectorSchemaDataType.PROTOBUF);
-        ProtoOdpfMessageParser messageParser = (ProtoOdpfMessageParser) OdpfMessageParserFactory.getParser(sinkConfig, statsDReporter);
-        schemaTest = messageParser.getSchema("io.odpf.depot.TestMessage", descriptorsMap);
-        schemaBooking = messageParser.getSchema("io.odpf.depot.TestBookingLogMessage", descriptorsMap);
+        ProtoMessageParser messageParser = (ProtoMessageParser) MessageParserFactory.getParser(sinkConfig,
+                statsDReporter);
+        schemaTest = messageParser.getSchema("org.raystack.depot.TestMessage", descriptorsMap);
+        schemaBooking = messageParser.getSchema("org.raystack.depot.TestBookingLogMessage", descriptorsMap);
     }
 
     @Test
@@ -116,7 +122,8 @@ public void shouldAcceptStringForCollectionKey() throws InvalidTemplateException
     @Test
     public void shouldNotAcceptStringWithPatternForCollectionKeyWithEmptyVariables() {
-        InvalidTemplateException e = assertThrows(InvalidTemplateException.class, () -> new Template("Test-%s%d%b,t1,t2"));
+        InvalidTemplateException e = assertThrows(InvalidTemplateException.class,
+                () -> new Template("Test-%s%d%b,t1,t2"));
         Assert.assertEquals("Template is not valid, variables=3, validArgs=3, values=2", e.getMessage());
 
         e = assertThrows(InvalidTemplateException.class, () -> new Template("Test-%s%s%y,order_number,order_details"));
diff --git a/src/test/java/io/odpf/depot/config/BigQuerySinkConfigTest.java b/src/test/java/org/raystack/depot/config/BigQuerySinkConfigTest.java
similarity index 62%
rename from src/test/java/io/odpf/depot/config/BigQuerySinkConfigTest.java
rename to src/test/java/org/raystack/depot/config/BigQuerySinkConfigTest.java
index 4cc9892a..1c2f0cb7 100644
--- a/src/test/java/io/odpf/depot/config/BigQuerySinkConfigTest.java
+++ b/src/test/java/org/raystack/depot/config/BigQuerySinkConfigTest.java
@@ -1,6 +1,6 @@
-package io.odpf.depot.config;
+package org.raystack.depot.config;
 
-import io.odpf.depot.common.TupleString;
+import org.raystack.depot.common.TupleString;
 import org.aeonbits.owner.ConfigFactory;
 import org.junit.Assert;
 import org.junit.Test;
@@ -8,20 +8,21 @@
 import java.util.ArrayList;
 import java.util.List;
 
-
 public class BigQuerySinkConfigTest {
 
     @Test
     public void testMetadataTypes() {
-        System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "io.odpf.depot.TestKeyBQ");
+        System.setProperty("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "org.raystack.depot.TestKeyBQ");
         System.setProperty("SINK_BIGQUERY_ENABLE_AUTO_SCHEMA_UPDATE", "false");
         System.setProperty("SINK_BIGQUERY_METADATA_COLUMNS_TYPES", "topic=string,partition=integer,offset=integer");
         BigQuerySinkConfig config = ConfigFactory.create(BigQuerySinkConfig.class, System.getProperties());
         List<TupleString> metadataColumnsTypes = config.getMetadataColumnsTypes();
-        Assert.assertEquals(new ArrayList<TupleString>() {{
-            add(new TupleString("topic", "string"));
-            add(new TupleString("partition", "integer"));
-            add(new TupleString("offset", "integer"));
-        }}, metadataColumnsTypes);
+        Assert.assertEquals(new ArrayList<TupleString>() {
+            {
+                add(new TupleString("topic", "string"));
+                add(new TupleString("partition", "integer"));
+                add(new TupleString("offset", "integer"));
TupleString("offset", "integer")); + } + }, metadataColumnsTypes); } } diff --git a/src/test/java/io/odpf/depot/config/RedisSinkConfigTest.java b/src/test/java/org/raystack/depot/config/RedisSinkConfigTest.java similarity index 82% rename from src/test/java/io/odpf/depot/config/RedisSinkConfigTest.java rename to src/test/java/org/raystack/depot/config/RedisSinkConfigTest.java index 6b31c7fe..59cee5a0 100644 --- a/src/test/java/io/odpf/depot/config/RedisSinkConfigTest.java +++ b/src/test/java/org/raystack/depot/config/RedisSinkConfigTest.java @@ -1,7 +1,7 @@ -package io.odpf.depot.config; +package org.raystack.depot.config; -import io.odpf.depot.redis.enums.RedisSinkDeploymentType; -import io.odpf.depot.redis.enums.RedisSinkTtlType; +import org.raystack.depot.redis.enums.RedisSinkDeploymentType; +import org.raystack.depot.redis.enums.RedisSinkTtlType; import org.aeonbits.owner.ConfigFactory; import org.junit.Assert; import org.junit.Test; diff --git a/src/test/java/io/odpf/depot/config/converter/ConfToListConverterTest.java b/src/test/java/org/raystack/depot/config/converter/ConfToListConverterTest.java similarity index 84% rename from src/test/java/io/odpf/depot/config/converter/ConfToListConverterTest.java rename to src/test/java/org/raystack/depot/config/converter/ConfToListConverterTest.java index fa26929e..3b5cc6ec 100644 --- a/src/test/java/io/odpf/depot/config/converter/ConfToListConverterTest.java +++ b/src/test/java/org/raystack/depot/config/converter/ConfToListConverterTest.java @@ -1,6 +1,6 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; -import io.odpf.depot.common.TupleString; +import org.raystack.depot.common.TupleString; import org.junit.Assert; import org.junit.Test; diff --git a/src/test/java/io/odpf/depot/config/converter/ConverterUtilsTest.java b/src/test/java/org/raystack/depot/config/converter/ConverterUtilsTest.java similarity index 87% rename from src/test/java/io/odpf/depot/config/converter/ConverterUtilsTest.java rename to src/test/java/org/raystack/depot/config/converter/ConverterUtilsTest.java index dec85f20..4ddee6c9 100644 --- a/src/test/java/io/odpf/depot/config/converter/ConverterUtilsTest.java +++ b/src/test/java/org/raystack/depot/config/converter/ConverterUtilsTest.java @@ -1,12 +1,11 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; -import io.odpf.depot.common.Tuple; +import org.raystack.depot.common.Tuple; import org.junit.Assert; import org.junit.Test; import java.util.List; - public class ConverterUtilsTest { @Test diff --git a/src/test/java/io/odpf/depot/config/converter/JsonToPropertiesConverterTest.java b/src/test/java/org/raystack/depot/config/converter/JsonToPropertiesConverterTest.java similarity index 86% rename from src/test/java/io/odpf/depot/config/converter/JsonToPropertiesConverterTest.java rename to src/test/java/org/raystack/depot/config/converter/JsonToPropertiesConverterTest.java index 2c6582d7..5d6376cf 100644 --- a/src/test/java/io/odpf/depot/config/converter/JsonToPropertiesConverterTest.java +++ b/src/test/java/org/raystack/depot/config/converter/JsonToPropertiesConverterTest.java @@ -1,4 +1,4 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; import org.junit.Assert; import org.junit.Rule; @@ -29,18 +29,22 @@ public void shouldConvertJSONConfigToProperties() { @Test public void shouldValidateJsonConfigForDuplicates() { String json = 
"{\"order_number\":\"ORDER_NUMBER\",\"event_timestamp\":\"TIMESTAMP\",\"driver_id\":\"TIMESTAMP\"}"; - IllegalArgumentException e = Assert.assertThrows(IllegalArgumentException.class, () -> new JsonToPropertiesConverter().convert(null, json)); - Assert.assertEquals("duplicates found in SINK_REDIS_HASHSET_FIELD_TO_COLUMN_MAPPING for : [TIMESTAMP]", e.getMessage()); + IllegalArgumentException e = Assert.assertThrows(IllegalArgumentException.class, + () -> new JsonToPropertiesConverter().convert(null, json)); + Assert.assertEquals("duplicates found in SINK_REDIS_HASHSET_FIELD_TO_COLUMN_MAPPING for : [TIMESTAMP]", + e.getMessage()); } @Test public void shouldValidateJsonConfigForDuplicatesInNestedJsons() { String json = "{\"order_number\":\"ORDER_NUMBER\",\"event_timestamp\":\"TIMESTAMP\",\"nested\":{\"1\":\"TIMESTAMP\",\"2\":\"ORDER_NUMBER\"}}"; - IllegalArgumentException e = Assert.assertThrows(IllegalArgumentException.class, () -> new JsonToPropertiesConverter().convert(null, json)); + IllegalArgumentException e = Assert.assertThrows(IllegalArgumentException.class, + () -> new JsonToPropertiesConverter().convert(null, json)); String message = e.getMessage(); String[] actualMessage = (message.split(" : ")); Assert.assertEquals("duplicates found in SINK_REDIS_HASHSET_FIELD_TO_COLUMN_MAPPING for", actualMessage[0]); - Assert.assertTrue("[ORDER_NUMBER, TIMESTAMP]" .equals(actualMessage[1]) || "[TIMESTAMP, ORDER_NUMBER]" .equals(actualMessage[1])); + Assert.assertTrue("[ORDER_NUMBER, TIMESTAMP]".equals(actualMessage[1]) + || "[TIMESTAMP, ORDER_NUMBER]".equals(actualMessage[1])); } @Test diff --git a/src/test/java/io/odpf/depot/config/converter/LabelMapConverterTest.java b/src/test/java/org/raystack/depot/config/converter/LabelMapConverterTest.java similarity index 61% rename from src/test/java/io/odpf/depot/config/converter/LabelMapConverterTest.java rename to src/test/java/org/raystack/depot/config/converter/LabelMapConverterTest.java index bb672fad..faced405 100644 --- a/src/test/java/io/odpf/depot/config/converter/LabelMapConverterTest.java +++ b/src/test/java/org/raystack/depot/config/converter/LabelMapConverterTest.java @@ -1,4 +1,4 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; import org.junit.Assert; import org.junit.Test; @@ -6,7 +6,6 @@ import java.util.Collections; import java.util.HashMap; - public class LabelMapConverterTest { @Test @@ -18,10 +17,12 @@ public void shouldConvertToEmptyMap() { @Test public void shouldConvertToMap() { LabelMapConverter converter = new LabelMapConverter(); - Assert.assertEquals(new HashMap() {{ - put("a", "b"); - put("c", "d"); - put("test", "testing"); - }}, converter.convert(null, "a=b,c=d,test=testing")); + Assert.assertEquals(new HashMap() { + { + put("a", "b"); + put("c", "d"); + put("test", "testing"); + } + }, converter.convert(null, "a=b,c=d,test=testing")); } } diff --git a/src/test/java/io/odpf/depot/config/converter/RedisSinkDataTypeConverterTest.java b/src/test/java/org/raystack/depot/config/converter/RedisSinkDataTypeConverterTest.java similarity index 94% rename from src/test/java/io/odpf/depot/config/converter/RedisSinkDataTypeConverterTest.java rename to src/test/java/org/raystack/depot/config/converter/RedisSinkDataTypeConverterTest.java index b66d2d32..1f166e4d 100644 --- a/src/test/java/io/odpf/depot/config/converter/RedisSinkDataTypeConverterTest.java +++ b/src/test/java/org/raystack/depot/config/converter/RedisSinkDataTypeConverterTest.java @@ -1,6 +1,6 @@ -package 
io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; -import io.odpf.depot.redis.enums.RedisSinkDataType; +import org.raystack.depot.redis.enums.RedisSinkDataType; import org.gradle.internal.impldep.org.junit.Assert; import org.junit.Before; import org.junit.Test; diff --git a/src/test/java/io/odpf/depot/config/converter/RedisSinkDeploymentTypeConverterTest.java b/src/test/java/org/raystack/depot/config/converter/RedisSinkDeploymentTypeConverterTest.java similarity index 94% rename from src/test/java/io/odpf/depot/config/converter/RedisSinkDeploymentTypeConverterTest.java rename to src/test/java/org/raystack/depot/config/converter/RedisSinkDeploymentTypeConverterTest.java index 387cab3b..5e5d5d79 100644 --- a/src/test/java/io/odpf/depot/config/converter/RedisSinkDeploymentTypeConverterTest.java +++ b/src/test/java/org/raystack/depot/config/converter/RedisSinkDeploymentTypeConverterTest.java @@ -1,6 +1,6 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; -import io.odpf.depot.redis.enums.RedisSinkDeploymentType; +import org.raystack.depot.redis.enums.RedisSinkDeploymentType; import org.gradle.internal.impldep.org.junit.Assert; import org.junit.Before; import org.junit.Test; diff --git a/src/test/java/io/odpf/depot/config/converter/RedisSinkTtlTypeConverterTest.java b/src/test/java/org/raystack/depot/config/converter/RedisSinkTtlTypeConverterTest.java similarity index 95% rename from src/test/java/io/odpf/depot/config/converter/RedisSinkTtlTypeConverterTest.java rename to src/test/java/org/raystack/depot/config/converter/RedisSinkTtlTypeConverterTest.java index 50540775..a522a43c 100644 --- a/src/test/java/io/odpf/depot/config/converter/RedisSinkTtlTypeConverterTest.java +++ b/src/test/java/org/raystack/depot/config/converter/RedisSinkTtlTypeConverterTest.java @@ -1,6 +1,6 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; -import io.odpf.depot.redis.enums.RedisSinkTtlType; +import org.raystack.depot.redis.enums.RedisSinkTtlType; import org.gradle.internal.impldep.org.junit.Assert; import org.junit.Before; import org.junit.Test; diff --git a/src/test/java/io/odpf/depot/config/converter/SchemaRegistryHeadersConverterTest.java b/src/test/java/org/raystack/depot/config/converter/SchemaRegistryHeadersConverterTest.java similarity index 71% rename from src/test/java/io/odpf/depot/config/converter/SchemaRegistryHeadersConverterTest.java rename to src/test/java/org/raystack/depot/config/converter/SchemaRegistryHeadersConverterTest.java index d5ee2113..d9d93408 100644 --- a/src/test/java/io/odpf/depot/config/converter/SchemaRegistryHeadersConverterTest.java +++ b/src/test/java/org/raystack/depot/config/converter/SchemaRegistryHeadersConverterTest.java @@ -1,6 +1,6 @@ -package io.odpf.depot.config.converter; +package org.raystack.depot.config.converter; -import io.odpf.depot.config.OdpfSinkConfig; +import org.raystack.depot.config.SinkConfig; import org.aeonbits.owner.ConfigFactory; import org.apache.http.message.BasicHeader; import org.junit.Assert; @@ -17,7 +17,7 @@ public void testConvertIfFetchHeadersValueEmpty() { put("SCHEMA_REGISTRY_STENCIL_FETCH_HEADERS", ""); } }; - OdpfSinkConfig config = ConfigFactory.create(OdpfSinkConfig.class, properties); + SinkConfig config = ConfigFactory.create(SinkConfig.class, properties); Assert.assertEquals(0, config.getSchemaRegistryStencilFetchHeaders().size()); } @@ -25,7 +25,7 @@ public void testConvertIfFetchHeadersValueEmpty() { public void 
         Map<String, String> properties = new HashMap<String, String>() {
         };
-        OdpfSinkConfig config = ConfigFactory.create(OdpfSinkConfig.class, properties);
+        SinkConfig config = ConfigFactory.create(SinkConfig.class, properties);
         Assert.assertEquals(0, config.getSchemaRegistryStencilFetchHeaders().size());
     }
 
@@ -36,9 +36,11 @@ public void shouldConvertHeaderKeyValuesWithHeaderObject() {
                 put("SCHEMA_REGISTRY_STENCIL_FETCH_HEADERS", "key1:value1 ,,, key2 : value2,");
             }
         };
-        OdpfSinkConfig config = ConfigFactory.create(OdpfSinkConfig.class, properties);
-        Assert.assertEquals((new BasicHeader("key1", "value1")).toString(), config.getSchemaRegistryStencilFetchHeaders().get(0).toString());
-        Assert.assertEquals((new BasicHeader("key2", "value2")).toString(), config.getSchemaRegistryStencilFetchHeaders().get(1).toString());
+        SinkConfig config = ConfigFactory.create(SinkConfig.class, properties);
+        Assert.assertEquals((new BasicHeader("key1", "value1")).toString(),
+                config.getSchemaRegistryStencilFetchHeaders().get(0).toString());
+        Assert.assertEquals((new BasicHeader("key2", "value2")).toString(),
+                config.getSchemaRegistryStencilFetchHeaders().get(1).toString());
         Assert.assertEquals(2, config.getSchemaRegistryStencilFetchHeaders().size());
     }
 }
diff --git a/src/test/java/io/odpf/depot/config/converter/SinkConnectorSchemaMessageModeConverterTest.java b/src/test/java/org/raystack/depot/config/converter/SinkConnectorSchemaMessageModeConverterTest.java
similarity index 74%
rename from src/test/java/io/odpf/depot/config/converter/SinkConnectorSchemaMessageModeConverterTest.java
rename to src/test/java/org/raystack/depot/config/converter/SinkConnectorSchemaMessageModeConverterTest.java
index abcd6a58..44d63a15 100644
--- a/src/test/java/io/odpf/depot/config/converter/SinkConnectorSchemaMessageModeConverterTest.java
+++ b/src/test/java/org/raystack/depot/config/converter/SinkConnectorSchemaMessageModeConverterTest.java
@@ -1,11 +1,10 @@
-package io.odpf.depot.config.converter;
+package org.raystack.depot.config.converter;
 
-import io.odpf.depot.message.SinkConnectorSchemaMessageMode;
+import org.raystack.depot.message.SinkConnectorSchemaMessageMode;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.jupiter.api.Assertions;
 
-
 public class SinkConnectorSchemaMessageModeConverterTest {
 
     @Test
@@ -15,13 +14,13 @@ public void shouldConvertLogKey() {
         Assert.assertEquals(SinkConnectorSchemaMessageMode.LOG_KEY, mode);
     }
 
-
     @Test
     public void shouldThrowException() {
         SinkConnectorSchemaMessageModeConverter converter = new SinkConnectorSchemaMessageModeConverter();
         Exception exception = Assertions.assertThrows(RuntimeException.class, () -> {
             converter.convert(null, "Invalid");
         });
-        Assert.assertEquals("No enum constant io.odpf.depot.message.SinkConnectorSchemaMessageMode.INVALID", exception.getMessage());
+        Assert.assertEquals("No enum constant org.raystack.depot.message.SinkConnectorSchemaMessageMode.INVALID",
+                exception.getMessage());
     }
 }
diff --git a/src/test/java/org/raystack/depot/log/LogSinkTest.java b/src/test/java/org/raystack/depot/log/LogSinkTest.java
new file mode 100644
index 00000000..4a0c8ad3
--- /dev/null
+++ b/src/test/java/org/raystack/depot/log/LogSinkTest.java
@@ -0,0 +1,122 @@
+package org.raystack.depot.log;
+
+import org.raystack.depot.message.json.JsonMessageParser;
+import org.raystack.depot.SinkResponse;
+import org.raystack.depot.config.SinkConfig;
+import org.raystack.depot.error.ErrorInfo;
+import org.raystack.depot.error.ErrorType;
+import org.raystack.depot.exception.SinkException;
+import org.raystack.depot.message.Message;
+import org.raystack.depot.message.MessageParser;
+import org.raystack.depot.metrics.Instrumentation;
+import org.raystack.depot.metrics.JsonParserMetrics;
+import org.aeonbits.owner.ConfigFactory;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.mockito.ArgumentCaptor;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.hamcrest.Matchers.containsInAnyOrder;
+import static org.junit.Assert.*;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.never;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+public class LogSinkTest {
+    private final String template = "\n================= DATA =======================\n{}\n================= METADATA =======================\n{}\n";
+    private SinkConfig config;
+    private MessageParser messageParser;
+    private Instrumentation instrumentation;
+    private JsonParserMetrics jsonParserMetrics;
+
+    @Before
+    public void setUp() throws Exception {
+        config = mock(SinkConfig.class);
+        messageParser = mock(MessageParser.class);
+        instrumentation = mock(Instrumentation.class);
+        jsonParserMetrics = new JsonParserMetrics(config);
+
+    }
+
+    @Test
+    public void shouldProcessEmptyMessageWithNoError() throws IOException {
+        LogSink logSink = new LogSink(config, messageParser, instrumentation);
+        ArrayList<Message> messages = new ArrayList<>();
+        SinkResponse sinkResponse = logSink.pushToSink(messages);
+        Map<Long, ErrorInfo> errors = sinkResponse.getErrors();
+
+        assertEquals(Collections.emptyMap(), errors);
+        verify(messageParser, never()).parse(any(), any(), any());
+        verify(instrumentation, never()).logInfo(any(), any(), any());
+    }
+
+    @Test
+    public void shouldLogJsonMessages() throws SinkException {
+        HashMap<String, String> configMap = new HashMap<String, String>() {
+            {
+                put("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", "log_message");
+            }
+        };
+        SinkConfig sinkConfig = ConfigFactory.create(SinkConfig.class, configMap);
+        messageParser = new JsonMessageParser(sinkConfig, instrumentation, jsonParserMetrics);
+        LogSink logSink = new LogSink(sinkConfig, messageParser, instrumentation);
+        ArrayList<Message> messages = new ArrayList<>();
+        String validJsonFirstName = "{\"first_name\":\"john\"}";
+        byte[] logMessage1 = validJsonFirstName.getBytes();
+        String validJsonLastName = "{\"last_name\":\"doe\"}";
+        byte[] logMessage2 = validJsonLastName.getBytes();
+        messages.add(new Message(null, logMessage1));
+        messages.add(new Message(null, logMessage2));
+        SinkResponse sinkResponse = logSink.pushToSink(messages);
+
+        // assert no error
+        Map<Long, ErrorInfo> errors = sinkResponse.getErrors();
+        assertEquals(Collections.emptyMap(), errors);
+
+        // assert processed message
+        ArgumentCaptor<String> jsonStrCaptor = ArgumentCaptor.forClass(String.class);
+        verify(instrumentation, times(2)).logInfo(eq(template), jsonStrCaptor.capture(),
+                eq(Collections.emptyMap().toString()));
+        assertThat(jsonStrCaptor.getAllValues(), containsInAnyOrder(validJsonFirstName, validJsonLastName));
+    }
+
+    @Test
+    public void shouldReturnErrorResponseAndProcessValidMessage() throws SinkException {
+        HashMap<String, String> configMap = new HashMap<String, String>() {
+            {
+                put("SINK_CONNECTOR_SCHEMA_MESSAGE_MODE", "log_message");
+            }
+        };
+        SinkConfig sinkConfig = ConfigFactory.create(SinkConfig.class, configMap);
+
+        messageParser = new JsonMessageParser(sinkConfig, instrumentation, jsonParserMetrics);
+        LogSink logSink = new LogSink(sinkConfig, messageParser, instrumentation);
+        ArrayList<Message> messages = new ArrayList<>();
+        String validJsonFirstName = "{\"first_name\":\"john\"}";
+        byte[] logMessage1 = validJsonFirstName.getBytes();
+        String invalidJson = "{\"last_name";
+        byte[] invalidLogMessage = invalidJson.getBytes();
+        messages.add(new Message(null, logMessage1));
+        messages.add(new Message(null, invalidLogMessage));
+        SinkResponse sinkResponse = logSink.pushToSink(messages);
+
+        // assert error
+        ErrorInfo error = sinkResponse.getErrorsFor(1L);
+        Assert.assertEquals(ErrorType.DESERIALIZATION_ERROR, error.getErrorType());
+
+        // assert valid message processed
+        ArgumentCaptor<String> jsonStrCaptor = ArgumentCaptor.forClass(String.class);
+        verify(instrumentation, times(1)).logInfo(eq(template), jsonStrCaptor.capture(),
+                eq(Collections.emptyMap().toString()));
+        assertEquals(validJsonFirstName, jsonStrCaptor.getValue().toString());
+    }
+}
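LogSinkTest above pins down the batch semantics every depot sink shares: pushToSink consumes the whole batch, emits what it can parse, and reports failures per message index in the response rather than throwing, so one bad payload never aborts the rest. A compressed sketch of that contract, assuming only org.json on the classpath; `SketchLogSink` and the String error codes are made-up stand-ins for depot's LogSink/SinkResponse/ErrorInfo, not the real classes.

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.json.JSONException;
import org.json.JSONObject;

// Minimal sketch of the pushToSink contract; the real collaborators
// (SinkConfig, MessageParser, Instrumentation) are collapsed into System.out.
class SketchLogSink {

    // Like SinkResponse, errors are keyed by the message's index in the batch.
    Map<Long, String> pushToSink(List<byte[]> messages) {
        Map<Long, String> errors = new HashMap<>();
        for (int i = 0; i < messages.size(); i++) {
            try {
                JSONObject data = new JSONObject(new String(messages.get(i)));
                System.out.println("================= DATA =======================");
                System.out.println(data);
            } catch (JSONException e) {
                // Record the failure and keep going with the rest of the batch.
                errors.put((long) i, "DESERIALIZATION_ERROR");
            }
        }
        return errors;
    }
}
```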
diff --git a/src/test/java/io/odpf/depot/message/MessageUtilsTest.java b/src/test/java/org/raystack/depot/message/MessageUtilsTest.java
similarity index 72%
rename from src/test/java/io/odpf/depot/message/MessageUtilsTest.java
rename to src/test/java/org/raystack/depot/message/MessageUtilsTest.java
index e7168e17..d0672d7d 100644
--- a/src/test/java/io/odpf/depot/message/MessageUtilsTest.java
+++ b/src/test/java/org/raystack/depot/message/MessageUtilsTest.java
@@ -1,4 +1,4 @@
-package io.odpf.depot.message;
+package org.raystack.depot.message;
 
 import com.jayway.jsonpath.Configuration;
 import com.jayway.jsonpath.spi.json.JsonOrgJsonProvider;
@@ -20,10 +20,10 @@ public void shouldGetStringFieldFromJsonObject() {
         Assert.assertEquals("test", MessageUtils.getFieldFromJsonObject("test", object, configuration));
     }
 
-
     @Test
     public void shouldGetFieldFromNested() {
-        JSONObject object = new JSONObject("{\"test\" :[{\"name\":\"John\",\"age\":50},{\"name\":\"Bob\",\"age\":60},{\"name\":\"Alice\",\"active\":true,\"height\":175}]}");
+        JSONObject object = new JSONObject(
+                "{\"test\" :[{\"name\":\"John\",\"age\":50},{\"name\":\"Bob\",\"age\":60},{\"name\":\"Alice\",\"active\":true,\"height\":175}]}");
         Assert.assertEquals("Bob", MessageUtils.getFieldFromJsonObject("test[1].name", object, configuration));
         Assert.assertEquals(175, MessageUtils.getFieldFromJsonObject("test[2].height", object, configuration));
     }
@@ -60,29 +60,38 @@ public void shouldGetRepeatedField() {
         JSONObject object = new JSONObject(jsonString);
 
         Assert.assertEquals(175, MessageUtils.getFieldFromJsonObject("test[2].height", object, configuration));
-        Assert.assertEquals("[{\"name\":\"test\",\"value\":\"sometest\"},{\"name\":\"test2\",\"value\":\"sometest2\"}]", MessageUtils.getFieldFromJsonObject("test[0].alist", object, configuration).toString());
-        Assert.assertEquals("sometest", MessageUtils.getFieldFromJsonObject("test[0].alist[0].value", object, configuration));
+        Assert.assertEquals("[{\"name\":\"test\",\"value\":\"sometest\"},{\"name\":\"test2\",\"value\":\"sometest2\"}]",
+                MessageUtils.getFieldFromJsonObject("test[0].alist", object, configuration).toString());
+        Assert.assertEquals("sometest",
+                MessageUtils.getFieldFromJsonObject("test[0].alist[0].value", object, configuration));
     }
 
     @Test
     public void shouldThrowExceptionIfInvalidPath() {
         JSONObject object = new JSONObject("{\"test\" :\"test\"}");
-        IllegalArgumentException exception = Assert.assertThrows(IllegalArgumentException.class, () -> MessageUtils.getFieldFromJsonObject("testing", object, configuration));
+        IllegalArgumentException exception = Assert.assertThrows(IllegalArgumentException.class,
+                () -> MessageUtils.getFieldFromJsonObject("testing", object, configuration));
         Assert.assertEquals("Invalid field config : testing", exception.getMessage());
-        exception = Assert.assertThrows(IllegalArgumentException.class, () -> MessageUtils.getFieldFromJsonObject("test[0].testing", object, configuration));
+        exception = Assert.assertThrows(IllegalArgumentException.class,
+                () -> MessageUtils.getFieldFromJsonObject("test[0].testing", object, configuration));
         Assert.assertEquals("Invalid field config : test[0].testing", exception.getMessage());
     }
 
+
     @Test
     public void shouldNotThrowExceptionIfValid() throws IOException {
-        OdpfMessage message = new OdpfMessage("test", "test");
+        Message message = new Message("test", "test");
         MessageUtils.validate(message, String.class);
     }
 
+
     @Test
     public void shouldThrowExceptionIfNotValid() {
-        OdpfMessage message = new OdpfMessage("test", "test");
-        IOException ioException = Assertions.assertThrows(IOException.class, () -> MessageUtils.validate(message, Integer.class));
-        Assert.assertEquals("Expected class class java.lang.Integer, but found: LogKey class: class java.lang.String, LogMessage class: class java.lang.String", ioException.getMessage());
+        Message message = new Message("test", "test");
+        IOException ioException = Assertions.assertThrows(IOException.class,
+                () -> MessageUtils.validate(message, Integer.class));
+        Assert.assertEquals(
+                "Expected class class java.lang.Integer, but found: LogKey class: class java.lang.String, LogMessage class: class java.lang.String",
+                ioException.getMessage());
     }
 }
diff --git a/src/test/java/io/odpf/depot/message/field/proto/DefaultFieldTest.java b/src/test/java/org/raystack/depot/message/field/proto/DefaultFieldTest.java
similarity index 91%
rename from src/test/java/io/odpf/depot/message/field/proto/DefaultFieldTest.java
rename to src/test/java/org/raystack/depot/message/field/proto/DefaultFieldTest.java
index 52fb93fc..ff59141a 100644
--- a/src/test/java/io/odpf/depot/message/field/proto/DefaultFieldTest.java
+++ b/src/test/java/org/raystack/depot/message/field/proto/DefaultFieldTest.java
@@ -1,6 +1,6 @@
-package io.odpf.depot.message.field.proto;
+package org.raystack.depot.message.field.proto;
 
-import io.odpf.depot.message.field.GenericField;
+import org.raystack.depot.message.field.GenericField;
 import org.junit.Assert;
 import org.junit.Test;
 
@@ -32,7 +32,8 @@ public void shouldReturnDefaultPrimitiveFields() {
         tss.add(Instant.ofEpochSecond(1002121010));
         tss.add(Instant.ofEpochSecond(1003121010));
         f = new TimeStampField(tss);
-        Assert.assertEquals("[\"2001-09-10T11:23:30Z\",\"2001-10-03T14:56:50Z\",\"2001-10-15T04:43:30Z\"]", f.getString());
+        Assert.assertEquals("[\"2001-09-10T11:23:30Z\",\"2001-10-03T14:56:50Z\",\"2001-10-15T04:43:30Z\"]",
+                f.getString());
 
         List<Boolean> booleanList = new ArrayList<>();
         booleanList.add(true);
diff --git a/src/test/java/io/odpf/depot/message/field/proto/DurationFieldTest.java b/src/test/java/org/raystack/depot/message/field/proto/DurationFieldTest.java
similarity index 97%
rename from src/test/java/io/odpf/depot/message/field/proto/DurationFieldTest.java
rename to src/test/java/org/raystack/depot/message/field/proto/DurationFieldTest.java
index 6da287d5..0ed73697 100644
--- a/src/test/java/io/odpf/depot/message/field/proto/DurationFieldTest.java
+++ b/src/test/java/org/raystack/depot/message/field/proto/DurationFieldTest.java
@@ -1,4 +1,4 @@
-package io.odpf.depot.message.field.proto;
+package org.raystack.depot.message.field.proto;
 
 import com.google.protobuf.Duration;
 import com.google.protobuf.DynamicMessage;
diff --git a/src/test/java/io/odpf/depot/message/field/proto/MapFieldTest.java b/src/test/java/org/raystack/depot/message/field/proto/MapFieldTest.java
similarity index 83%
rename from src/test/java/io/odpf/depot/message/field/proto/MapFieldTest.java
rename to src/test/java/org/raystack/depot/message/field/proto/MapFieldTest.java
index 31cea266..758447d6 100644
--- a/src/test/java/io/odpf/depot/message/field/proto/MapFieldTest.java
+++ b/src/test/java/org/raystack/depot/message/field/proto/MapFieldTest.java
@@ -1,11 +1,11 @@
-package io.odpf.depot.message.field.proto;
+package org.raystack.depot.message.field.proto;
 
 import com.google.protobuf.Duration;
 import com.google.protobuf.Struct;
 import com.google.protobuf.Timestamp;
 import com.google.protobuf.Value;
-import io.odpf.depot.TestMapMessage;
-import io.odpf.depot.TestMessage;
+import org.raystack.depot.TestMapMessage;
+import org.raystack.depot.TestMessage;
 import org.junit.Assert;
 import org.junit.Test;
 import org.skyscreamer.jsonassert.JSONAssert;
@@ -65,14 +65,14 @@ public void shouldReturnStructMap() {
         TestMapMessage message = TestMapMessage
                 .newBuilder()
                 .putStructMap("test1", Struct.newBuilder().putFields(
-                        "mykey",
-                        Value.newBuilder().setStructValue(
-                                Struct.newBuilder().putFields("another",
-                                        Value.newBuilder()
-                                                .setStringValue("finally")
-                                                .build())
-                                .build())
+                        "mykey",
+                        Value.newBuilder().setStructValue(
+                                Struct.newBuilder().putFields("another",
+                                        Value.newBuilder()
+                                                .setStringValue("finally")
+                                                .build())
+                                        .build())
+                        .build())
                 .build())
                 .build();
diff --git a/src/test/java/org/raystack/depot/message/field/proto/MessageFieldTest.java b/src/test/java/org/raystack/depot/message/field/proto/MessageFieldTest.java
new file mode 100644
index 00000000..fe5cecf3
--- /dev/null
+++ b/src/test/java/org/raystack/depot/message/field/proto/MessageFieldTest.java
@@ -0,0 +1,96 @@
+package org.raystack.depot.message.field.proto;
+
+import com.google.protobuf.Timestamp;
+import org.raystack.depot.TestMessage;
+import org.raystack.depot.TestNestedRepeatedMessage;
+import org.junit.Test;
+import org.skyscreamer.jsonassert.JSONAssert;
+
+public class MessageFieldTest {
+
+    @Test
+    public void shouldReturnJsonStringForMessage() {
+        TestMessage message = TestMessage.newBuilder()
+                .setOrderNumber("number")
+                .setOrderDetails("details")
+                .setOrderUrl("url")
+                .build();
+        MessageField field = new MessageField(message);
+        String expectedJson = "{\"order_number\":\"number\",\"order_url\":\"url\",\"order_details\":\"details\"}";
+        JSONAssert.assertEquals(expectedJson, field.getString(), true);
+    }
+
+    @Test
+    public void shouldReturnMessageForRepeatedMessage() {
+        TestNestedRepeatedMessage message = TestNestedRepeatedMessage.newBuilder()
+                .addRepeatedMessage(TestMessage.newBuilder()
+                        .setOrderNumber("number")
+                        .setOrderDetails("details")
+                        .setOrderUrl("url")
+                        .build())
+                .addRepeatedMessage(TestMessage.newBuilder()
+                        .setOrderNumber("o2")
+                        .setOrderDetails("d2")
+                        .setOrderUrl("url2")
+                        .build())
+                .setSingleMessage(TestMessage.newBuilder()
+                        .setOrderNumber("order1")
+                        .setOrderDetails("de1")
+                        .setOrderUrl("url1")
+                        .build())
+                .setNumberField(10)
+                .addRepeatedNumberField(12)
+                .addRepeatedNumberField(13)
+                .setSingleTimestamp(Timestamp.newBuilder().setSeconds(1669962594).build())
+                .addRepeatedTimestamp(Timestamp.newBuilder().setSeconds(1669932594).build())
+                .addRepeatedTimestamp(Timestamp.newBuilder().setSeconds(1664932594).build())
+                .build();
+        MessageField field = new MessageField(message);
+        String expectedJson = "{\n"
+                + "  \"single_timestamp\": \"2022-12-02T06:29:54Z\",\n"
+                + "  \"repeated_number_field\": [\n"
+                + "    12,\n"
+                + "    13\n"
+                + "  ],\n"
+                + "  \"repeated_timestamp\": [\n"
+                + "    \"2022-12-01T22:09:54Z\",\n"
+                + "    \"2022-10-05T01:16:34Z\"\n"
+                + "  ],\n"
+                + "  \"repeated_message\": [\n"
+                + "    {\n"
+                + "      \"order_url\": \"url\",\n"
+                + "      \"order_number\": \"number\",\n"
+                + "      \"order_details\": \"details\"\n"
+                + "    },\n"
+                + "    {\n"
+                + "      \"order_url\": \"url2\",\n"
+                + "      \"order_number\": \"o2\",\n"
+                + "      \"order_details\": \"d2\"\n"
+                + "    }\n"
+                + "  ],\n"
+                + "  \"single_message\": {\n"
+                + "    \"order_url\": \"url1\",\n"
+                + "    \"order_number\": \"order1\",\n"
+                + "    \"order_details\": \"de1\"\n"
+                + "  },\n"
+                + "  \"number_field\": 10\n"
+                + "}\n";
+        JSONAssert.assertEquals(expectedJson, field.getString(), true);
+
+        expectedJson = "[\n"
+                + "  {\n"
+                + "    \"order_number\": \"number\",\n"
+                + "    \"order_url\": \"url\",\n"
+                + "    \"order_details\": \"details\"\n"
+                + "  },\n"
+                + "  {\n"
+                + "    \"order_number\": \"o2\",\n"
+                + "    \"order_url\": \"url2\",\n"
+                + "    \"order_details\": \"d2\"\n"
+                + "  }\n"
+                + "]";
+        field = new MessageField(
+                message.getField(message.getDescriptorForType().findFieldByName("repeated_message")));
+        JSONAssert.assertEquals(expectedJson, field.getString(), true);
+    }
+}
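The expected strings in MessageFieldTest above mirror what protobuf's canonical JSON printer produces when proto field names are preserved: snake_case keys, and Timestamp fields rendered as RFC 3339 strings such as "2022-12-02T06:29:54Z". A minimal sketch of that equivalence, assuming the generated TestMessage class and protobuf-java-util (which build.gradle already pulls in); `MessageFieldSketch` is a hypothetical harness, not depot's MessageField implementation.

```java
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.util.JsonFormat;
import org.raystack.depot.TestMessage;

// Sketch only: shows the same proto-to-JSON transform the test asserts on,
// done with protobuf-java-util instead of depot's MessageField class.
public class MessageFieldSketch {
    public static void main(String[] args) throws InvalidProtocolBufferException {
        TestMessage message = TestMessage.newBuilder()
                .setOrderNumber("number")
                .setOrderUrl("url")
                .setOrderDetails("details")
                .build();
        // preservingProtoFieldNames() keeps snake_case keys like the
        // expected JSON above; the default printer would camelCase them.
        String json = JsonFormat.printer()
                .preservingProtoFieldNames()
                .print(message);
        System.out.println(json); // {"order_number": "number", ...}
    }
}
```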
.setSingleTimestamp(Timestamp.newBuilder().setSeconds(1669962594).build()) + .addRepeatedTimestamp(Timestamp.newBuilder().setSeconds(1669932594).build()) + .addRepeatedTimestamp(Timestamp.newBuilder().setSeconds(1664932594).build()) + .build(); + MessageField field = new MessageField(message); + String expectedJson = "{\n" + + " \"single_timestamp\": \"2022-12-02T06:29:54Z\",\n" + + " \"repeated_number_field\": [\n" + + " 12,\n" + + " 13\n" + + " ],\n" + + " \"repeated_timestamp\": [\n" + + " \"2022-12-01T22:09:54Z\",\n" + + " \"2022-10-05T01:16:34Z\"\n" + + " ],\n" + + " \"repeated_message\": [\n" + + " {\n" + + " \"order_url\": \"url\",\n" + + " \"order_number\": \"number\",\n" + + " \"order_details\": \"details\"\n" + + " },\n" + + " {\n" + + " \"order_url\": \"url2\",\n" + + " \"order_number\": \"o2\",\n" + + " \"order_details\": \"d2\"\n" + + " }\n" + + " ],\n" + + " \"single_message\": {\n" + + " \"order_url\": \"url1\",\n" + + " \"order_number\": \"order1\",\n" + + " \"order_details\": \"de1\"\n" + + " },\n" + + " \"number_field\": 10\n" + + "}\n"; + JSONAssert.assertEquals(expectedJson, field.getString(), true); + + expectedJson = "[\n" + + " {\n" + + " \"order_number\": \"number\",\n" + + " \"order_url\": \"url\",\n" + + " \"order_details\": \"details\"\n" + + " },\n" + + " {\n" + + " \"order_number\": \"o2\",\n" + + " \"order_url\": \"url2\",\n" + + " \"order_details\": \"d2\"\n" + + " }\n" + + "]"; + field = new MessageField( + message.getField(message.getDescriptorForType().findFieldByName("repeated_message"))); + JSONAssert.assertEquals(expectedJson, field.getString(), true); + } +} diff --git a/src/test/java/io/odpf/depot/message/field/proto/TimeStampFieldTest.java b/src/test/java/org/raystack/depot/message/field/proto/TimeStampFieldTest.java similarity index 63% rename from src/test/java/io/odpf/depot/message/field/proto/TimeStampFieldTest.java rename to src/test/java/org/raystack/depot/message/field/proto/TimeStampFieldTest.java index 76a9b5a3..9b11cf1c 100644 --- a/src/test/java/io/odpf/depot/message/field/proto/TimeStampFieldTest.java +++ b/src/test/java/org/raystack/depot/message/field/proto/TimeStampFieldTest.java @@ -1,7 +1,7 @@ -package io.odpf.depot.message.field.proto; +package org.raystack.depot.message.field.proto; import com.google.protobuf.Timestamp; -import io.odpf.depot.TestDurationMessage; +import org.raystack.depot.TestDurationMessage; import org.junit.Assert; import org.junit.Test; @@ -11,10 +11,11 @@ public class TimeStampFieldTest { public void shouldReturnTimeStamps() { TestDurationMessage message = TestDurationMessage .newBuilder() - .setEventTimestamp(Timestamp.newBuilder().setSeconds(1669962594) .build()) + .setEventTimestamp(Timestamp.newBuilder().setSeconds(1669962594).build()) .build(); TimeStampField field = new TimeStampField( - TimeStampField.getInstant(message.getField(message.getDescriptorForType().findFieldByName("event_timestamp")))); + TimeStampField.getInstant( + message.getField(message.getDescriptorForType().findFieldByName("event_timestamp")))); Assert.assertEquals("2022-12-02T06:29:54Z", field.getString()); } } diff --git a/src/test/java/io/odpf/depot/message/json/JsonOdpfMessageParserTest.java b/src/test/java/org/raystack/depot/message/json/JsonMessageParserTest.java similarity index 51% rename from src/test/java/io/odpf/depot/message/json/JsonOdpfMessageParserTest.java rename to src/test/java/org/raystack/depot/message/json/JsonMessageParserTest.java index 2ef7a9e0..54f0c670 100644 --- 
diff --git a/src/test/java/io/odpf/depot/message/json/JsonOdpfMessageParserTest.java b/src/test/java/org/raystack/depot/message/json/JsonMessageParserTest.java
similarity index 51%
rename from src/test/java/io/odpf/depot/message/json/JsonOdpfMessageParserTest.java
rename to src/test/java/org/raystack/depot/message/json/JsonMessageParserTest.java
index 2ef7a9e0..54f0c670 100644
--- a/src/test/java/io/odpf/depot/message/json/JsonOdpfMessageParserTest.java
+++ b/src/test/java/org/raystack/depot/message/json/JsonMessageParserTest.java
@@ -1,11 +1,11 @@
-package io.odpf.depot.message.json;
-
-import io.odpf.depot.config.OdpfSinkConfig;
-import io.odpf.depot.exception.EmptyMessageException;
-import io.odpf.depot.message.OdpfMessage;
-import io.odpf.depot.message.ParsedOdpfMessage;
-import io.odpf.depot.metrics.Instrumentation;
-import io.odpf.depot.metrics.JsonParserMetrics;
+package org.raystack.depot.message.json;
+
+import org.raystack.depot.config.SinkConfig;
+import org.raystack.depot.exception.EmptyMessageException;
+import org.raystack.depot.message.Message;
+import org.raystack.depot.message.ParsedMessage;
+import org.raystack.depot.metrics.Instrumentation;
+import org.raystack.depot.metrics.JsonParserMetrics;
 import org.aeonbits.owner.ConfigFactory;
 import org.json.JSONException;
 import org.json.JSONObject;
@@ -17,8 +17,8 @@ import java.util.Map;
 
 import static com.google.common.collect.ImmutableMap.of;
-import static io.odpf.depot.message.SinkConnectorSchemaMessageMode.LOG_KEY;
-import static io.odpf.depot.message.SinkConnectorSchemaMessageMode.LOG_MESSAGE;
+import static org.raystack.depot.message.SinkConnectorSchemaMessageMode.LOG_KEY;
+import static org.raystack.depot.message.SinkConnectorSchemaMessageMode.LOG_MESSAGE;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertThrows;
 import static org.junit.Assert.assertTrue;
@@ -28,37 +28,39 @@ import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
+public class JsonMessageParserTest {
-public class JsonOdpfMessageParserTest {
-
-    private final OdpfSinkConfig defaultConfig = ConfigFactory.create(OdpfSinkConfig.class, Collections.emptyMap());
+    private final SinkConfig defaultConfig = ConfigFactory.create(SinkConfig.class, Collections.emptyMap());
     private final Instrumentation instrumentation = mock(Instrumentation.class);
     private final JsonParserMetrics jsonParserMetrics = new JsonParserMetrics(defaultConfig);
     /*
-    JSONObject.equals does reference check, so cant use assertEquals instead we use expectedJson.similar(actualJson)
-    reference https://github.com/stleary/JSON-java/blob/master/src/test/java/org/json/junit/JSONObjectTest.java#L132
-     */
+     * JSONObject.equals does reference check, so cant use assertEquals instead we
+     * use expectedJson.similar(actualJson)
+     * reference
+     * https://github.com/stleary/JSON-java/blob/master/src/test/java/org/json/junit
+     * /JSONObjectTest.java#L132
+     */
     @Test
     public void shouldParseJsonLogMessage() throws IOException {
-        JsonOdpfMessageParser jsonOdpfMessageParser = new JsonOdpfMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
+        JsonMessageParser jsonMessageParser = new JsonMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
         String validJsonStr = "{\"first_name\":\"john\"}";
-        OdpfMessage jsonOdpfMessage = new OdpfMessage(null, validJsonStr.getBytes());
+        Message jsonMessage = new Message(null, validJsonStr.getBytes());
 
-        ParsedOdpfMessage parsedOdpfMessage = jsonOdpfMessageParser.parse(jsonOdpfMessage, LOG_MESSAGE, null);
-        JSONObject actualJson = (JSONObject) parsedOdpfMessage.getRaw();
+        ParsedMessage parsedMessage = jsonMessageParser.parse(jsonMessage, LOG_MESSAGE, null);
+        JSONObject actualJson = (JSONObject) parsedMessage.getRaw();
         JSONObject expectedJsonObject = new JSONObject(validJsonStr);
         assertTrue(expectedJsonObject.similar(actualJson));
     }
 
     @Test
     public void shouldPublishTimeTakenToCastJsonValuesToString() throws IOException {
-        JsonOdpfMessageParser jsonOdpfMessageParser = new JsonOdpfMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
+        JsonMessageParser jsonMessageParser = new JsonMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
         String validJsonStr = "{\"first_name\":\"john\"}";
-        OdpfMessage jsonOdpfMessage = new OdpfMessage(null, validJsonStr.getBytes());
+        Message jsonMessage = new Message(null, validJsonStr.getBytes());
 
-        ParsedOdpfMessage parsedOdpfMessage = jsonOdpfMessageParser.parse(jsonOdpfMessage, LOG_MESSAGE, null);
-        JSONObject actualJson = (JSONObject) parsedOdpfMessage.getRaw();
+        ParsedMessage parsedMessage = jsonMessageParser.parse(jsonMessage, LOG_MESSAGE, null);
+        JSONObject actualJson = (JSONObject) parsedMessage.getRaw();
         JSONObject expectedJsonObject = new JSONObject(validJsonStr);
         assertTrue(expectedJsonObject.similar(actualJson));
         verify(instrumentation, times(1)).captureDurationSince(
@@ -68,8 +70,8 @@ public void shouldPublishTimeTakenToCastJsonValuesToString() throws IOException
     @Test
     public void shouldCastTheJSONValuesToString() throws IOException {
         Map<String, String> configMap = of("SINK_BIGQUERY_DEFAULT_DATATYPE_STRING_ENABLE", "true");
-        OdpfSinkConfig config = ConfigFactory.create(OdpfSinkConfig.class, configMap);
-        JsonOdpfMessageParser jsonOdpfMessageParser = new JsonOdpfMessageParser(config, instrumentation, jsonParserMetrics);
+        SinkConfig config = ConfigFactory.create(SinkConfig.class, configMap);
+        JsonMessageParser jsonMessageParser = new JsonMessageParser(config, instrumentation, jsonParserMetrics);
         String validJsonStr = "{\n"
                 + " \"idfv\": \"FE533F4A-F776-4BEF-98B7-6BD1DFC2972C\",\n"
                 + " \"is_lat\": true,\n"
@@ -77,23 +79,23 @@ public void shouldCastTheJSONValuesToString() throws IOException {
                 + " \"sdk_version\": 6.310932397154218,\n"
                 + " \"whole_number\": 2\n"
                 + "}";
-        OdpfMessage jsonOdpfMessage = new OdpfMessage(null, validJsonStr.getBytes());
+        Message jsonMessage = new Message(null, validJsonStr.getBytes());
 
-        ParsedOdpfMessage parsedOdpfMessage = jsonOdpfMessageParser.parse(jsonOdpfMessage, LOG_MESSAGE, null);
-        JSONObject actualJson = (JSONObject) parsedOdpfMessage.getRaw();
+        ParsedMessage parsedMessage = jsonMessageParser.parse(jsonMessage, LOG_MESSAGE, null);
+        JSONObject actualJson = (JSONObject) parsedMessage.getRaw();
         String stringifiedJsonStr = "{\n"
-                //normal string should remain as is
+                // normal string should remain as is
                 + " \"idfv\": \"FE533F4A-F776-4BEF-98B7-6BD1DFC2972C\",\n"
-                //boolean should be converted to string
+                // boolean should be converted to string
                 + " \"is_lat\": \"true\",\n"
-                //null will not be there entirely
-                //" \"contributor_2_af_prt\": null,\n"
+                // null will not be there entirely
+                // " \"contributor_2_af_prt\": null,\n"
-                //float should be converted to string
+                // float should be converted to string
                 + " \"sdk_version\": \"6.310932397154218\",\n"
-                //integer should be converted to string
+                // integer should be converted to string
                 + " \"whole_number\": \"2\"\n"
                 + "}";
         JSONObject expectedJsonObject = new JSONObject(stringifiedJsonStr);
@@ -102,7 +104,7 @@ public void shouldCastTheJSONValuesToString() throws IOException {
 
     @Test
     public void shouldThrowExceptionForNestedJsonNotSupported() {
-        JsonOdpfMessageParser jsonOdpfMessageParser = new JsonOdpfMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
+        JsonMessageParser jsonMessageParser = new JsonMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
         String nestedJsonStr = "{\n"
                 + " \"event_value\": {\n"
                 + " \"CustomerLatitude\": \"-6.166895595817224\",\n"
@@ -114,65 +116,64 @@ public void shouldThrowExceptionForNestedJsonNotSupported() {
                 + " \"is_receipt_validated\": null,\n"
                 + " \"contributor_1_campaign\": null\n"
                 + "}";
-        OdpfMessage jsonOdpfMessage = new OdpfMessage(null, nestedJsonStr.getBytes());
+        Message jsonMessage = new Message(null, nestedJsonStr.getBytes());
         UnsupportedOperationException exception = assertThrows(UnsupportedOperationException.class,
-                () -> jsonOdpfMessageParser.parse(jsonOdpfMessage, LOG_MESSAGE, null));
+                () -> jsonMessageParser.parse(jsonMessage, LOG_MESSAGE, null));
         assertEquals("nested json structure not supported yet", exception.getMessage());
     }
-
     @Test
     public void shouldThrowErrorForInvalidLogMessage() {
-        JsonOdpfMessageParser jsonOdpfMessageParser = new JsonOdpfMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
+        JsonMessageParser jsonMessageParser = new JsonMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
        String invalidJsonStr = "{\"first_";
-        OdpfMessage jsonOdpfMessage = new OdpfMessage(null, invalidJsonStr.getBytes());
+        Message jsonMessage = new Message(null, invalidJsonStr.getBytes());
         IOException ioException = assertThrows(IOException.class,
-                () -> jsonOdpfMessageParser.parse(jsonOdpfMessage, LOG_MESSAGE, null));
+                () -> jsonMessageParser.parse(jsonMessage, LOG_MESSAGE, null));
         assertEquals("invalid json error", ioException.getMessage());
         assertTrue(ioException.getCause() instanceof JSONException);
     }
 
     @Test
     public void shouldThrowEmptyMessageException() {
-        JsonOdpfMessageParser jsonOdpfMessageParser = new JsonOdpfMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
-        OdpfMessage jsonOdpfMessage = new OdpfMessage(null, null);
+        JsonMessageParser jsonMessageParser = new JsonMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
+        Message jsonMessage = new Message(null, null);
         EmptyMessageException emptyMessageException = assertThrows(EmptyMessageException.class,
-                () -> jsonOdpfMessageParser.parse(jsonOdpfMessage, LOG_MESSAGE, null));
+                () -> jsonMessageParser.parse(jsonMessage, LOG_MESSAGE, null));
         assertEquals("log message is empty", emptyMessageException.getMessage());
     }
 
     @Test
     public void shouldParseJsonKeyMessage() throws IOException {
-        JsonOdpfMessageParser jsonOdpfMessageParser = new JsonOdpfMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
+        JsonMessageParser jsonMessageParser = new JsonMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
         String validJsonStr = "{\"first_name\":\"john\"}";
-        OdpfMessage jsonOdpfMessage = new OdpfMessage(validJsonStr.getBytes(), null);
+        Message jsonMessage = new Message(validJsonStr.getBytes(), null);
 
-        ParsedOdpfMessage parsedOdpfMessage = jsonOdpfMessageParser.parse(jsonOdpfMessage, LOG_KEY, null);
-        JSONObject actualJson = (JSONObject) parsedOdpfMessage.getRaw();
+        ParsedMessage parsedMessage = jsonMessageParser.parse(jsonMessage, LOG_KEY, null);
+        JSONObject actualJson = (JSONObject) parsedMessage.getRaw();
         JSONObject expectedJsonObject = new JSONObject(validJsonStr);
         assertTrue(expectedJsonObject.similar(actualJson));
     }
 
     @Test
     public void shouldThrowErrorForInvalidKeyMessage() {
-        JsonOdpfMessageParser jsonOdpfMessageParser = new JsonOdpfMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
+        JsonMessageParser jsonMessageParser = new JsonMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
         String invalidJsonStr = "{\"first_";
-        OdpfMessage jsonOdpfMessage = new OdpfMessage(invalidJsonStr.getBytes(), null);
+        Message jsonMessage = new Message(invalidJsonStr.getBytes(), null);
         IOException ioException = assertThrows(IOException.class,
-                () -> jsonOdpfMessageParser.parse(jsonOdpfMessage, LOG_KEY, null));
+                () -> jsonMessageParser.parse(jsonMessage, LOG_KEY, null));
         assertEquals("invalid json error", ioException.getMessage());
         assertTrue(ioException.getCause() instanceof JSONException);
     }
 
     @Test
     public void shouldThrowErrorWhenModeNotDefined() {
-        JsonOdpfMessageParser jsonOdpfMessageParser = new JsonOdpfMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
+        JsonMessageParser jsonMessageParser = new JsonMessageParser(defaultConfig, instrumentation, jsonParserMetrics);
         String invalidJsonStr = "{\"first_";
-        OdpfMessage jsonOdpfMessage = new OdpfMessage(invalidJsonStr.getBytes(), null);
+        Message jsonMessage = new Message(invalidJsonStr.getBytes(), null);
         IOException ioException = assertThrows(IOException.class,
-                () -> jsonOdpfMessageParser.parse(jsonOdpfMessage, null, null));
+                () -> jsonMessageParser.parse(jsonMessage, null, null));
         assertEquals("message mode not defined", ioException.getMessage());
     }
 }
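The tests above pin down the parser's contract: `LOG_MESSAGE` reads the message bytes, `LOG_KEY` reads the key bytes, and null, empty, or malformed payloads are rejected. A minimal sketch of that mode dispatch over org.json — the enum and helper here are illustrative assumptions, not Depot's actual `JsonMessageParser`:

```java
import org.json.JSONException;
import org.json.JSONObject;
import java.io.IOException;

public class JsonModeDispatchSketch {
    enum Mode { LOG_KEY, LOG_MESSAGE } // hypothetical stand-in for SinkConnectorSchemaMessageMode

    // Hypothetical helper: picks key or message bytes by mode, then parses a flat JSON object.
    static JSONObject parse(byte[] key, byte[] message, Mode mode) throws IOException {
        if (mode == null) {
            throw new IOException("message mode not defined");
        }
        byte[] payload = (mode == Mode.LOG_KEY) ? key : message;
        if (payload == null || payload.length == 0) {
            // The real parser throws its own EmptyMessageException here.
            throw new IOException("log message is empty");
        }
        try {
            return new JSONObject(new String(payload));
        } catch (JSONException e) {
            throw new IOException("invalid json error", e);
        }
    }

    public static void main(String[] args) throws IOException {
        System.out.println(parse(null, "{\"first_name\":\"john\"}".getBytes(), Mode.LOG_MESSAGE));
    }
}
```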
diff --git a/src/test/java/io/odpf/depot/message/json/JsonOdpfParsedMessageTest.java b/src/test/java/org/raystack/depot/message/json/JsonParsedMessageTest.java
similarity index 72%
rename from src/test/java/io/odpf/depot/message/json/JsonOdpfParsedMessageTest.java
rename to src/test/java/org/raystack/depot/message/json/JsonParsedMessageTest.java
index ef26840e..d67bcfe4 100644
--- a/src/test/java/io/odpf/depot/message/json/JsonOdpfParsedMessageTest.java
+++ b/src/test/java/org/raystack/depot/message/json/JsonParsedMessageTest.java
@@ -1,4 +1,4 @@
-package io.odpf.depot.message.json;
+package org.raystack.depot.message.json;
 
 import com.jayway.jsonpath.Configuration;
 import com.jayway.jsonpath.spi.json.JsonOrgJsonProvider;
@@ -13,15 +13,15 @@
 
 import static org.junit.Assert.assertEquals;
 
-public class JsonOdpfParsedMessageTest {
+public class JsonParsedMessageTest {
     private final Configuration configuration = Configuration.builder()
             .jsonProvider(new JsonOrgJsonProvider())
             .build();
 
     @Test
     public void shouldGetEmptyMappingKeysForEmptyJsonObject() {
-        //for empty json object
-        JsonOdpfParsedMessage parsedMessage = new JsonOdpfParsedMessage(new JSONObject(), configuration);
+        // for empty json object
+        JsonParsedMessage parsedMessage = new JsonParsedMessage(new JSONObject(), configuration);
         Map parsedMessageMapping = parsedMessage.getMapping(null);
         assertEquals(Collections.emptyMap(), parsedMessageMapping);
@@ -29,7 +29,7 @@ public void shouldGetEmptyMappingKeysForEmptyJsonObject() {
 
     @Test
     public void shouldGetEmptyMappingKeysForNullJsonObject() {
-        JsonOdpfParsedMessage parsedMessage = new JsonOdpfParsedMessage(null, configuration);
+        JsonParsedMessage parsedMessage = new JsonParsedMessage(null, configuration);
         Map parsedMessageMapping = parsedMessage.getMapping(null);
         assertEquals(Collections.emptyMap(), parsedMessageMapping);
     }
@@ -37,7 +37,7 @@ public void shouldGetEmptyMappingKeysForNullJsonObject() {
     @Test
     public void shouldGetMappings() {
         JSONObject personDetails = new JSONObject("{\"first_name\": \"john doe\", \"address\": \"planet earth\"}");
-        JsonOdpfParsedMessage parsedMessage = new JsonOdpfParsedMessage(personDetails, configuration);
+        JsonParsedMessage parsedMessage = new JsonParsedMessage(personDetails, configuration);
         Map parsedMessageMapping = parsedMessage.getMapping(null);
         Map expectedMap = new HashMap<>();
         expectedMap.put("first_name", "john doe");
@@ -48,8 +48,8 @@ public void shouldGetMappings() {
     @Test
     public void shouldReturnValueFromFlatJson() {
         JSONObject personDetails = new JSONObject("{\"first_name\": \"john doe\", \"address\": \"planet earth\"}");
-        JsonOdpfParsedMessage parsedMessage = new JsonOdpfParsedMessage(personDetails, configuration);
-        Assert.assertEquals("john doe", parsedMessage.getFieldByName("first_name", null));
+        JsonParsedMessage parsedMessage = new JsonParsedMessage(personDetails, configuration);
+        assertEquals("john doe", parsedMessage.getFieldByName("first_name", null));
     }
 
     @Test
@@ -59,8 +59,8 @@ public void shouldReturnValueFromNestedJson() {
                 + " \"address\": \"planet earth\", "
                 + "\"family\" : {\"brother\" : \"david doe\"}"
                 + "}");
-        JsonOdpfParsedMessage parsedMessage = new JsonOdpfParsedMessage(personDetails, configuration);
-        Assert.assertEquals("david doe", parsedMessage.getFieldByName("family.brother", null));
+        JsonParsedMessage parsedMessage = new JsonParsedMessage(personDetails, configuration);
+        assertEquals("david doe", parsedMessage.getFieldByName("family.brother", null));
     }
 
     @Test
@@ -70,8 +70,9 @@ public void shouldThrowExceptionIfNotFound() {
                 + " \"address\": \"planet earth\", "
                 + "\"family\" : {\"brother\" : \"david doe\"}"
                 + "}");
-        JsonOdpfParsedMessage parsedMessage = new JsonOdpfParsedMessage(personDetails, configuration);
-        java.lang.IllegalArgumentException illegalArgumentException = Assert.assertThrows(java.lang.IllegalArgumentException.class, () -> parsedMessage.getFieldByName("family.sister", null));
+        JsonParsedMessage parsedMessage = new JsonParsedMessage(personDetails, configuration);
+        java.lang.IllegalArgumentException illegalArgumentException = Assert.assertThrows(
+                java.lang.IllegalArgumentException.class, () -> parsedMessage.getFieldByName("family.sister", null));
         Assert.assertEquals("Invalid field config : family.sister", illegalArgumentException.getMessage());
     }
 
@@ -82,7 +83,7 @@ public void shouldReturnListFromNestedJson() {
                 + " \"address\": \"planet earth\", "
                 + "\"family\" : [{\"brother\" : \"david doe\"}, {\"brother\" : \"cain doe\"}]"
                 + "}");
-        JsonOdpfParsedMessage parsedMessage = new JsonOdpfParsedMessage(personDetails, configuration);
+        JsonParsedMessage parsedMessage = new JsonParsedMessage(personDetails, configuration);
         JSONArray family = (JSONArray) parsedMessage.getFieldByName("family", null);
         Assert.assertEquals(2, family.length());
         Assert.assertEquals("david doe", ((JSONObject) family.get(0)).get("brother"));
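The nested lookups in these tests (`family.brother`) sit on top of a jayway JsonPath `Configuration` with the `JsonOrgJsonProvider`. A standalone sketch of that combination, using plain JsonPath rather than `JsonParsedMessage`:

```java
import com.jayway.jsonpath.Configuration;
import com.jayway.jsonpath.JsonPath;
import com.jayway.jsonpath.spi.json.JsonOrgJsonProvider;
import org.json.JSONObject;

public class JsonPathSketch {
    public static void main(String[] args) {
        // Same provider setup as the test fixture: JsonPath operating on org.json objects.
        Configuration configuration = Configuration.builder()
                .jsonProvider(new JsonOrgJsonProvider())
                .build();
        JSONObject personDetails = new JSONObject(
                "{\"first_name\": \"john doe\", \"family\": {\"brother\": \"david doe\"}}");
        // A dotted field name maps onto a JsonPath expression rooted at "$".
        Object brother = JsonPath.using(configuration).parse(personDetails).read("$.family.brother");
        System.out.println(brother); // david doe
    }
}
```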
diff --git a/src/test/java/org/raystack/depot/message/proto/ProtoFieldParserTest.java b/src/test/java/org/raystack/depot/message/proto/ProtoFieldParserTest.java
new file mode 100644
index 00000000..cb2e2b97
--- /dev/null
+++ b/src/test/java/org/raystack/depot/message/proto/ProtoFieldParserTest.java
@@ -0,0 +1,238 @@
+package org.raystack.depot.message.proto;
+
+import com.google.protobuf.*;
+import com.google.type.Date;
+import org.raystack.depot.TestMessageBQ;
+import org.raystack.depot.TestNestedMessageBQ;
+import org.raystack.depot.TestRecursiveMessageBQ;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+
+public class ProtoFieldParserTest {
+    private ProtoFieldParser protoMappingParser;
+
+    @Before
+    public void setup() {
+        this.protoMappingParser = new ProtoFieldParser();
+    }
+
+    @Test(expected = RuntimeException.class)
+    public void shouldThrowExceptionIfProtoNotFound() {
+        protoMappingParser.parseFields(null, "test", new HashMap<>(), new HashMap<>());
+    }
+
+    @Test(expected = RuntimeException.class)
+    public void shouldThrowExceptionIfNestedProtoNotFound() {
+        Map<String, Descriptors.Descriptor> descriptorMap = new HashMap<String, Descriptors.Descriptor>() {
+            {
+                put("org.raystack.depot.TestMessageBQ", TestMessageBQ.getDescriptor());
+            }
+        };
+        ProtoField protoField = new ProtoField();
+        protoMappingParser.parseFields(protoField, "org.raystack.depot.TestNestedMessageBQ", descriptorMap,
+                new HashMap<>());
+    }
+
+    @Test
+    public void shouldParseProtoSchemaForNonNestedFields() {
+        ArrayList<Descriptors.FileDescriptor> fileDescriptors = new ArrayList<>();
+
+        fileDescriptors.add(TestMessageBQ.getDescriptor().getFile());
+        fileDescriptors.add(Duration.getDescriptor().getFile());
+        fileDescriptors.add(Date.getDescriptor().getFile());
+        fileDescriptors.add(Struct.getDescriptor().getFile());
+        fileDescriptors.add(Timestamp.getDescriptor().getFile());
+
+        Map<String, Descriptors.Descriptor> descriptorMap = getDescriptors(fileDescriptors);
+
+        Map<String, String> typeNameToPackageNameMap = new HashMap<String, String>() {
+            {
+                put(".raystack.depot.TestMessageBQ.CurrentStateEntry",
+                        "org.raystack.depot.TestMessageBQ.CurrentStateEntry");
+                put(".google.protobuf.Struct.FieldsEntry", "com.google.protobuf.Struct.FieldsEntry");
+                put(".google.protobuf.Duration", "com.google.protobuf.Duration");
+                put(".google.type.Date", "com.google.type.Date");
+            }
+        };
+
+        ProtoField protoField = new ProtoField();
+        protoField = protoMappingParser.parseFields(protoField, "org.raystack.depot.TestMessageBQ",
+                descriptorMap,
+                typeNameToPackageNameMap);
+        assertTestMessage(protoField.getFields());
+    }
+
+    @Test
+    public void shouldParseProtoSchemaForRecursiveFieldTillMaxLevel() {
+        ArrayList<Descriptors.FileDescriptor> fileDescriptors = new ArrayList<>();
+
+        fileDescriptors.add(TestRecursiveMessageBQ.getDescriptor().getFile());
+
+        Map<String, Descriptors.Descriptor> descriptorMap = getDescriptors(fileDescriptors);
+
+        Map<String, String> typeNameToPackageNameMap = new HashMap<String, String>() {
+            {
+                put(".raystack.depot.TestRecursiveMessageBQ",
+                        "org.raystack.depot.TestRecursiveMessageBQ");
+            }
+        };
+
+        ProtoField protoField = new ProtoField();
+        protoField = protoMappingParser.parseFields(protoField, "org.raystack.depot.TestRecursiveMessageBQ",
+                descriptorMap, typeNameToPackageNameMap);
+        assertField(protoField.getFields().get(0), "string_value",
+                DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1);
+        assertField(protoField.getFields().get(1), "float_value",
+                DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2);
+
+        ProtoField recursiveField = protoField;
+        int totalLevel = 1;
+        while (recursiveField.getFields().size() == 3) {
+            assertField(protoField.getFields().get(0), "string_value",
+                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
+                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1);
+            assertField(protoField.getFields().get(1), "float_value",
+                    DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT,
+                    DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2);
+            recursiveField = recursiveField.getFields().get(2);
+            totalLevel++;
+        }
+        assertEquals(15, totalLevel);
+    }
+
+    @Test
+    public void shouldParseProtoSchemaForNestedFields() {
+        ArrayList<Descriptors.FileDescriptor> fileDescriptors = new ArrayList<>();
+
+        fileDescriptors.add(TestMessageBQ.getDescriptor().getFile());
+        fileDescriptors.add(Duration.getDescriptor().getFile());
+        fileDescriptors.add(Date.getDescriptor().getFile());
+        fileDescriptors.add(Struct.getDescriptor().getFile());
+        fileDescriptors.add(TestNestedMessageBQ.getDescriptor().getFile());
+
+        Map<String, Descriptors.Descriptor> descriptorMap = getDescriptors(fileDescriptors);
+
+        Map<String, String> typeNameToPackageNameMap = new HashMap<String, String>() {
+            {
+                put(".raystack.depot.TestMessageBQ.CurrentStateEntry",
+                        "org.raystack.depot.TestMessageBQ.CurrentStateEntry");
+                put(".google.protobuf.Struct.FieldsEntry", "com.google.protobuf.Struct.FieldsEntry");
+                put(".google.protobuf.Duration", "com.google.protobuf.Duration");
+                put(".google.type.Date", "com.google.type.Date");
+                put(".raystack.depot.TestMessageBQ", "org.raystack.depot.TestMessageBQ");
+            }
+        };
+
+        ProtoField protoField = new ProtoField();
+        protoField = protoMappingParser.parseFields(protoField, "org.raystack.depot.TestNestedMessageBQ",
+                descriptorMap,
+                typeNameToPackageNameMap);
+        assertField(protoField.getFields().get(0), "nested_id",
+                DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1);
+        assertField(protoField.getFields().get(1), "single_message",
+                DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2);
+
+        assertTestMessage(protoField.getFields().get(1).getFields());
+    }
+
+    private Map<String, Descriptors.Descriptor> getDescriptors(
+            ArrayList<Descriptors.FileDescriptor> fileDescriptors) {
+        Map<String, Descriptors.Descriptor> descriptorMap = new HashMap<>();
+        fileDescriptors.forEach(fd -> {
+            String javaPackage = fd.getOptions().getJavaPackage();
+            fd.getMessageTypes().forEach(desc -> {
+                String className = desc.getName();
+                desc.getNestedTypes().forEach(nestedDesc -> {
+                    String nestedClassName = nestedDesc.getName();
+                    descriptorMap.put(String.format("%s.%s.%s", javaPackage, className,
+                            nestedClassName), nestedDesc);
+                });
+                descriptorMap.put(String.format("%s.%s", javaPackage, className), desc);
+            });
+        });
+        return descriptorMap;
+    }
+
+    private void assertTestMessage(List<ProtoField> fields) {
+        assertEquals(19, fields.size());
+        assertField(fields.get(0), "order_number", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1);
+        assertField(fields.get(1), "order_url", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2);
+        assertField(fields.get(2), "order_details", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 3);
+        assertField(fields.get(3), "created_at", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 4);
+        assertField(fields.get(4), "status", DescriptorProtos.FieldDescriptorProto.Type.TYPE_ENUM,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 5);
+        assertField(fields.get(5), "discount", DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 6);
+        assertField(fields.get(6), "success", DescriptorProtos.FieldDescriptorProto.Type.TYPE_BOOL,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 7);
+        assertField(fields.get(7), "price", DescriptorProtos.FieldDescriptorProto.Type.TYPE_FLOAT,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 8);
+        assertField(fields.get(8), "current_state", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, 9);
+        assertField(fields.get(9), "user_token", DescriptorProtos.FieldDescriptorProto.Type.TYPE_BYTES,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 10);
+        assertField(fields.get(10), "trip_duration", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 11);
+        assertField(fields.get(11), "aliases", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, 12);
+        assertField(fields.get(12), "properties", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 13);
+        assertField(fields.get(13), "order_date", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 14);
+        assertField(fields.get(14), "updated_at", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, 15);
+        assertField(fields.get(15), "attributes", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, 16);
+        assertField(fields.get(16), "intervals", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_REPEATED, 17);
+        assertField(fields.get(17), "counter", DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 18);
+        assertField(fields.get(18), "camelCase", DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 19);
+
+        assertEquals(String.format(".%s", Duration.getDescriptor().getFullName()),
+                fields.get(10).getTypeName());
+        assertEquals(2, fields.get(10).getFields().size());
+        assertField(fields.get(10).getFields().get(0), "seconds",
+                DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT64,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1);
+        assertField(fields.get(10).getFields().get(1), "nanos",
+                DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2);
+
+        assertEquals(String.format(".%s", Date.getDescriptor().getFullName()), fields.get(13).getTypeName());
+        assertEquals(3, fields.get(13).getFields().size());
+        assertField(fields.get(13).getFields().get(0), "year",
+                DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 1);
+        assertField(fields.get(13).getFields().get(1), "month",
+                DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 2);
+        assertField(fields.get(13).getFields().get(2), "day",
+                DescriptorProtos.FieldDescriptorProto.Type.TYPE_INT32,
+                DescriptorProtos.FieldDescriptorProto.Label.LABEL_OPTIONAL, 3);
+    }
+
+    private void assertField(ProtoField field, String name, DescriptorProtos.FieldDescriptorProto.Type ftype,
+            DescriptorProtos.FieldDescriptorProto.Label flabel, int index) {
+        assertEquals(name, field.getName());
+        assertEquals(ftype, field.getType());
+        assertEquals(flabel, field.getLabel());
+        assertEquals(index, field.getIndex());
+    }
+}
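`assertField` checks name, wire type, label, and tag number, all of which come straight from protobuf's descriptor API. A small sketch of reading the same four properties directly, using the well-known `Timestamp` type so it runs without Depot's generated test protos:

```java
import com.google.protobuf.DescriptorProtos;
import com.google.protobuf.Descriptors;
import com.google.protobuf.Timestamp;

public class DescriptorWalkSketch {
    public static void main(String[] args) {
        for (Descriptors.FieldDescriptor fd : Timestamp.getDescriptor().getFields()) {
            // toProto() exposes the raw FieldDescriptorProto the tests assert against.
            DescriptorProtos.FieldDescriptorProto proto = fd.toProto();
            // Prints: seconds TYPE_INT64 LABEL_OPTIONAL 1 / nanos TYPE_INT32 LABEL_OPTIONAL 2
            System.out.printf("%s %s %s %d%n",
                    proto.getName(), proto.getType(), proto.getLabel(), proto.getNumber());
        }
    }
}
```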
diff --git a/src/test/java/io/odpf/depot/message/proto/ProtoMapperTest.java b/src/test/java/org/raystack/depot/message/proto/ProtoMapperTest.java
similarity index 65%
rename from src/test/java/io/odpf/depot/message/proto/ProtoMapperTest.java
rename to src/test/java/org/raystack/depot/message/proto/ProtoMapperTest.java
index 9658e06b..224ad28a 100644
--- a/src/test/java/io/odpf/depot/message/proto/ProtoMapperTest.java
+++ b/src/test/java/org/raystack/depot/message/proto/ProtoMapperTest.java
@@ -1,4 +1,4 @@
-package io.odpf.depot.message.proto;
+package org.raystack.depot.message.proto;
 
 import com.fasterxml.jackson.databind.ObjectMapper;
 import com.fasterxml.jackson.databind.node.JsonNodeFactory;
@@ -15,16 +15,17 @@ public class ProtoMapperTest {
 
     private final ObjectMapper objectMapper = new ObjectMapper();
-
     @Test
     public void shouldTestShouldCreateFirstLevelColumnMappingSuccessfully() throws IOException {
-        ProtoField protoField = TestProtoUtil.createProtoField(new ArrayList<ProtoField>() {{
-            add(TestProtoUtil.createProtoField("order_number", 1));
-            add(TestProtoUtil.createProtoField("order_url", 2));
-            add(TestProtoUtil.createProtoField("order_details", 3));
-            add(TestProtoUtil.createProtoField("created_at", 4));
-            add(TestProtoUtil.createProtoField("status", 5));
-        }});
+        ProtoField protoField = TestProtoUtil.createProtoField(new ArrayList<ProtoField>() {
+            {
+                add(TestProtoUtil.createProtoField("order_number", 1));
+                add(TestProtoUtil.createProtoField("order_url", 2));
+                add(TestProtoUtil.createProtoField("order_details", 3));
+                add(TestProtoUtil.createProtoField("created_at", 4));
+                add(TestProtoUtil.createProtoField("status", 5));
+            }
+        });
 
         ObjectNode objNode = JsonNodeFactory.instance.objectNode();
         objNode.put("1", "order_number");
@@ -41,14 +42,19 @@ public void shouldTestShouldCreateFirstLevelColumnMappingSuccessfully() throws I
 
     @Test
     public void shouldTestShouldCreateNestedMapping() throws IOException {
-        ProtoField protoField = TestProtoUtil.createProtoField(new ArrayList<ProtoField>() {{
-            add(TestProtoUtil.createProtoField("order_number", 1));
-            add(TestProtoUtil.createProtoField("order_url", "some.type.name", DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, 2, new ArrayList<ProtoField>() {{
-                add(TestProtoUtil.createProtoField("host", 1));
-                add(TestProtoUtil.createProtoField("url", 2));
-            }}));
-            add(TestProtoUtil.createProtoField("order_details", 3));
-        }});
+        ProtoField protoField = TestProtoUtil.createProtoField(new ArrayList<ProtoField>() {
+            {
+                add(TestProtoUtil.createProtoField("order_number", 1));
+                add(TestProtoUtil.createProtoField("order_url", "some.type.name",
+                        DescriptorProtos.FieldDescriptorProto.Type.TYPE_MESSAGE, 2, new ArrayList<ProtoField>() {
+                            {
+                                add(TestProtoUtil.createProtoField("host", 1));
+                                add(TestProtoUtil.createProtoField("url", 2));
+                            }
+                        }));
+                add(TestProtoUtil.createProtoField("order_details", 3));
+            }
+        });
 
         ObjectNode objNode = JsonNodeFactory.instance.objectNode();
         ObjectNode innerObjNode = JsonNodeFactory.instance.objectNode();
@@ -59,7 +65,6 @@ public void shouldTestShouldCreateNestedMapping() throws IOException {
         objNode.put("2", innerObjNode);
         objNode.put("3", "order_details");
-
         String columnMapping = ProtoMapper.generateColumnMappings(protoField.getFields());
         String expectedProtoMapping = objectMapper.writeValueAsString(objNode);
         assertEquals(expectedProtoMapping, columnMapping);
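The expected column mapping in these tests is just a JSON object keyed by proto field index, with nested messages becoming nested objects. Building it with Jackson, as the test fixture does, looks like this (a sketch of the expected output shape only, not `ProtoMapper` itself):

```java
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.node.JsonNodeFactory;
import com.fasterxml.jackson.databind.node.ObjectNode;

public class ColumnMappingSketch {
    public static void main(String[] args) throws Exception {
        ObjectNode objNode = JsonNodeFactory.instance.objectNode();
        objNode.put("1", "order_number");
        objNode.put("2", "order_url");
        // A nested message field maps to a nested object under its own index.
        ObjectNode innerObjNode = JsonNodeFactory.instance.objectNode();
        innerObjNode.put("1", "host");
        innerObjNode.put("2", "url");
        objNode.set("3", innerObjNode);
        // {"1":"order_number","2":"order_url","3":{"1":"host","2":"url"}}
        System.out.println(new ObjectMapper().writeValueAsString(objNode));
    }
}
```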
diff --git a/src/test/java/org/raystack/depot/message/proto/ProtoMessageParserTest.java b/src/test/java/org/raystack/depot/message/proto/ProtoMessageParserTest.java
new file mode 100644
index 00000000..3f805f85
--- /dev/null
+++ b/src/test/java/org/raystack/depot/message/proto/ProtoMessageParserTest.java
@@ -0,0 +1,97 @@
+package org.raystack.depot.message.proto;
+
+import com.google.protobuf.InvalidProtocolBufferException;
+import org.raystack.depot.TestMessage;
+import org.raystack.depot.config.SinkConfig;
+import org.raystack.depot.message.SinkConnectorSchemaMessageMode;
+import org.raystack.depot.message.ParsedMessage;
+import org.raystack.depot.metrics.StatsDReporter;
+import org.raystack.depot.stencil.DepotStencilUpdateListener;
+import org.raystack.depot.message.Message;
+import org.aeonbits.owner.ConfigFactory;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
+
+public class ProtoMessageParserTest {
+
+    private final HashMap<String, String> configMap = new HashMap<String, String>() {
+        {
+            put("SCHEMA_REGISTRY_STENCIL_ENABLE", "false");
+            put("SINK_CONNECTOR_SCHEMA_PROTO_MESSAGE_CLASS", "org.raystack.depot.TestMessage");
+        }
+    };
+
+    @Test
+    public void shouldParseLogMessage() throws IOException {
+        SinkConfig sinkConfig = ConfigFactory.create(SinkConfig.class, configMap);
+        StatsDReporter statsdReporter = mock(StatsDReporter.class);
+        DepotStencilUpdateListener protoUpdateListener = mock(DepotStencilUpdateListener.class);
+        ProtoMessageParser protoMessageParser = new ProtoMessageParser(sinkConfig, statsdReporter, protoUpdateListener);
+        TestMessage testMessage = TestMessage.newBuilder().setOrderNumber("order-1").build();
+        Message message = new Message(null, testMessage.toByteArray());
+        ParsedMessage parsedMessage = protoMessageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_MESSAGE,
+                "org.raystack.depot.TestMessage");
+        assertEquals(testMessage, parsedMessage.getRaw());
+
+    }
+
+    @Test
+    public void shouldThrowErrorOnInvalidMessage() {
+        SinkConfig sinkConfig = ConfigFactory.create(SinkConfig.class, configMap);
+        StatsDReporter statsdReporter = mock(StatsDReporter.class);
+        DepotStencilUpdateListener protoUpdateListener = mock(DepotStencilUpdateListener.class);
+        ProtoMessageParser protoMessageParser = new ProtoMessageParser(sinkConfig, statsdReporter, protoUpdateListener);
+        byte[] invalidMessageBytes = "invalid message".getBytes();
+        Message message = new Message(null, invalidMessageBytes);
+        assertThrows(InvalidProtocolBufferException.class, () -> {
+            protoMessageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_MESSAGE,
+                    "org.raystack.depot.TestMessage");
+        });
+    }
+
+    @Test
+    public void shouldParseLogKey() throws IOException {
+        SinkConfig sinkConfig = ConfigFactory.create(SinkConfig.class, configMap);
+        StatsDReporter statsdReporter = mock(StatsDReporter.class);
+        DepotStencilUpdateListener protoUpdateListener = mock(DepotStencilUpdateListener.class);
+        ProtoMessageParser protoMessageParser = new ProtoMessageParser(sinkConfig, statsdReporter, protoUpdateListener);
+        TestMessage testKey = TestMessage.newBuilder().setOrderNumber("order-1").build();
+        Message message = new Message(testKey.toByteArray(), null);
+        ParsedMessage parsedMessage = protoMessageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_KEY,
+                "org.raystack.depot.TestMessage");
+        assertEquals(testKey, parsedMessage.getRaw());
+
+    }
+
+    @Test
+    public void shouldThrowErrorOnInvalidKey() {
+        SinkConfig sinkConfig = ConfigFactory.create(SinkConfig.class, configMap);
+        StatsDReporter statsdReporter = mock(StatsDReporter.class);
+        DepotStencilUpdateListener protoUpdateListener = mock(DepotStencilUpdateListener.class);
+        ProtoMessageParser protoMessageParser = new ProtoMessageParser(sinkConfig, statsdReporter, protoUpdateListener);
+        byte[] invalidKeyBytes = "invalid message".getBytes();
+        Message message = new Message(invalidKeyBytes, null);
+        assertThrows(InvalidProtocolBufferException.class, () -> {
+            protoMessageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_KEY, "org.raystack.depot.TestMessage");
+        });
+    }
+
+    @Test
+    public void shouldThrowErrorWhenModeNotDefined() {
+        SinkConfig sinkConfig = ConfigFactory.create(SinkConfig.class, configMap);
+        StatsDReporter statsdReporter = mock(StatsDReporter.class);
+        DepotStencilUpdateListener protoUpdateListener = mock(DepotStencilUpdateListener.class);
+        ProtoMessageParser protoMessageParser = new ProtoMessageParser(sinkConfig, statsdReporter, protoUpdateListener);
+        byte[] validKeyBytes = TestMessage.newBuilder().setOrderNumber("order-1").build().toByteArray();
+        Message message = new Message(validKeyBytes, null);
+        IOException ioException = assertThrows(IOException.class, () -> {
+            protoMessageParser.parse(message, null, null);
+        });
+        assertEquals("parser mode not defined", ioException.getMessage());
+    }
+}
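At bottom, these parser tests exercise protobuf's own serialize/parse round trip, with `InvalidProtocolBufferException` surfacing on junk bytes. The same behaviour can be reproduced with a stock message type (`Timestamp` here stands in for the repo's generated `TestMessage`):

```java
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Timestamp;

public class ProtoRoundTripSketch {
    public static void main(String[] args) throws Exception {
        Timestamp original = Timestamp.newBuilder().setSeconds(1669962594L).build();
        // Parse the raw bytes back through the descriptor, as a schema-driven parser would.
        DynamicMessage parsed = DynamicMessage.parseFrom(Timestamp.getDescriptor(), original.toByteArray());
        System.out.println(parsed.getField(
                Timestamp.getDescriptor().findFieldByName("seconds"))); // 1669962594
        try {
            // The same junk bytes the tests use; truncated wire data fails to parse.
            DynamicMessage.parseFrom(Timestamp.getDescriptor(), "invalid message".getBytes());
        } catch (InvalidProtocolBufferException e) {
            System.out.println("rejected: " + e.getMessage());
        }
    }
}
```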
put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor()); - put(String.format("%s", TestMessageBQ.class.getName()), TestMessageBQ.getDescriptor()); - put(String.format("%s", TestNestedMessageBQ.class.getName()), TestNestedMessageBQ.getDescriptor()); - put(String.format("%s", TestNestedRepeatedMessageBQ.class.getName()), TestNestedRepeatedMessageBQ.getDescriptor()); - put(String.format("%s", TestBookingLogMessage.class.getName()), TestBookingLogMessage.getDescriptor()); - put(String.format("%s", TestLocation.class.getName()), TestLocation.getDescriptor()); - put(String.format("%s", TestBookingLogMessage.TopicMetadata.class.getName()), TestBookingLogMessage.TopicMetadata.getDescriptor()); - put(String.format("%s", TestTypesMessage.class.getName()), TestTypesMessage.getDescriptor()); - put(String.format("%s", TestMessage.class.getName()), TestMessage.getDescriptor()); - put(String.format("%s", FloatTest.class.getName()), FloatTest.getDescriptor()); - put(String.format("%s", FloatTestContainer.class.getName()), FloatTestContainer.getDescriptor()); - put("io.odpf.depot.TestMessageBQ.CurrentStateEntry", TestMessageBQ.getDescriptor().getNestedTypes().get(0)); - put("com.google.protobuf.Struct.FieldsEntry", Struct.getDescriptor().getNestedTypes().get(0)); - put("com.google.protobuf.Duration", com.google.protobuf.Duration.getDescriptor()); - put("com.google.type.Date", com.google.type.Date.getDescriptor()); - put("google.protobuf.BoolValue", com.google.protobuf.BoolValue.getDescriptor()); - }}; - odpfMessageParser = new ProtoOdpfMessageParser(stencilClient); + descriptorsMap = new HashMap() { + { + put(String.format("%s", TestKeyBQ.class.getName()), TestKeyBQ.getDescriptor()); + put(String.format("%s", TestMessageBQ.class.getName()), TestMessageBQ.getDescriptor()); + put(String.format("%s", TestNestedMessageBQ.class.getName()), TestNestedMessageBQ.getDescriptor()); + put(String.format("%s", TestNestedRepeatedMessageBQ.class.getName()), + TestNestedRepeatedMessageBQ.getDescriptor()); + put(String.format("%s", TestBookingLogMessage.class.getName()), TestBookingLogMessage.getDescriptor()); + put(String.format("%s", TestLocation.class.getName()), TestLocation.getDescriptor()); + put(String.format("%s", TestBookingLogMessage.TopicMetadata.class.getName()), + TestBookingLogMessage.TopicMetadata.getDescriptor()); + put(String.format("%s", TestTypesMessage.class.getName()), TestTypesMessage.getDescriptor()); + put(String.format("%s", TestMessage.class.getName()), TestMessage.getDescriptor()); + put(String.format("%s", FloatTest.class.getName()), FloatTest.getDescriptor()); + put(String.format("%s", FloatTestContainer.class.getName()), FloatTestContainer.getDescriptor()); + put("org.raystack.depot.TestMessageBQ.CurrentStateEntry", + TestMessageBQ.getDescriptor().getNestedTypes().get(0)); + put("com.google.protobuf.Struct.FieldsEntry", Struct.getDescriptor().getNestedTypes().get(0)); + put("com.google.protobuf.Duration", com.google.protobuf.Duration.getDescriptor()); + put("com.google.type.Date", com.google.type.Date.getDescriptor()); + put("google.protobuf.BoolValue", com.google.protobuf.BoolValue.getDescriptor()); + } + }; + messageParser = new ProtoMessageParser(stencilClient); } @Test public void shouldReturnFieldsInProperties() throws IOException { - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - Map fields = new ProtoOdpfParsedMessage(dynamicMessage).getMapping(odpfMessageSchema); + MessageSchema messageSchema = 
messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + Map fields = new ProtoParsedMessage(dynamicMessage).getMapping(messageSchema); assertEquals("order-1", fields.get("order_number")); assertEquals("order-url", fields.get("order_url")); assertEquals("order-details", fields.get("order_details")); assertEquals(new DateTime(nowMillis), fields.get("created_at")); assertEquals("COMPLETED", fields.get("status")); Map dateFields = (Map) fields.get("order_date"); - assertEquals(1996, dateFields.get("year")); - assertEquals(11, dateFields.get("month")); - assertEquals(21, dateFields.get("day")); + assertEquals(1996L, dateFields.get("year")); + assertEquals(11L, dateFields.get("month")); + assertEquals(21L, dateFields.get("day")); } @Test @@ -103,20 +115,32 @@ public void shouldThrowExceptionWhenFloatingPointIsNaN() throws IOException { String data = "ogQFJQAAwH8="; byte[] decode = Base64.decode(data); DynamicMessage message = DynamicMessage.parseFrom(FloatTest.getDescriptor(), decode); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.FloatTest", descriptorsMap); - Assertions.assertThrows(IllegalArgumentException.class, () -> new ProtoOdpfParsedMessage(message).getMapping(odpfMessageSchema)); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.FloatTest", descriptorsMap); + Assertions.assertThrows(IllegalArgumentException.class, + () -> new ProtoParsedMessage(message).getMapping(messageSchema)); } @Test public void shouldParseDurationMessageSuccessfully() throws IOException { TestMessageBQ message = TestProtoUtil.generateTestMessage(now); Parser messageProtoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - Map fields = new ProtoOdpfParsedMessage(messageProtoParser.parse(message.toByteArray())).getMapping(odpfMessageSchema); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + Map fields = new ProtoParsedMessage(messageProtoParser.parse(message.toByteArray())) + .getMapping(messageSchema); Map durationFields = (Map) fields.get("trip_duration"); - assertEquals("order-1", fields.get("order_number")); - assertEquals((long) 1, durationFields.get("seconds")); - assertEquals(TestProtoUtil.TRIP_DURATION_NANOS, durationFields.get("nanos")); + assertEquals(message.getOrderNumber(), fields.get("order_number")); + assertEquals(1L, durationFields.get("seconds")); + assertEquals(1000L, durationFields.get("nanos")); + } + + @Test + public void shouldParseBytes() throws IOException { + TestMessageBQ message = TestProtoUtil.generateTestMessage(now); + Parser messageProtoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + Map fields = new ProtoParsedMessage(messageProtoParser.parse(message.toByteArray())) + .getMapping(messageSchema); + assertEquals("dGVzdC10b2tlbg==", fields.get("user_token")); } @Test @@ -130,8 +154,8 @@ public void shouldParseNestedMessageSuccessfully() { Arrays.asList(nestedMessage1, nestedMessage2).forEach(msg -> { Map fields = null; try { - fields = new ProtoOdpfParsedMessage(protoParser.parse(msg.toByteArray())) - .getMapping(odpfMessageParser.getSchema("io.odpf.depot.TestNestedMessageBQ", descriptorsMap)); + fields = new 
ProtoParsedMessage(protoParser.parse(msg.toByteArray())) + .getMapping(messageParser.getSchema("org.raystack.depot.TestNestedMessageBQ", descriptorsMap)); } catch (IOException e) { e.printStackTrace(); } @@ -149,8 +173,9 @@ public void shouldParseRepeatedPrimitives() throws IOException { .build(); Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - Map fields = new ProtoOdpfParsedMessage(protoParser.parse(message.toByteArray())).getMapping(odpfMessageSchema); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + Map fields = new ProtoParsedMessage(protoParser.parse(message.toByteArray())) + .getMapping(messageSchema); assertEquals(orderNumber, fields.get("order_number")); assertEquals(Arrays.asList("alias1", "alias2"), fields.get("aliases")); @@ -168,10 +193,12 @@ public void shouldParseRepeatedNestedMessages() throws IOException { .build(); Parser protoParser = StencilClientFactory.getClient().getParser(TestNestedRepeatedMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestNestedRepeatedMessageBQ", descriptorsMap); - Map fields = new ProtoOdpfParsedMessage(protoParser.parse(message.toByteArray())).getMapping(odpfMessageSchema); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestNestedRepeatedMessageBQ", + descriptorsMap); + Map fields = new ProtoParsedMessage(protoParser.parse(message.toByteArray())) + .getMapping(messageSchema); - assertEquals(number, fields.get("number_field")); + assertEquals((long) number, fields.get("number_field")); List repeatedMessagesMap = (List) fields.get("repeated_message"); assertTestMessageFields((Map) repeatedMessagesMap.get(0), nested1); assertTestMessageFields((Map) repeatedMessagesMap.get(1), nested2); @@ -179,16 +206,17 @@ public void shouldParseRepeatedNestedMessages() throws IOException { @Test public void shouldParseRepeatedNestedMessagesIfRepeatedFieldsAreMissing() throws IOException { - int number = 1234; TestNestedRepeatedMessageBQ message = TestNestedRepeatedMessageBQ.newBuilder() - .setNumberField(number) + .setNumberField(1234) .build(); Parser protoParser = StencilClientFactory.getClient().getParser(TestNestedRepeatedMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestNestedRepeatedMessageBQ", descriptorsMap); - Map fields = new ProtoOdpfParsedMessage(protoParser.parse(message.toByteArray())).getMapping(odpfMessageSchema); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestNestedRepeatedMessageBQ", + descriptorsMap); + Map fields = new ProtoParsedMessage(protoParser.parse(message.toByteArray())) + .getMapping(messageSchema); - assertEquals(number, fields.get("number_field")); + assertEquals(1234L, fields.get("number_field")); assertEquals(1, fields.size()); } @@ -203,8 +231,9 @@ public void shouldParseMapFields() throws IOException { .build(); Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - Map fields = new ProtoOdpfParsedMessage(protoParser.parse(message.toByteArray())).getMapping(odpfMessageSchema); + MessageSchema messageSchema = 
messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + Map fields = new ProtoParsedMessage(protoParser.parse(message.toByteArray())) + .getMapping(messageSchema); assertEquals(message.getOrderNumber(), fields.get("order_number")); assertEquals(message.getOrderUrl(), fields.get("order_url")); @@ -236,9 +265,9 @@ public void shouldMapStructFields() throws IOException { .build(); Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - Map fields = new ProtoOdpfParsedMessage(protoParser.parse(message.toByteArray())).getMapping(odpfMessageSchema); - + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + Map fields = new ProtoParsedMessage(protoParser.parse(message.toByteArray())) + .getMapping(messageSchema); assertEquals(message.getOrderNumber(), fields.get("order_number")); String expectedProperties = "{\"number\":123.45,\"string\":\"string_val\",\"list\":[1.0,2.0,3.0],\"boolean\":true}"; @@ -263,8 +292,9 @@ private void assertTestMessageFields(Map nestedFields, TestMessageBQ message) { @Test() public void shouldReturnNullWhenIndexNotPresent() throws IOException { - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestNestedRepeatedMessageBQ", descriptorsMap); - Map fields = new ProtoOdpfParsedMessage(dynamicMessage).getMapping(odpfMessageSchema); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestNestedRepeatedMessageBQ", + descriptorsMap); + Map fields = new ProtoParsedMessage(dynamicMessage).getMapping(messageSchema); assertNull(fields.get("single_message")); } @@ -276,8 +306,8 @@ public void shouldReturnNullWhenNoDateFieldIsProvided() throws IOException { Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); dynamicMessage = protoParser.parse(testMessage.toByteArray()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - Map fields = new ProtoOdpfParsedMessage(dynamicMessage).getMapping(odpfMessageSchema); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + Map fields = new ProtoParsedMessage(dynamicMessage).getMapping(messageSchema); assertNull(fields.get("order_date")); } @@ -293,11 +323,12 @@ public void shouldParseRepeatedTimestamp() throws IOException { Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(protoParser.parse(message.toByteArray())); - Map fields = protoOdpfParsedMessage.getMapping(odpfMessageSchema); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(protoParser.parse(message.toByteArray())); + Map fields = protoParsedMessage.getMapping(messageSchema); - assertEquals(Arrays.asList(new DateTime(now.toEpochMilli()), new DateTime(now.toEpochMilli())), fields.get("updated_at")); + assertEquals(Arrays.asList(new DateTime(now.toEpochMilli()), new DateTime(now.toEpochMilli())), + fields.get("updated_at")); } @Test @@ -309,8 +340,9 @@ public void 
shouldParseStructField() throws IOException { Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - Map fields = new ProtoOdpfParsedMessage(protoParser.parse(message.toByteArray())).getMapping(odpfMessageSchema); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + Map fields = new ProtoParsedMessage(protoParser.parse(message.toByteArray())) + .getMapping(messageSchema); assertEquals("{\"name\":\"John\",\"age\":\"50\"}", fields.get("properties")); } @@ -325,9 +357,11 @@ public void shouldParseRepeatableStructField() throws IOException { .build(); Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - Map fields = new ProtoOdpfParsedMessage(protoParser.parse(message.toByteArray())).getMapping(odpfMessageSchema); - assertEquals(Arrays.asList("{\"name\":\"John\",\"age\":\"50\"}", "{\"name\":\"John\",\"age\":\"60\"}"), fields.get("attributes")); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + Map fields = new ProtoParsedMessage(protoParser.parse(message.toByteArray())) + .getMapping(messageSchema); + assertEquals(Arrays.asList("{\"name\":\"John\",\"age\":\"50\"}", "{\"name\":\"John\",\"age\":\"60\"}"), + fields.get("attributes")); } @Test @@ -339,26 +373,27 @@ public void shouldCacheMappingForSameSchema() throws IOException { .addAttributes(Struct.newBuilder().putFields("name", Value.newBuilder().setStringValue("John").build()) .putFields("age", Value.newBuilder().setStringValue("60").build()).build()) .build(); - OdpfMessageSchema odpfMessageSchema1 = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - OdpfMessageSchema odpfMessageSchema2 = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - ParsedOdpfMessage parsedOdpfMessage = new ProtoOdpfParsedMessage(protoParser.parse(message.toByteArray())); - Map map1 = parsedOdpfMessage.getMapping(odpfMessageSchema1); - Map map2 = parsedOdpfMessage.getMapping(odpfMessageSchema2); + MessageSchema messageSchema1 = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + MessageSchema messageSchema2 = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + ParsedMessage parsedMessage = new ProtoParsedMessage(protoParser.parse(message.toByteArray())); + Map map1 = parsedMessage.getMapping(messageSchema1); + Map map2 = parsedMessage.getMapping(messageSchema2); assertEquals(map1, map2); } - @Test public void shouldGetFieldByName() throws IOException { - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(dynamicMessage); - Object orderNumber = ((ProtoField) protoOdpfParsedMessage.getFieldByName("order_number", odpfMessageSchema)).getValue(); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(dynamicMessage); + Object orderNumber = ((org.raystack.depot.message.proto.converter.fields.ProtoField) protoParsedMessage + .getFieldByName("order_number", messageSchema)).getValue(); 
Assert.assertEquals("order-1", orderNumber); } @Test public void shouldGetComplexFieldByName() throws IOException { - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestBookingLogMessage", descriptorsMap); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestBookingLogMessage", + descriptorsMap); TestBookingLogMessage testBookingLogMessage = TestBookingLogMessage.newBuilder() .setCustomerName("johndoe") .addTopics(TestBookingLogMessage.TopicMetadata.newBuilder() @@ -370,25 +405,26 @@ public void shouldGetComplexFieldByName() throws IOException { .build(); Parser protoParser = StencilClientFactory.getClient().getParser(TestBookingLogMessage.class.getName()); DynamicMessage bookingLogDynamicMessage = protoParser.parse(testBookingLogMessage.toByteArray()); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(bookingLogDynamicMessage); - ProtoField f = (ProtoField) protoOdpfParsedMessage.getFieldByName("topics", odpfMessageSchema); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(bookingLogDynamicMessage); + org.raystack.depot.message.proto.converter.fields.ProtoField f = (org.raystack.depot.message.proto.converter.fields.ProtoField) protoParsedMessage + .getFieldByName("topics", messageSchema); Assert.assertTrue(f instanceof MessageProtoField); Assert.assertTrue(f.getValue() instanceof Collection); List list = (List) f.getValue(); Assert.assertEquals(TestBookingLogMessage.TopicMetadata.newBuilder() - .setQos(1) - .setTopic("hellowo/rl/dcom.world.partner").build(), + .setQos(1) + .setTopic("hellowo/rl/dcom.world.partner").build(), list.get(0)); Assert.assertEquals(TestBookingLogMessage.TopicMetadata.newBuilder() - .setQos(123) - .setTopic("my-topic").build(), + .setQos(123) + .setTopic("my-topic").build(), list.get(1)); } - @Test public void shouldGetStructFromProto() throws IOException { - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestBookingLogMessage", descriptorsMap); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestBookingLogMessage", + descriptorsMap); TestBookingLogMessage testBookingLogMessage = TestBookingLogMessage.newBuilder() .setCustomerName("johndoe") .addTopics(TestBookingLogMessage.TopicMetadata.newBuilder() @@ -398,9 +434,11 @@ public void shouldGetStructFromProto() throws IOException { .build(); Parser protoParser = StencilClientFactory.getClient().getParser(TestBookingLogMessage.class.getName()); DynamicMessage bookingLogDynamicMessage = protoParser.parse(testBookingLogMessage.toByteArray()); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(bookingLogDynamicMessage); - Object driverPickupLocation = ((ProtoField) protoOdpfParsedMessage.getFieldByName("driver_pickup_location", odpfMessageSchema)).getValue(); - Assert.assertEquals(TestLocation.newBuilder().setLatitude(10.0).setLongitude(12.0).build(), driverPickupLocation); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(bookingLogDynamicMessage); + Object driverPickupLocation = ((org.raystack.depot.message.proto.converter.fields.ProtoField) protoParsedMessage + .getFieldByName("driver_pickup_location", messageSchema)).getValue(); + Assert.assertEquals(TestLocation.newBuilder().setLatitude(10.0).setLongitude(12.0).build(), + driverPickupLocation); } @Test @@ -416,10 +454,11 @@ public void shouldGetRepeatableStructField() throws IOException { .build(); Parser protoParser = 
StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(protoParser.parse(message.toByteArray())); - List attributes = (List) ((ProtoField) (protoOdpfParsedMessage.getFieldByName("attributes", odpfMessageSchema))).getValue(); - protoOdpfParsedMessage.getMapping(odpfMessageSchema); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(protoParser.parse(message.toByteArray())); + List attributes = (List) ((org.raystack.depot.message.proto.converter.fields.ProtoField) (protoParsedMessage + .getFieldByName("attributes", messageSchema))).getValue(); + protoParsedMessage.getMapping(messageSchema); JSONArray expectedArray = new JSONArray(); JSONArray actualArray = new JSONArray(); for (int ii = 0; ii < message.getAttributesCount(); ii++) { @@ -438,37 +477,42 @@ public void shouldGetNumberFields() throws IOException { .setPrice(10.2f) .build(); Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(protoParser.parse(message.toByteArray())); - Object discount = ((ProtoField) protoOdpfParsedMessage.getFieldByName("discount", odpfMessageSchema)).getValue(); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(protoParser.parse(message.toByteArray())); + Object discount = ((org.raystack.depot.message.proto.converter.fields.ProtoField) protoParsedMessage + .getFieldByName("discount", messageSchema)).getValue(); Assert.assertEquals(10000012010L, discount); - float price = (float) ((ProtoField) protoOdpfParsedMessage.getFieldByName("price", odpfMessageSchema)).getValue(); - Assert.assertEquals(10.2f, price, 0.00000000001); + double price = (double) ((org.raystack.depot.message.proto.converter.fields.ProtoField) protoParsedMessage + .getFieldByName("price", messageSchema)).getValue(); + Assert.assertEquals(10.2D, price, 0.00000000001); } @Test public void shouldGetRepeatedTimeStamps() throws IOException { TestMessageBQ message1 = TestProtoUtil.generateTestMessage(now); Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(protoParser.parse(message1.toByteArray())); - Object updatedTimeStamps = ((ProtoField) protoOdpfParsedMessage.getFieldByName("updated_at", odpfMessageSchema)).getValue(); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(protoParser.parse(message1.toByteArray())); + Object updatedTimeStamps = ((org.raystack.depot.message.proto.converter.fields.ProtoField) protoParsedMessage + .getFieldByName("updated_at", messageSchema)).getValue(); Assert.assertEquals(2, ((List) updatedTimeStamps).size()); Assert.assertEquals(now, ((List) updatedTimeStamps).get(0)); 
Assert.assertEquals(now, ((List) updatedTimeStamps).get(1)); } - @Test public void shouldGetFieldByNameFromNested() throws IOException { TestMessageBQ message1 = TestProtoUtil.generateTestMessage(now); Parser protoParser = StencilClientFactory.getClient().getParser(TestNestedMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestNestedMessageBQ", descriptorsMap); - TestNestedMessageBQ nestedMessage = TestNestedMessageBQ.newBuilder().setNestedId("test").setSingleMessage(message1).build(); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(protoParser.parse(nestedMessage.toByteArray())); - Object nestedId = ((ProtoField) protoOdpfParsedMessage.getFieldByName("nested_id", odpfMessageSchema)).getValue(); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestNestedMessageBQ", descriptorsMap); + TestNestedMessageBQ nestedMessage = TestNestedMessageBQ.newBuilder().setNestedId("test") + .setSingleMessage(message1).build(); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(protoParser.parse(nestedMessage.toByteArray())); + Object nestedId = ((org.raystack.depot.message.proto.converter.fields.ProtoField) protoParsedMessage + .getFieldByName("nested_id", messageSchema)).getValue(); Assert.assertEquals("test", nestedId); - Object orderNumber = ((ProtoField) protoOdpfParsedMessage.getFieldByName("single_message.order_number", odpfMessageSchema)).getValue(); + Object orderNumber = ((org.raystack.depot.message.proto.converter.fields.ProtoField) protoParsedMessage + .getFieldByName("single_message.order_number", messageSchema)).getValue(); Assert.assertEquals(message1.getOrderNumber(), orderNumber); } @@ -476,17 +520,19 @@ public void shouldGetFieldByNameFromNested() throws IOException { public void shouldReturnInstantField() throws IOException { Instant time = Instant.ofEpochSecond(1669160207, 600000000); TestMessageBQ message1 = TestProtoUtil.generateTestMessage(time); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(parser.parse(message1.toByteArray())); - Assert.assertEquals(time, ((ProtoField) protoOdpfParsedMessage.getFieldByName("created_at", odpfMessageSchema)).getValue()); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(parser.parse(message1.toByteArray())); + Assert.assertEquals(time, ((org.raystack.depot.message.proto.converter.fields.ProtoField) protoParsedMessage + .getFieldByName("created_at", messageSchema)).getValue()); } @Test public void shouldReturnDurationFieldInStringFormat() throws IOException { TestMessageBQ message1 = TestProtoUtil.generateTestMessage(now); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(parser.parse(message1.toByteArray())); - Object tripDuration = ((ProtoField) protoOdpfParsedMessage.getFieldByName("trip_duration", odpfMessageSchema)).getValue(); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(parser.parse(message1.toByteArray())); + Object tripDuration = 
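The `created_at` assertion above relies on the standard protobuf `Timestamp` to `java.time.Instant` mapping. A self-contained round-trip, with illustrative helper names, using the same instant as the test:

```java
import com.google.protobuf.Timestamp;
import java.time.Instant;

public class TimestampConversionSketch {

    // protobuf Timestamp -> java.time.Instant
    static Instant toInstant(Timestamp ts) {
        return Instant.ofEpochSecond(ts.getSeconds(), ts.getNanos());
    }

    // java.time.Instant -> protobuf Timestamp
    static Timestamp fromInstant(Instant instant) {
        return Timestamp.newBuilder()
                .setSeconds(instant.getEpochSecond())
                .setNanos(instant.getNano())
                .build();
    }

    public static void main(String[] args) {
        Instant time = Instant.ofEpochSecond(1669160207, 600000000);
        System.out.println(toInstant(fromInstant(time)).equals(time)); // true
    }
}
```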
((org.raystack.depot.message.proto.converter.fields.ProtoField) protoParsedMessage + .getFieldByName("trip_duration", messageSchema)).getValue(); Assert.assertEquals( Duration.newBuilder().setSeconds(1).setNanos(TestProtoUtil.TRIP_DURATION_NANOS).build(), tripDuration); @@ -495,9 +541,10 @@ public void shouldReturnDurationFieldInStringFormat() throws IOException { @Test public void shouldReturnMapFieldAsJSONObject() throws IOException { TestMessageBQ message1 = TestMessageBQ.newBuilder().putCurrentState("running", "active").build(); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(parser.parse(message1.toByteArray())); - Object currentState = ((ProtoField) protoOdpfParsedMessage.getFieldByName("current_state", odpfMessageSchema)).getValue(); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(parser.parse(message1.toByteArray())); + Object currentState = ((org.raystack.depot.message.proto.converter.fields.ProtoField) protoParsedMessage + .getFieldByName("current_state", messageSchema)).getValue(); Assert.assertTrue(currentState instanceof List); Assert.assertEquals(1, ((List) currentState).size()); Message m = (Message) ((List) currentState).get(0); @@ -509,9 +556,10 @@ public void shouldReturnMapFieldAsJSONObject() throws IOException { @Test public void shouldReturnDefaultValueForFieldIfValueIsNotSet() throws IOException { TestMessageBQ emptyMessage = TestMessageBQ.newBuilder().build(); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(parser.parse(emptyMessage.toByteArray())); - String orderNumber = (String) ((ProtoField) protoOdpfParsedMessage.getFieldByName("order_number", odpfMessageSchema)).getValue(); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(parser.parse(emptyMessage.toByteArray())); + String orderNumber = (String) ((org.raystack.depot.message.proto.converter.fields.ProtoField) protoParsedMessage + .getFieldByName("order_number", messageSchema)).getValue(); Assert.assertEquals("", orderNumber); } @@ -519,12 +567,15 @@ public void shouldReturnDefaultValueForFieldIfValueIsNotSet() throws IOException public void shouldThrowExceptionIfColumnIsNotPresentInProto() throws IOException { TestMessageBQ message1 = TestProtoUtil.generateTestMessage(now); Parser protoParser = StencilClientFactory.getClient().getParser(TestNestedMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestNestedMessageBQ", descriptorsMap); - TestNestedMessageBQ nestedMessage = TestNestedMessageBQ.newBuilder().setNestedId("test").setSingleMessage(message1).build(); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(protoParser.parse(nestedMessage.toByteArray())); - String nestedId = (String) ((ProtoField) protoOdpfParsedMessage.getFieldByName("nested_id", odpfMessageSchema)).getValue(); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestNestedMessageBQ", descriptorsMap); + TestNestedMessageBQ nestedMessage = TestNestedMessageBQ.newBuilder().setNestedId("test") + 
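`shouldReturnMapFieldAsJSONObject` receives a `List` because protobuf reflection exposes a `map<k, v>` field as repeated key/value entry messages. The same behaviour can be observed with the well-known `Struct` type, whose `fields` member is a proto map:

```java
import com.google.protobuf.Descriptors;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.Struct;
import com.google.protobuf.Value;

import java.util.List;

public class MapFieldSketch {
    public static void main(String[] args) throws InvalidProtocolBufferException {
        // map<string, Value> on the wire is a repeated entry message.
        Struct struct = Struct.newBuilder()
                .putFields("running", Value.newBuilder().setStringValue("active").build())
                .build();
        DynamicMessage dm = DynamicMessage.parseFrom(Struct.getDescriptor(), struct.toByteArray());
        Descriptors.FieldDescriptor fields = dm.getDescriptorForType().findFieldByName("fields");
        List<?> entries = (List<?>) dm.getField(fields);
        DynamicMessage entry = (DynamicMessage) entries.get(0);
        Descriptors.FieldDescriptor key = entry.getDescriptorForType().findFieldByName("key");
        System.out.println(entry.getField(key)); // running
    }
}
```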
.setSingleMessage(message1).build(); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(protoParser.parse(nestedMessage.toByteArray())); + String nestedId = (String) ((org.raystack.depot.message.proto.converter.fields.ProtoField) protoParsedMessage + .getFieldByName("nested_id", messageSchema)).getValue(); Assert.assertEquals("test", nestedId); - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> protoOdpfParsedMessage.getFieldByName("single_message.order_id", odpfMessageSchema)); + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, + () -> protoParsedMessage.getFieldByName("single_message.order_id", messageSchema)); Assert.assertEquals("Invalid field config : single_message.order_id", exception.getMessage()); } @@ -532,21 +583,24 @@ public void shouldThrowExceptionIfColumnIsNotPresentInProto() throws IOException public void shouldThrowExceptionIfColumnIsNotNested() throws IOException { TestMessageBQ message1 = TestProtoUtil.generateTestMessage(now); Parser protoParser = StencilClientFactory.getClient().getParser(TestNestedMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestNestedMessageBQ", descriptorsMap); - TestNestedMessageBQ nestedMessage = TestNestedMessageBQ.newBuilder().setNestedId("test").setSingleMessage(message1).build(); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(protoParser.parse(nestedMessage.toByteArray())); - String nestedId = (String) ((ProtoField) protoOdpfParsedMessage.getFieldByName("nested_id", odpfMessageSchema)).getValue(); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestNestedMessageBQ", descriptorsMap); + TestNestedMessageBQ nestedMessage = TestNestedMessageBQ.newBuilder().setNestedId("test") + .setSingleMessage(message1).build(); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(protoParser.parse(nestedMessage.toByteArray())); + String nestedId = (String) ((org.raystack.depot.message.proto.converter.fields.ProtoField) protoParsedMessage + .getFieldByName("nested_id", messageSchema)).getValue(); Assert.assertEquals("test", nestedId); - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> protoOdpfParsedMessage.getFieldByName("nested_id.order_id", odpfMessageSchema)); + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, + () -> protoParsedMessage.getFieldByName("nested_id.order_id", messageSchema)); Assert.assertEquals("Invalid field config : nested_id.order_id", exception.getMessage()); } - @Test public void shouldThrowExceptionIfFieldIsEmpty() throws IOException { - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(dynamicMessage); - IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, () -> protoOdpfParsedMessage.getFieldByName("", odpfMessageSchema)); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(dynamicMessage); + IllegalArgumentException exception = assertThrows(IllegalArgumentException.class, + () -> protoParsedMessage.getFieldByName("", messageSchema)); Assert.assertEquals("Invalid field config : name can not be empty", exception.getMessage()); } @@ -554,10 +608,11 @@ public void 
shouldThrowExceptionIfFieldIsEmpty() throws IOException { public void shouldReturnRepeatedDurations() throws IOException { TestMessageBQ message1 = TestProtoUtil.generateTestMessage(now); Parser protoParser = StencilClientFactory.getClient().getParser(TestMessageBQ.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(protoParser.parse(message1.toByteArray())); - protoOdpfParsedMessage.getMapping(odpfMessageSchema); - Object intervals = ((ProtoField) protoOdpfParsedMessage.getFieldByName("intervals", odpfMessageSchema)).getValue(); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(protoParser.parse(message1.toByteArray())); + protoParsedMessage.getMapping(messageSchema); + Object intervals = ((org.raystack.depot.message.proto.converter.fields.ProtoField) protoParsedMessage + .getFieldByName("intervals", messageSchema)).getValue(); Assert.assertEquals(Duration.newBuilder().setSeconds(12).setNanos(1000).build(), ((List) intervals).get(0)); Assert.assertEquals(Duration.newBuilder().setSeconds(15).setNanos(1000).build(), ((List) intervals).get(1)); } @@ -570,10 +625,12 @@ public void shouldReturnRepeatedString() throws IOException { .addListValues("test2") .addListValues("test3") .build(); - Parser protoParser = StencilClientFactory.getClient().getParser(TestTypesMessage.class.getName()); - OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestTypesMessage", descriptorsMap); - ProtoOdpfParsedMessage protoOdpfParsedMessage = new ProtoOdpfParsedMessage(protoParser.parse(message.toByteArray())); - List listValues = (List) ((ProtoField) protoOdpfParsedMessage.getFieldByName("list_values", odpfMessageSchema)).getValue(); + org.raystack.stencil.Parser protoParser = StencilClientFactory.getClient() + .getParser(TestTypesMessage.class.getName()); + MessageSchema messageSchema = messageParser.getSchema("org.raystack.depot.TestTypesMessage", descriptorsMap); + ProtoParsedMessage protoParsedMessage = new ProtoParsedMessage(protoParser.parse(message.toByteArray())); + List listValues = (List) ((ProtoField) protoParsedMessage.getFieldByName("list_values", messageSchema)) + .getValue(); Assert.assertEquals("test1", listValues.get(0)); Assert.assertEquals("test2", listValues.get(1)); Assert.assertEquals("test3", listValues.get(2)); diff --git a/src/test/java/io/odpf/depot/message/proto/TestProtoUtil.java b/src/test/java/org/raystack/depot/message/proto/TestProtoUtil.java similarity index 71% rename from src/test/java/io/odpf/depot/message/proto/TestProtoUtil.java rename to src/test/java/org/raystack/depot/message/proto/TestProtoUtil.java index 7f1d5779..5386f2c0 100644 --- a/src/test/java/io/odpf/depot/message/proto/TestProtoUtil.java +++ b/src/test/java/org/raystack/depot/message/proto/TestProtoUtil.java @@ -1,11 +1,12 @@ -package io.odpf.depot.message.proto; +package org.raystack.depot.message.proto; +import com.google.protobuf.ByteString; import com.google.protobuf.DescriptorProtos; import com.google.protobuf.Duration; import com.google.protobuf.Timestamp; -import io.odpf.depot.StatusBQ; -import io.odpf.depot.TestMessageBQ; -import io.odpf.depot.TestNestedMessageBQ; +import org.raystack.depot.StatusBQ; +import org.raystack.depot.TestMessageBQ; +import org.raystack.depot.TestNestedMessageBQ; import 
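The repeated-field tests above (`intervals`, `list_values`) rely on protobuf reflection returning repeated fields as a `java.util.List`. A standalone demonstration with the well-known `ListValue` type:

```java
import com.google.protobuf.Descriptors;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.ListValue;
import com.google.protobuf.Value;

import java.util.List;

public class RepeatedFieldSketch {
    public static void main(String[] args) throws InvalidProtocolBufferException {
        ListValue lv = ListValue.newBuilder()
                .addValues(Value.newBuilder().setStringValue("test1").build())
                .addValues(Value.newBuilder().setStringValue("test2").build())
                .build();
        DynamicMessage dm = DynamicMessage.parseFrom(ListValue.getDescriptor(), lv.toByteArray());
        Descriptors.FieldDescriptor values = dm.getDescriptorForType().findFieldByName("values");
        // Repeated fields come back from getField(...) as a java.util.List.
        List<?> items = (List<?>) dm.getField(values);
        System.out.println(items.size()); // 2
    }
}
```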
java.time.Instant; import java.util.ArrayList; @@ -27,12 +28,15 @@ public static TestMessageBQ generateTestMessage(Instant now) { .setOrderDetails("order-details-" + call) .setCreatedAt(createdAt) .setPrice(PRICE) + .setUserToken(ByteString.copyFrom("test-token".getBytes())) .setStatus(StatusBQ.COMPLETED) .setTripDuration(Duration.newBuilder().setSeconds(1).setNanos(TRIP_DURATION_NANOS).build()) .addUpdatedAt(createdAt) .addUpdatedAt(createdAt) - .addIntervals(Duration.newBuilder().setSeconds(TRIP_DURATION_SECONDS_1).setNanos(TRIP_DURATION_NANOS).build()) - .addIntervals(Duration.newBuilder().setSeconds(TRIP_DURATION_SECONDS_2).setNanos(TRIP_DURATION_NANOS).build()) + .addIntervals( + Duration.newBuilder().setSeconds(TRIP_DURATION_SECONDS_1).setNanos(TRIP_DURATION_NANOS).build()) + .addIntervals( + Duration.newBuilder().setSeconds(TRIP_DURATION_SECONDS_2).setNanos(TRIP_DURATION_NANOS).build()) .build(); } @@ -44,7 +48,8 @@ public static TestNestedMessageBQ generateTestNestedMessage(String nestedId, Tes .build(); } - public static ProtoField createProtoField(String name, DescriptorProtos.FieldDescriptorProto.Type type, DescriptorProtos.FieldDescriptorProto.Label label) { + public static ProtoField createProtoField(String name, DescriptorProtos.FieldDescriptorProto.Type type, + DescriptorProtos.FieldDescriptorProto.Label label) { return new ProtoField(name, "", type, label, new ArrayList<>(), 0); } @@ -52,11 +57,14 @@ public static ProtoField createProtoField(List subFields) { return new ProtoField("", "", null, null, subFields, 0); } - public static ProtoField createProtoField(String name, String typeName, DescriptorProtos.FieldDescriptorProto.Type type, DescriptorProtos.FieldDescriptorProto.Label label) { + public static ProtoField createProtoField(String name, String typeName, + DescriptorProtos.FieldDescriptorProto.Type type, DescriptorProtos.FieldDescriptorProto.Label label) { return new ProtoField(name, typeName, type, label, new ArrayList<>(), 0); } - public static ProtoField createProtoField(String name, String typeName, DescriptorProtos.FieldDescriptorProto.Type type, DescriptorProtos.FieldDescriptorProto.Label label, List fields) { + public static ProtoField createProtoField(String name, String typeName, + DescriptorProtos.FieldDescriptorProto.Type type, DescriptorProtos.FieldDescriptorProto.Label label, + List fields) { return new ProtoField(name, typeName, type, label, fields, 0); } @@ -64,7 +72,8 @@ public static ProtoField createProtoField(String name, int index) { return new ProtoField(name, "", null, null, new ArrayList<>(), index); } - public static ProtoField createProtoField(String name, String typeName, DescriptorProtos.FieldDescriptorProto.Type type, int index, List fields) { + public static ProtoField createProtoField(String name, String typeName, + DescriptorProtos.FieldDescriptorProto.Type type, int index, List fields) { return new ProtoField(name, typeName, type, null, fields, index); } } diff --git a/src/test/java/io/odpf/depot/message/proto/UnknownProtoFieldsTest.java b/src/test/java/org/raystack/depot/message/proto/UnknownProtoFieldsTest.java similarity index 88% rename from src/test/java/io/odpf/depot/message/proto/UnknownProtoFieldsTest.java rename to src/test/java/org/raystack/depot/message/proto/UnknownProtoFieldsTest.java index cc8d0c9f..25dda721 100644 --- a/src/test/java/io/odpf/depot/message/proto/UnknownProtoFieldsTest.java +++ b/src/test/java/org/raystack/depot/message/proto/UnknownProtoFieldsTest.java @@ -1,12 +1,11 @@ -package 
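`generateTestMessage` now sets a `bytes` field (`user_token`) via `ByteString`. When such a field has to cross a text boundary, protobuf's canonical JSON mapping for `bytes` is base64; a small round-trip for orientation:

```java
import com.google.protobuf.ByteString;

import java.nio.charset.StandardCharsets;
import java.util.Base64;

public class ByteStringSketch {
    public static void main(String[] args) {
        ByteString token = ByteString.copyFrom("test-token".getBytes(StandardCharsets.UTF_8));
        // Base64-encode the raw bytes, as protobuf's JSON mapping does for `bytes` fields.
        String encoded = Base64.getEncoder().encodeToString(token.toByteArray());
        System.out.println(encoded);
        // Decode back to the original UTF-8 string.
        System.out.println(ByteString.copyFrom(Base64.getDecoder().decode(encoded)).toStringUtf8());
    }
}
```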
io.odpf.depot.message.proto; +package org.raystack.depot.message.proto; -import io.odpf.depot.TestMessage; +import org.raystack.depot.TestMessage; import org.junit.Assert; import org.junit.Test; import java.nio.charset.StandardCharsets; - public class UnknownProtoFieldsTest { @Test diff --git a/src/test/java/io/odpf/depot/message/proto/converter/fields/ByteProtoFieldTest.java b/src/test/java/org/raystack/depot/message/proto/converter/fields/ByteProtoFieldTest.java similarity index 89% rename from src/test/java/io/odpf/depot/message/proto/converter/fields/ByteProtoFieldTest.java rename to src/test/java/org/raystack/depot/message/proto/converter/fields/ByteProtoFieldTest.java index d907bbd1..d43a7b1a 100644 --- a/src/test/java/io/odpf/depot/message/proto/converter/fields/ByteProtoFieldTest.java +++ b/src/test/java/org/raystack/depot/message/proto/converter/fields/ByteProtoFieldTest.java @@ -1,8 +1,8 @@ -package io.odpf.depot.message.proto.converter.fields; +package org.raystack.depot.message.proto.converter.fields; import com.google.protobuf.ByteString; import com.google.protobuf.Descriptors; -import io.odpf.depot.TestBytesMessage; +import org.raystack.depot.TestBytesMessage; import org.junit.Before; import org.junit.Test; @@ -11,7 +11,6 @@ import static org.junit.Assert.*; - public class ByteProtoFieldTest { private ByteProtoField byteProtoField; @@ -43,7 +42,8 @@ public void shouldMatchByteProtobufField() { public void shouldNotMatchFieldOtherThanByteProtobufField() { TestBytesMessage bytesMessage = TestBytesMessage.newBuilder() .build(); - Descriptors.FieldDescriptor fieldDescriptor = bytesMessage.getDescriptorForType().findFieldByName("order_number"); + Descriptors.FieldDescriptor fieldDescriptor = bytesMessage.getDescriptorForType() + .findFieldByName("order_number"); byteProtoField = new ByteProtoField(fieldDescriptor, bytesMessage.getField(fieldDescriptor)); assertFalse(byteProtoField.matches()); diff --git a/src/test/java/io/odpf/depot/message/proto/converter/fields/DefaultProtoFieldTest.java b/src/test/java/org/raystack/depot/message/proto/converter/fields/DefaultProtoFieldTest.java similarity index 80% rename from src/test/java/io/odpf/depot/message/proto/converter/fields/DefaultProtoFieldTest.java rename to src/test/java/org/raystack/depot/message/proto/converter/fields/DefaultProtoFieldTest.java index 0e0dff53..cdc0cc4e 100644 --- a/src/test/java/io/odpf/depot/message/proto/converter/fields/DefaultProtoFieldTest.java +++ b/src/test/java/org/raystack/depot/message/proto/converter/fields/DefaultProtoFieldTest.java @@ -1,9 +1,9 @@ -package io.odpf.depot.message.proto.converter.fields; +package org.raystack.depot.message.proto.converter.fields; import com.google.protobuf.Descriptors; import com.google.protobuf.DynamicMessage; import com.google.protobuf.InvalidProtocolBufferException; -import io.odpf.depot.TestMessage; +import org.raystack.depot.TestMessage; import org.junit.Test; import static org.junit.Assert.assertEquals; @@ -15,8 +15,10 @@ public class DefaultProtoFieldTest { public void shouldReturnProtobufElementsAsItIs() throws InvalidProtocolBufferException { String orderNumber = "123X"; TestMessage testMessage = TestMessage.newBuilder().setOrderNumber(orderNumber).build(); - DynamicMessage dynamicMessage = DynamicMessage.parseFrom(testMessage.getDescriptorForType(), testMessage.toByteArray()); - Descriptors.FieldDescriptor fieldDescriptor = dynamicMessage.getDescriptorForType().findFieldByName("order_number"); + DynamicMessage dynamicMessage = 
DynamicMessage.parseFrom(testMessage.getDescriptorForType(), + testMessage.toByteArray()); + Descriptors.FieldDescriptor fieldDescriptor = dynamicMessage.getDescriptorForType() + .findFieldByName("order_number"); DefaultProtoField defaultProtoField = new DefaultProtoField(dynamicMessage.getField(fieldDescriptor)); Object value = defaultProtoField.getValue(); diff --git a/src/test/java/io/odpf/depot/message/proto/converter/fields/EnumProtoFieldTest.java b/src/test/java/org/raystack/depot/message/proto/converter/fields/EnumProtoFieldTest.java similarity index 80% rename from src/test/java/io/odpf/depot/message/proto/converter/fields/EnumProtoFieldTest.java rename to src/test/java/org/raystack/depot/message/proto/converter/fields/EnumProtoFieldTest.java index 8ef05240..147aac79 100644 --- a/src/test/java/io/odpf/depot/message/proto/converter/fields/EnumProtoFieldTest.java +++ b/src/test/java/org/raystack/depot/message/proto/converter/fields/EnumProtoFieldTest.java @@ -1,10 +1,10 @@ -package io.odpf.depot.message.proto.converter.fields; +package org.raystack.depot.message.proto.converter.fields; import com.google.protobuf.Descriptors; import com.google.protobuf.DynamicMessage; import com.google.protobuf.InvalidProtocolBufferException; -import io.odpf.depot.TestEnumMessage; -import io.odpf.depot.TestStatus; +import org.raystack.depot.TestEnumMessage; +import org.raystack.depot.TestStatus; import org.junit.Before; import org.junit.Test; @@ -20,8 +20,10 @@ public class EnumProtoFieldTest { @Before public void setUp() throws Exception { TestEnumMessage testEnumMessage = TestEnumMessage.newBuilder().setLastStatus(TestStatus.Enum.CREATED).build(); - DynamicMessage dynamicMessage = DynamicMessage.parseFrom(testEnumMessage.getDescriptorForType(), testEnumMessage.toByteArray()); - Descriptors.FieldDescriptor fieldDescriptor = dynamicMessage.getDescriptorForType().findFieldByName("last_status"); + DynamicMessage dynamicMessage = DynamicMessage.parseFrom(testEnumMessage.getDescriptorForType(), + testEnumMessage.toByteArray()); + Descriptors.FieldDescriptor fieldDescriptor = dynamicMessage.getDescriptorForType() + .findFieldByName("last_status"); enumProtoField = new EnumProtoField(fieldDescriptor, dynamicMessage.getField(fieldDescriptor)); } @@ -37,8 +39,10 @@ public void shouldConvertRepeatedProtobufEnumToListOfString() throws InvalidProt .addStatusHistory(TestStatus.Enum.CREATED) .addStatusHistory(TestStatus.Enum.IN_PROGRESS) .build(); - DynamicMessage dynamicMessage = DynamicMessage.parseFrom(testEnumMessage.getDescriptorForType(), testEnumMessage.toByteArray()); - Descriptors.FieldDescriptor fieldDescriptor = dynamicMessage.getDescriptorForType().findFieldByName("status_history"); + DynamicMessage dynamicMessage = DynamicMessage.parseFrom(testEnumMessage.getDescriptorForType(), + testEnumMessage.toByteArray()); + Descriptors.FieldDescriptor fieldDescriptor = dynamicMessage.getDescriptorForType() + .findFieldByName("status_history"); enumProtoField = new EnumProtoField(fieldDescriptor, dynamicMessage.getField(fieldDescriptor)); Object fieldValue = enumProtoField.getValue(); diff --git a/src/test/java/io/odpf/depot/message/proto/converter/fields/MessageProtoFieldTest.java b/src/test/java/org/raystack/depot/message/proto/converter/fields/MessageProtoFieldTest.java similarity index 80% rename from src/test/java/io/odpf/depot/message/proto/converter/fields/MessageProtoFieldTest.java rename to src/test/java/org/raystack/depot/message/proto/converter/fields/MessageProtoFieldTest.java index 
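`EnumProtoField` converts enum values to strings; under protobuf reflection an enum field surfaces as an `EnumValueDescriptor`, whose name is what a string-oriented sink would keep. Illustrated with the well-known `Field` type so the snippet stands alone:

```java
import com.google.protobuf.Descriptors;
import com.google.protobuf.DynamicMessage;
import com.google.protobuf.Field;
import com.google.protobuf.InvalidProtocolBufferException;

public class EnumFieldSketch {
    public static void main(String[] args) throws InvalidProtocolBufferException {
        Field field = Field.newBuilder().setKind(Field.Kind.TYPE_STRING).build();
        DynamicMessage dm = DynamicMessage.parseFrom(Field.getDescriptor(), field.toByteArray());
        Descriptors.FieldDescriptor fd = dm.getDescriptorForType().findFieldByName("kind");
        // Reflection hands back an EnumValueDescriptor; keep its name as the string form.
        Descriptors.EnumValueDescriptor value = (Descriptors.EnumValueDescriptor) dm.getField(fd);
        System.out.println(value.getName()); // TYPE_STRING
    }
}
```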
14528037..fe85fbbd 100644 --- a/src/test/java/io/odpf/depot/message/proto/converter/fields/MessageProtoFieldTest.java +++ b/src/test/java/org/raystack/depot/message/proto/converter/fields/MessageProtoFieldTest.java @@ -1,9 +1,9 @@ -package io.odpf.depot.message.proto.converter.fields; +package org.raystack.depot.message.proto.converter.fields; import com.google.protobuf.Descriptors; import com.google.protobuf.DynamicMessage; -import io.odpf.depot.TestMessage; -import io.odpf.depot.TestNestedMessage; +import org.raystack.depot.TestMessage; +import org.raystack.depot.TestNestedMessage; import org.junit.Before; import org.junit.Test; @@ -23,9 +23,11 @@ public void setUp() throws Exception { TestNestedMessage nestedMessage = TestNestedMessage.newBuilder() .setSingleMessage(childField) .build(); - DynamicMessage dynamicMessage = DynamicMessage.parseFrom(nestedMessage.getDescriptorForType(), nestedMessage.toByteArray()); + DynamicMessage dynamicMessage = DynamicMessage.parseFrom(nestedMessage.getDescriptorForType(), + nestedMessage.toByteArray()); - Descriptors.FieldDescriptor fieldDescriptor = nestedMessage.getDescriptorForType().findFieldByName("single_message"); + Descriptors.FieldDescriptor fieldDescriptor = nestedMessage.getDescriptorForType() + .findFieldByName("single_message"); messageProtoField = new MessageProtoField(fieldDescriptor, dynamicMessage.getField(fieldDescriptor)); } diff --git a/src/test/java/org/raystack/depot/message/proto/converter/fields/StructProtoFieldTest.java b/src/test/java/org/raystack/depot/message/proto/converter/fields/StructProtoFieldTest.java new file mode 100644 index 00000000..8d47faf5 --- /dev/null +++ b/src/test/java/org/raystack/depot/message/proto/converter/fields/StructProtoFieldTest.java @@ -0,0 +1,101 @@ +package org.raystack.depot.message.proto.converter.fields; + +import com.google.protobuf.*; +import org.raystack.depot.TestStructMessage; +import org.junit.Before; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; + +public class StructProtoFieldTest { + + private StructProtoField structProtoField; + private Struct structValue; + + @Before + public void setUp() throws Exception { + List listValues = new ArrayList<>(); + listValues.add(Value.newBuilder().setNumberValue(1).build()); + listValues.add(Value.newBuilder().setNumberValue(2).build()); + + structValue = Struct.newBuilder() + .putFields("null_value", Value.newBuilder().setNullValue(NullValue.NULL_VALUE) + .build()) + .putFields("number_value", Value.newBuilder().setNumberValue(2.0).build()) + .putFields("string_value", Value.newBuilder().setStringValue("").build()) + .putFields("bool_value", Value.newBuilder().setBoolValue(false).build()) + .putFields("struct_value", Value.newBuilder().setStructValue( + Struct.newBuilder() + .putFields("child_value1", + Value.newBuilder().setNumberValue(1.0) + .build()) + .build()) + .build()) + .putFields("list_value", Value.newBuilder().setListValue(ListValue.newBuilder() + .addAllValues(listValues).build()).build()) + .build(); + TestStructMessage message = TestStructMessage.newBuilder() + .setOrderNumber("123X") + .setCustomFields(structValue) + .build(); + + DynamicMessage dynamicMessage = DynamicMessage.parseFrom(message.getDescriptorForType(), + message.toByteArray()); + Descriptors.FieldDescriptor customValues = dynamicMessage.getDescriptorForType() + .findFieldByName("custom_fields"); + structProtoField = new 
StructProtoField(customValues, dynamicMessage.getField(customValues)); + } + + @Test + public void shouldSerialiseStructIntoJson() { + String value = (String) structProtoField.getValue(); + String jsonStr = "{\"null_value\":null," + + "\"number_value\":2.0," + + "\"string_value\":\"\"," + + "\"bool_value\":false," + + "\"struct_value\":{\"child_value1\":1.0}," + + "\"list_value\":[1.0,2.0]}"; + + assertEquals(jsonStr, value); + } + + @Test + public void shouldSerialiseRepeatedStructsIntoJson() throws InvalidProtocolBufferException { + Struct simpleStruct = Struct.newBuilder() + .putFields("null_value", Value.newBuilder().setNullValue(NullValue.NULL_VALUE) + .build()) + .putFields("number_value", Value.newBuilder().setNumberValue(2.0).build()) + .build(); + + TestStructMessage message = TestStructMessage.newBuilder() + .setOrderNumber("123X") + .addListCustomFields(simpleStruct) + .addListCustomFields(simpleStruct) + .build(); + + DynamicMessage dynamicMessage = DynamicMessage.parseFrom(message.getDescriptorForType(), + message.toByteArray()); + Descriptors.FieldDescriptor listCustomFieldsDescriptor = dynamicMessage.getDescriptorForType() + .findFieldByName("list_custom_fields"); + structProtoField = new StructProtoField(listCustomFieldsDescriptor, + dynamicMessage.getField(listCustomFieldsDescriptor)); + + Object value = structProtoField.getValue(); + + List jsonStrList = new ArrayList<>(); + jsonStrList.add("{\"null_value\":null,\"number_value\":2.0}"); + jsonStrList.add("{\"null_value\":null,\"number_value\":2.0}"); + + assertEquals(jsonStrList, value); + } + + @Test + public void shouldMatchStruct() { + boolean isMatch = structProtoField.matches(); + assertTrue(isMatch); + } +} diff --git a/src/test/java/io/odpf/depot/message/proto/converter/fields/TimestampProtoFieldTest.java b/src/test/java/org/raystack/depot/message/proto/converter/fields/TimestampProtoFieldTest.java similarity index 88% rename from src/test/java/io/odpf/depot/message/proto/converter/fields/TimestampProtoFieldTest.java rename to src/test/java/org/raystack/depot/message/proto/converter/fields/TimestampProtoFieldTest.java index 7730f71a..c19730d7 100644 --- a/src/test/java/io/odpf/depot/message/proto/converter/fields/TimestampProtoFieldTest.java +++ b/src/test/java/org/raystack/depot/message/proto/converter/fields/TimestampProtoFieldTest.java @@ -1,10 +1,10 @@ -package io.odpf.depot.message.proto.converter.fields; +package org.raystack.depot.message.proto.converter.fields; import com.google.protobuf.Descriptors; import com.google.protobuf.DynamicMessage; import com.google.protobuf.InvalidProtocolBufferException; import com.google.protobuf.Timestamp; -import io.odpf.depot.TestDurationMessage; +import org.raystack.depot.TestDurationMessage; import org.junit.Before; import org.junit.Test; @@ -27,7 +27,8 @@ public void setUp() throws Exception { .build()) .build(); DynamicMessage dynamicMessage = DynamicMessage.parseFrom(message.getDescriptorForType(), message.toByteArray()); - Descriptors.FieldDescriptor fieldDescriptor = dynamicMessage.getDescriptorForType().findFieldByName("event_timestamp"); + Descriptors.FieldDescriptor fieldDescriptor = dynamicMessage.getDescriptorForType() + .findFieldByName("event_timestamp"); timestampProtoField = new TimestampProtoField(fieldDescriptor, dynamicMessage.getField(fieldDescriptor)); } diff --git a/src/test/java/io/odpf/depot/metrics/InstrumentationTest.java b/src/test/java/org/raystack/depot/metrics/InstrumentationTest.java similarity index 81% rename from 
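The expected JSON strings above show `StructProtoField` serialising a `Struct` into compact JSON. `JsonFormat` from `protobuf-java-util` produces an equivalent mapping for the same well-known type, though it is not necessarily what the field converter uses internally:

```java
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.protobuf.NullValue;
import com.google.protobuf.Struct;
import com.google.protobuf.Value;
import com.google.protobuf.util.JsonFormat;

public class StructToJsonSketch {
    public static void main(String[] args) throws InvalidProtocolBufferException {
        Struct struct = Struct.newBuilder()
                .putFields("null_value", Value.newBuilder().setNullValue(NullValue.NULL_VALUE).build())
                .putFields("number_value", Value.newBuilder().setNumberValue(2.0).build())
                .build();
        // Struct is a well-known type, so JsonFormat maps it straight to a JSON object
        // with null_value -> null and number_value -> 2.0 (pretty-printed by default).
        String json = JsonFormat.printer().print(struct);
        System.out.println(json);
    }
}
```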
src/test/java/io/odpf/depot/metrics/InstrumentationTest.java rename to src/test/java/org/raystack/depot/metrics/InstrumentationTest.java index beaf93d9..9a5e6afb 100644 --- a/src/test/java/io/odpf/depot/metrics/InstrumentationTest.java +++ b/src/test/java/org/raystack/depot/metrics/InstrumentationTest.java @@ -1,4 +1,4 @@ -package io.odpf.depot.metrics; +package org.raystack.depot.metrics; import org.junit.Before; import org.junit.Test; @@ -35,7 +35,7 @@ public void setUp() { @Test public void shouldLogString() { instrumentation.logInfo(testMessage); - verify(logger, times(1)).info(testMessage, new Object[]{}); + verify(logger, times(1)).info(testMessage, new Object[] {}); } @Test @@ -65,9 +65,11 @@ public void shouldLogErrorStringTemplate() { @Test public void shouldCaptureNonFatalErrorWithStringMessage() { instrumentation.captureNonFatalError("test_metric", e, testMessage); - verify(logger, times(1)).warn(testMessage, new Object[]{}); + verify(logger, times(1)).warn(testMessage, new Object[] {}); verify(logger, times(1)).warn(e.getMessage(), e); - verify(statsDReporter, times(1)).recordEvent("test_metric", SinkMetrics.NON_FATAL_ERROR, SinkMetrics.ERROR_MESSAGE_CLASS_TAG + "=" + e.getClass().getName() + ",type=" + SinkMetrics.NON_FATAL_ERROR); + verify(statsDReporter, times(1)).recordEvent("test_metric", SinkMetrics.NON_FATAL_ERROR, + SinkMetrics.ERROR_MESSAGE_CLASS_TAG + "=" + e.getClass().getName() + ",type=" + + SinkMetrics.NON_FATAL_ERROR); } @Test @@ -75,15 +77,19 @@ public void shouldCaptureNonFatalErrorWithStringTemplate() { instrumentation.captureNonFatalError("test_metric", e, testTemplate, 1, 2, 3); verify(logger, times(1)).warn(testTemplate, 1, 2, 3); verify(logger, times(1)).warn(e.getMessage(), e); - verify(statsDReporter, times(1)).recordEvent("test_metric", SinkMetrics.NON_FATAL_ERROR, SinkMetrics.ERROR_MESSAGE_CLASS_TAG + "=" + e.getClass().getName() + ",type=" + SinkMetrics.NON_FATAL_ERROR); + verify(statsDReporter, times(1)).recordEvent("test_metric", SinkMetrics.NON_FATAL_ERROR, + SinkMetrics.ERROR_MESSAGE_CLASS_TAG + "=" + e.getClass().getName() + ",type=" + + SinkMetrics.NON_FATAL_ERROR); } @Test public void shouldCaptureFatalErrorWithStringMessage() { instrumentation.captureFatalError("test_metric", e, testMessage); - verify(logger, times(1)).error(testMessage, new Object[]{}); + verify(logger, times(1)).error(testMessage, new Object[] {}); verify(logger, times(1)).error(e.getMessage(), e); - verify(statsDReporter, times(1)).recordEvent("test_metric", SinkMetrics.FATAL_ERROR, SinkMetrics.ERROR_MESSAGE_CLASS_TAG + "=" + e.getClass().getName() + ",type=" + SinkMetrics.FATAL_ERROR); + verify(statsDReporter, times(1)).recordEvent("test_metric", SinkMetrics.FATAL_ERROR, + SinkMetrics.ERROR_MESSAGE_CLASS_TAG + "=" + e.getClass().getName() + ",type=" + + SinkMetrics.FATAL_ERROR); } @Test @@ -91,7 +97,9 @@ public void shouldCaptureFatalErrorWithStringTemplate() { instrumentation.captureFatalError("test", e, testTemplate, 1, 2, 3); verify(logger, times(1)).error(testTemplate, 1, 2, 3); verify(logger, times(1)).error(e.getMessage(), e); - verify(statsDReporter, times(1)).recordEvent("test", SinkMetrics.FATAL_ERROR, SinkMetrics.ERROR_MESSAGE_CLASS_TAG + "=" + e.getClass().getName() + ",type=" + SinkMetrics.FATAL_ERROR); + verify(statsDReporter, times(1)).recordEvent("test", SinkMetrics.FATAL_ERROR, + SinkMetrics.ERROR_MESSAGE_CLASS_TAG + "=" + e.getClass().getName() + ",type=" + + SinkMetrics.FATAL_ERROR); } @Test @@ -115,7 +123,7 @@ public void shouldIncrementCounterWithTags() 
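The error-capture tests above assert a statsd event tag of the shape `<classTag>=<exception class>,type=<errorType>`. The actual constant values in `SinkMetrics` are not visible in this diff, so the sketch below uses placeholders:

```java
import java.io.IOException;

public class ErrorEventTagSketch {
    public static void main(String[] args) {
        Exception e = new IOException("connection reset");
        // Placeholders; the real SinkMetrics constants are not shown in this diff.
        String errorMessageClassTag = "error_message_class";
        String nonFatalError = "nonfatal";
        String tag = errorMessageClassTag + "=" + e.getClass().getName() + ",type=" + nonFatalError;
        System.out.println(tag); // error_message_class=java.io.IOException,type=nonfatal
    }
}
```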
{ public void shouldIncrementCounter() { String metric = "test_metric"; instrumentation.incrementCounter(metric); - verify(statsDReporter, times(1)).increment(metric, new String[]{}); + verify(statsDReporter, times(1)).increment(metric, new String[] {}); } @Test diff --git a/src/test/java/org/raystack/depot/metrics/MetricsTest.java b/src/test/java/org/raystack/depot/metrics/MetricsTest.java new file mode 100644 index 00000000..abbbba94 --- /dev/null +++ b/src/test/java/org/raystack/depot/metrics/MetricsTest.java @@ -0,0 +1,4 @@ +package org.raystack.depot.metrics; + +public class MetricsTest { +} diff --git a/src/test/java/io/odpf/depot/redis/RedisSinkTest.java b/src/test/java/org/raystack/depot/redis/RedisSinkTest.java similarity index 66% rename from src/test/java/io/odpf/depot/redis/RedisSinkTest.java rename to src/test/java/org/raystack/depot/redis/RedisSinkTest.java index cc45e9d7..7bbd9f4d 100644 --- a/src/test/java/io/odpf/depot/redis/RedisSinkTest.java +++ b/src/test/java/org/raystack/depot/redis/RedisSinkTest.java @@ -1,16 +1,16 @@ -package io.odpf.depot.redis; +package org.raystack.depot.redis; -import io.odpf.depot.OdpfSinkResponse; -import io.odpf.depot.error.ErrorInfo; -import io.odpf.depot.error.ErrorType; -import io.odpf.depot.exception.ConfigurationException; -import io.odpf.depot.message.OdpfMessage; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.redis.client.RedisClient; -import io.odpf.depot.redis.client.entry.RedisListEntry; -import io.odpf.depot.redis.client.response.RedisResponse; -import io.odpf.depot.redis.parsers.RedisParser; -import io.odpf.depot.redis.record.RedisRecord; +import org.raystack.depot.redis.client.RedisClient; +import org.raystack.depot.redis.client.entry.RedisListEntry; +import org.raystack.depot.redis.client.response.RedisResponse; +import org.raystack.depot.redis.parsers.RedisParser; +import org.raystack.depot.redis.record.RedisRecord; +import org.raystack.depot.SinkResponse; +import org.raystack.depot.error.ErrorInfo; +import org.raystack.depot.error.ErrorType; +import org.raystack.depot.exception.ConfigurationException; +import org.raystack.depot.message.Message; +import org.raystack.depot.metrics.Instrumentation; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; @@ -36,7 +36,7 @@ public class RedisSinkTest { @Test public void shouldPushToSink() { - List messages = new ArrayList<>(); + List messages = new ArrayList<>(); List records = new ArrayList<>(); records.add(new RedisRecord(new RedisListEntry("key1", "val1", null), 0L, null, null, true)); records.add(new RedisRecord(new RedisListEntry("key1", "val1", null), 1L, null, null, true)); @@ -52,17 +52,19 @@ public void shouldPushToSink() { when(redisParser.convert(messages)).thenReturn(records); when(redisClient.send(records)).thenReturn(responses); RedisSink redisSink = new RedisSink(redisClient, redisParser, instrumentation); - OdpfSinkResponse odpfSinkResponse = redisSink.pushToSink(messages); - Assert.assertFalse(odpfSinkResponse.hasErrors()); + SinkResponse sinkResponse = redisSink.pushToSink(messages); + Assert.assertFalse(sinkResponse.hasErrors()); } @Test public void shouldReportParsingErrors() { - List messages = new ArrayList<>(); + List messages = new ArrayList<>(); List records = new ArrayList<>(); - records.add(new RedisRecord(null, 0L, new ErrorInfo(new IOException(""), ErrorType.DESERIALIZATION_ERROR), null, false)); + records.add(new RedisRecord(null, 0L, new ErrorInfo(new IOException(""), ErrorType.DESERIALIZATION_ERROR), 
null, + false)); records.add(new RedisRecord(new RedisListEntry("key1", "val1", null), 1L, null, null, true)); - records.add(new RedisRecord(null, 2L, new ErrorInfo(new ConfigurationException(""), ErrorType.DEFAULT_ERROR), null, false)); + records.add(new RedisRecord(null, 2L, new ErrorInfo(new ConfigurationException(""), ErrorType.DEFAULT_ERROR), + null, false)); records.add(new RedisRecord(new RedisListEntry("key1", "val1", null), 3L, null, null, true)); records.add(new RedisRecord(new RedisListEntry("key1", "val1", null), 4L, null, null, true)); List responses = new ArrayList<>(); @@ -73,16 +75,16 @@ public void shouldReportParsingErrors() { List validRecords = records.stream().filter(RedisRecord::isValid).collect(Collectors.toList()); when(redisClient.send(validRecords)).thenReturn(responses); RedisSink redisSink = new RedisSink(redisClient, redisParser, instrumentation); - OdpfSinkResponse odpfSinkResponse = redisSink.pushToSink(messages); - Assert.assertTrue(odpfSinkResponse.hasErrors()); - Assert.assertEquals(2, odpfSinkResponse.getErrors().size()); - Assert.assertEquals(ErrorType.DESERIALIZATION_ERROR, odpfSinkResponse.getErrorsFor(0).getErrorType()); - Assert.assertEquals(ErrorType.DEFAULT_ERROR, odpfSinkResponse.getErrorsFor(2).getErrorType()); + SinkResponse sinkResponse = redisSink.pushToSink(messages); + Assert.assertTrue(sinkResponse.hasErrors()); + Assert.assertEquals(2, sinkResponse.getErrors().size()); + Assert.assertEquals(ErrorType.DESERIALIZATION_ERROR, sinkResponse.getErrorsFor(0).getErrorType()); + Assert.assertEquals(ErrorType.DEFAULT_ERROR, sinkResponse.getErrorsFor(2).getErrorType()); } @Test public void shouldReportClientErrors() { - List messages = new ArrayList<>(); + List messages = new ArrayList<>(); List records = new ArrayList<>(); records.add(new RedisRecord(new RedisListEntry("key1", "val1", null), 0L, null, null, true)); records.add(new RedisRecord(new RedisListEntry("key1", "val1", null), 1L, null, null, true)); @@ -106,24 +108,26 @@ public void shouldReportClientErrors() { when(redisClient.send(validRecords)).thenReturn(responses); when(redisClient.send(records)).thenReturn(responses); RedisSink redisSink = new RedisSink(redisClient, redisParser, instrumentation); - OdpfSinkResponse odpfSinkResponse = redisSink.pushToSink(messages); - Assert.assertTrue(odpfSinkResponse.hasErrors()); - Assert.assertEquals(3, odpfSinkResponse.getErrors().size()); - Assert.assertEquals(ErrorType.DEFAULT_ERROR, odpfSinkResponse.getErrorsFor(2).getErrorType()); - Assert.assertEquals(ErrorType.DEFAULT_ERROR, odpfSinkResponse.getErrorsFor(3).getErrorType()); - Assert.assertEquals(ErrorType.DEFAULT_ERROR, odpfSinkResponse.getErrorsFor(4).getErrorType()); - Assert.assertEquals("failed at 2", odpfSinkResponse.getErrorsFor(2).getException().getMessage()); - Assert.assertEquals("failed at 3", odpfSinkResponse.getErrorsFor(3).getException().getMessage()); - Assert.assertEquals("failed at 4", odpfSinkResponse.getErrorsFor(4).getException().getMessage()); + SinkResponse sinkResponse = redisSink.pushToSink(messages); + Assert.assertTrue(sinkResponse.hasErrors()); + Assert.assertEquals(3, sinkResponse.getErrors().size()); + Assert.assertEquals(ErrorType.DEFAULT_ERROR, sinkResponse.getErrorsFor(2).getErrorType()); + Assert.assertEquals(ErrorType.DEFAULT_ERROR, sinkResponse.getErrorsFor(3).getErrorType()); + Assert.assertEquals(ErrorType.DEFAULT_ERROR, sinkResponse.getErrorsFor(4).getErrorType()); + Assert.assertEquals("failed at 2", 
sinkResponse.getErrorsFor(2).getException().getMessage()); + Assert.assertEquals("failed at 3", sinkResponse.getErrorsFor(3).getException().getMessage()); + Assert.assertEquals("failed at 4", sinkResponse.getErrorsFor(4).getException().getMessage()); } @Test public void shouldReportNetErrors() { - List messages = new ArrayList<>(); + List messages = new ArrayList<>(); List records = new ArrayList<>(); - records.add(new RedisRecord(null, 0L, new ErrorInfo(new IOException(""), ErrorType.DESERIALIZATION_ERROR), null, false)); + records.add(new RedisRecord(null, 0L, new ErrorInfo(new IOException(""), ErrorType.DESERIALIZATION_ERROR), null, + false)); records.add(new RedisRecord(new RedisListEntry("key1", "val1", null), 1L, null, null, true)); - records.add(new RedisRecord(null, 2L, new ErrorInfo(new ConfigurationException(""), ErrorType.DEFAULT_ERROR), null, false)); + records.add(new RedisRecord(null, 2L, new ErrorInfo(new ConfigurationException(""), ErrorType.DEFAULT_ERROR), + null, false)); records.add(new RedisRecord(new RedisListEntry("key1", "val1", null), 3L, null, null, true)); records.add(new RedisRecord(new RedisListEntry("key1", "val1", null), 4L, null, null, true)); List responses = new ArrayList<>(); @@ -138,11 +142,11 @@ public void shouldReportNetErrors() { List validRecords = records.stream().filter(RedisRecord::isValid).collect(Collectors.toList()); when(redisClient.send(validRecords)).thenReturn(responses); RedisSink redisSink = new RedisSink(redisClient, redisParser, instrumentation); - OdpfSinkResponse odpfSinkResponse = redisSink.pushToSink(messages); - Assert.assertEquals(4, odpfSinkResponse.getErrors().size()); - Assert.assertEquals(ErrorType.DESERIALIZATION_ERROR, odpfSinkResponse.getErrorsFor(0).getErrorType()); - Assert.assertEquals(ErrorType.DEFAULT_ERROR, odpfSinkResponse.getErrorsFor(2).getErrorType()); - Assert.assertEquals("failed at 3", odpfSinkResponse.getErrorsFor(3).getException().getMessage()); - Assert.assertEquals("failed at 4", odpfSinkResponse.getErrorsFor(4).getException().getMessage()); + SinkResponse sinkResponse = redisSink.pushToSink(messages); + Assert.assertEquals(4, sinkResponse.getErrors().size()); + Assert.assertEquals(ErrorType.DESERIALIZATION_ERROR, sinkResponse.getErrorsFor(0).getErrorType()); + Assert.assertEquals(ErrorType.DEFAULT_ERROR, sinkResponse.getErrorsFor(2).getErrorType()); + Assert.assertEquals("failed at 3", sinkResponse.getErrorsFor(3).getException().getMessage()); + Assert.assertEquals("failed at 4", sinkResponse.getErrorsFor(4).getException().getMessage()); } } diff --git a/src/test/java/io/odpf/depot/redis/client/RedisClientFactoryTest.java b/src/test/java/org/raystack/depot/redis/client/RedisClientFactoryTest.java similarity index 92% rename from src/test/java/io/odpf/depot/redis/client/RedisClientFactoryTest.java rename to src/test/java/org/raystack/depot/redis/client/RedisClientFactoryTest.java index 4c52d313..77c33424 100644 --- a/src/test/java/io/odpf/depot/redis/client/RedisClientFactoryTest.java +++ b/src/test/java/org/raystack/depot/redis/client/RedisClientFactoryTest.java @@ -1,12 +1,10 @@ -package io.odpf.depot.redis.client; +package org.raystack.depot.redis.client; - - -import io.odpf.depot.config.RedisSinkConfig; -import io.odpf.depot.exception.ConfigurationException; -import io.odpf.depot.metrics.StatsDReporter; -import io.odpf.depot.redis.enums.RedisSinkDeploymentType; -import io.odpf.depot.redis.enums.RedisSinkTtlType; +import org.raystack.depot.config.RedisSinkConfig; +import 
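Taken together, the `RedisSinkTest` cases pin down the caller-facing contract of the renamed API. Below is a hypothetical consumer; the package of `Sink` and the null-on-success behaviour of `getErrorsFor` are assumptions inferred from the assertions, not confirmed by this diff:

```java
// Assumed: Sink lives in org.raystack.depot, and getErrorsFor(i) is null for
// messages that succeeded. Both are inferences from the tests above.
import org.raystack.depot.Sink;
import org.raystack.depot.SinkResponse;
import org.raystack.depot.message.Message;

import java.util.List;

public class SinkUsageSketch {

    // Push a batch and report per-message failures by their original index,
    // using only the SinkResponse methods the tests exercise.
    static void pushAndReport(Sink sink, List<Message> messages) throws Exception {
        SinkResponse response = sink.pushToSink(messages);
        if (response.hasErrors()) {
            for (int i = 0; i < messages.size(); i++) {
                if (response.getErrorsFor(i) != null) {
                    System.err.println("message " + i + " failed with "
                            + response.getErrorsFor(i).getErrorType());
                }
            }
        }
    }
}
```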
org.raystack.depot.metrics.StatsDReporter; +import org.raystack.depot.redis.enums.RedisSinkDeploymentType; +import org.raystack.depot.redis.enums.RedisSinkTtlType; +import org.raystack.depot.exception.ConfigurationException; import org.junit.Assert; import org.junit.Rule; import org.junit.Test; diff --git a/src/test/java/io/odpf/depot/redis/client/RedisClusterClientTest.java b/src/test/java/org/raystack/depot/redis/client/RedisClusterClientTest.java similarity index 55% rename from src/test/java/io/odpf/depot/redis/client/RedisClusterClientTest.java rename to src/test/java/org/raystack/depot/redis/client/RedisClusterClientTest.java index db051d90..62baf6bc 100644 --- a/src/test/java/io/odpf/depot/redis/client/RedisClusterClientTest.java +++ b/src/test/java/org/raystack/depot/redis/client/RedisClusterClientTest.java @@ -1,10 +1,10 @@ -package io.odpf.depot.redis.client; +package org.raystack.depot.redis.client; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.redis.client.response.RedisClusterResponse; -import io.odpf.depot.redis.client.response.RedisResponse; -import io.odpf.depot.redis.record.RedisRecord; -import io.odpf.depot.redis.ttl.RedisTtl; +import org.raystack.depot.redis.client.response.RedisClusterResponse; +import org.raystack.depot.redis.client.response.RedisResponse; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.redis.record.RedisRecord; +import org.raystack.depot.redis.ttl.RedisTtl; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; @@ -30,25 +30,29 @@ public class RedisClusterClientTest { @Test public void shouldSendToRedisCluster() { RedisClient redisClient = new RedisClusterClient(instrumentation, redisTTL, jedisCluster); - List redisRecords = new ArrayList() {{ - add(Mockito.mock(RedisRecord.class)); - add(Mockito.mock(RedisRecord.class)); - add(Mockito.mock(RedisRecord.class)); - add(Mockito.mock(RedisRecord.class)); - add(Mockito.mock(RedisRecord.class)); - add(Mockito.mock(RedisRecord.class)); - }}; - List responses = new ArrayList() {{ - add(Mockito.mock(RedisClusterResponse.class)); - add(Mockito.mock(RedisClusterResponse.class)); - add(Mockito.mock(RedisClusterResponse.class)); - add(Mockito.mock(RedisClusterResponse.class)); - add(Mockito.mock(RedisClusterResponse.class)); - add(Mockito.mock(RedisClusterResponse.class)); - }}; + List redisRecords = new ArrayList() { + { + add(Mockito.mock(RedisRecord.class)); + add(Mockito.mock(RedisRecord.class)); + add(Mockito.mock(RedisRecord.class)); + add(Mockito.mock(RedisRecord.class)); + add(Mockito.mock(RedisRecord.class)); + add(Mockito.mock(RedisRecord.class)); + } + }; + List responses = new ArrayList() { + { + add(Mockito.mock(RedisClusterResponse.class)); + add(Mockito.mock(RedisClusterResponse.class)); + add(Mockito.mock(RedisClusterResponse.class)); + add(Mockito.mock(RedisClusterResponse.class)); + add(Mockito.mock(RedisClusterResponse.class)); + add(Mockito.mock(RedisClusterResponse.class)); + } + }; IntStream.range(0, redisRecords.size()).forEach( - index -> Mockito.when(redisRecords.get(index).send(jedisCluster, redisTTL)).thenReturn(responses.get(index)) - ); + index -> Mockito.when(redisRecords.get(index).send(jedisCluster, redisTTL)) + .thenReturn(responses.get(index))); List actualResponse = redisClient.send(redisRecords); IntStream.range(0, redisRecords.size()).forEach( index -> Assert.assertEquals(responses.get(index), actualResponse.get(index))); diff --git 
a/src/test/java/io/odpf/depot/redis/client/RedisStandaloneClientTest.java b/src/test/java/org/raystack/depot/redis/client/RedisStandaloneClientTest.java similarity index 66% rename from src/test/java/io/odpf/depot/redis/client/RedisStandaloneClientTest.java rename to src/test/java/org/raystack/depot/redis/client/RedisStandaloneClientTest.java index 3d5ed1a8..7f848d42 100644 --- a/src/test/java/io/odpf/depot/redis/client/RedisStandaloneClientTest.java +++ b/src/test/java/org/raystack/depot/redis/client/RedisStandaloneClientTest.java @@ -1,10 +1,10 @@ -package io.odpf.depot.redis.client; +package org.raystack.depot.redis.client; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.redis.client.response.RedisResponse; -import io.odpf.depot.redis.client.response.RedisStandaloneResponse; -import io.odpf.depot.redis.record.RedisRecord; -import io.odpf.depot.redis.ttl.RedisTtl; +import org.raystack.depot.redis.client.response.RedisResponse; +import org.raystack.depot.redis.client.response.RedisStandaloneResponse; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.redis.record.RedisRecord; +import org.raystack.depot.redis.ttl.RedisTtl; import org.junit.Assert; import org.junit.Test; import org.junit.runner.RunWith; @@ -20,7 +20,6 @@ import java.util.List; import java.util.stream.IntStream; - @RunWith(MockitoJUnitRunner.class) public class RedisStandaloneClientTest { @Mock @@ -48,28 +47,31 @@ public void shouldSendRecordsToJedis() { Mockito.when(pipeline.exec()).thenReturn(response); Object ob = new Object(); Mockito.when(response.get()).thenReturn(ob); - List redisRecords = new ArrayList() {{ - add(Mockito.mock(RedisRecord.class)); - add(Mockito.mock(RedisRecord.class)); - add(Mockito.mock(RedisRecord.class)); - add(Mockito.mock(RedisRecord.class)); - add(Mockito.mock(RedisRecord.class)); - add(Mockito.mock(RedisRecord.class)); - }}; - List responses = new ArrayList() {{ - add(Mockito.mock(RedisStandaloneResponse.class)); - add(Mockito.mock(RedisStandaloneResponse.class)); - add(Mockito.mock(RedisStandaloneResponse.class)); - add(Mockito.mock(RedisStandaloneResponse.class)); - add(Mockito.mock(RedisStandaloneResponse.class)); - add(Mockito.mock(RedisStandaloneResponse.class)); - }}; + List redisRecords = new ArrayList() { + { + add(Mockito.mock(RedisRecord.class)); + add(Mockito.mock(RedisRecord.class)); + add(Mockito.mock(RedisRecord.class)); + add(Mockito.mock(RedisRecord.class)); + add(Mockito.mock(RedisRecord.class)); + add(Mockito.mock(RedisRecord.class)); + } + }; + List responses = new ArrayList() { + { + add(Mockito.mock(RedisStandaloneResponse.class)); + add(Mockito.mock(RedisStandaloneResponse.class)); + add(Mockito.mock(RedisStandaloneResponse.class)); + add(Mockito.mock(RedisStandaloneResponse.class)); + add(Mockito.mock(RedisStandaloneResponse.class)); + add(Mockito.mock(RedisStandaloneResponse.class)); + } + }; IntStream.range(0, redisRecords.size()).forEach( index -> { Mockito.when(redisRecords.get(index).send(pipeline, redisTTL)).thenReturn(responses.get(index)); Mockito.when(responses.get(index).process()).thenReturn(responses.get(index)); - } - ); + }); List actualResponses = redisClient.send(redisRecords); Mockito.verify(pipeline, Mockito.times(1)).multi(); Mockito.verify(pipeline, Mockito.times(1)).sync(); @@ -77,7 +79,6 @@ public void shouldSendRecordsToJedis() { IntStream.range(0, actualResponses.size()).forEach( index -> { Assert.assertEquals(responses.get(index), actualResponses.get(index)); - } - ); + }); } } diff --git 
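Most of the churn in the two client tests is the formatter splitting double-brace initialisers onto their own lines. Double-brace initialisation allocates an anonymous `ArrayList` subclass per site; a stream collect (available since Java 8) is a leaner way to build such fixture lists, sketched here with plain strings standing in for the Mockito mocks:

```java
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class MockListSketch {
    public static void main(String[] args) {
        // Builds six entries without the anonymous subclass that each
        // double-brace {{ }} initialiser creates.
        List<String> records = IntStream.range(0, 6)
                .mapToObj(i -> "record-" + i)
                .collect(Collectors.toList());
        System.out.println(records.size()); // 6
    }
}
```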
a/src/test/java/io/odpf/depot/redis/client/entry/RedisHashSetFieldEntryTest.java b/src/test/java/org/raystack/depot/redis/client/entry/RedisHashSetFieldEntryTest.java similarity index 92% rename from src/test/java/io/odpf/depot/redis/client/entry/RedisHashSetFieldEntryTest.java rename to src/test/java/org/raystack/depot/redis/client/entry/RedisHashSetFieldEntryTest.java index 574d793b..8a98e0b1 100644 --- a/src/test/java/io/odpf/depot/redis/client/entry/RedisHashSetFieldEntryTest.java +++ b/src/test/java/org/raystack/depot/redis/client/entry/RedisHashSetFieldEntryTest.java @@ -1,10 +1,10 @@ -package io.odpf.depot.redis.client.entry; +package org.raystack.depot.redis.client.entry; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.redis.client.response.RedisClusterResponse; -import io.odpf.depot.redis.client.response.RedisStandaloneResponse; -import io.odpf.depot.redis.ttl.DurationTtl; -import io.odpf.depot.redis.ttl.NoRedisTtl; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.redis.client.response.RedisClusterResponse; +import org.raystack.depot.redis.client.response.RedisStandaloneResponse; +import org.raystack.depot.redis.ttl.DurationTtl; +import org.raystack.depot.redis.ttl.NoRedisTtl; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -86,7 +86,6 @@ public void shouldGetSetEntryToString() { Assert.assertEquals(expected, redisHashSetFieldEntry.toString()); } - @Test public void shouldSentToRedisForStandAlone() { Response r = Mockito.mock(Response.class); @@ -95,7 +94,8 @@ public void shouldSentToRedisForStandAlone() { RedisStandaloneResponse standaloneResponse = redisHashSetFieldEntry.send(pipeline, new NoRedisTtl()); standaloneResponse.process(); Assert.assertFalse(standaloneResponse.isFailed()); - verify(instrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", "test-value"); + verify(instrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", + "test-value"); Assert.assertEquals("HSET: 9, TTL: NoOp", standaloneResponse.getMessage()); } @@ -110,7 +110,8 @@ public void shouldSentToRedisForStandaloneWithTTL() { RedisStandaloneResponse standaloneResponse = redisHashSetFieldEntry.send(pipeline, new DurationTtl(1000)); standaloneResponse.process(); Assert.assertFalse(standaloneResponse.isFailed()); - verify(instrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", "test-value"); + verify(instrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", + "test-value"); Assert.assertEquals("HSET: 9, TTL: UPDATED", standaloneResponse.getMessage()); } @@ -125,7 +126,8 @@ public void shouldSentToRedisForStandaloneWithTTLNotUpdated() { RedisStandaloneResponse standaloneResponse = redisHashSetFieldEntry.send(pipeline, new DurationTtl(1000)); standaloneResponse.process(); Assert.assertFalse(standaloneResponse.isFailed()); - verify(instrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", "test-value"); + verify(instrumentation, times(1)).logDebug("key: {}, field: {}, value: {}", "test-key", "test-field", + "test-value"); Assert.assertEquals("HSET: 9, TTL: NOT UPDATED", standaloneResponse.getMessage()); } diff --git a/src/test/java/io/odpf/depot/redis/client/entry/RedisKeyValueEntryTest.java b/src/test/java/org/raystack/depot/redis/client/entry/RedisKeyValueEntryTest.java similarity index 95% rename from 
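The standalone responses asserted above (`HSET: 9, TTL: UPDATED`) correspond to a pipelined `HSET` plus an optional `EXPIRE`. Against a live server the flow looks roughly like this; the host and port are assumed, and the calls follow the Jedis 3.x API:

```java
import redis.clients.jedis.Jedis;
import redis.clients.jedis.Pipeline;
import redis.clients.jedis.Response;

public class JedisPipelineSketch {
    public static void main(String[] args) {
        // Assumes a Redis server reachable at localhost:6379.
        try (Jedis jedis = new Jedis("localhost", 6379)) {
            Pipeline pipeline = jedis.pipelined();
            Response<Long> hset = pipeline.hset("test-key", "test-field", "test-value");
            Response<Long> ttl = pipeline.expire("test-key", 1000);
            pipeline.sync(); // flush the pipeline and populate the Response objects
            System.out.println("HSET: " + hset.get()
                    + ", TTL: " + (ttl.get() == 1 ? "UPDATED" : "NOT UPDATED"));
        }
    }
}
```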
src/test/java/io/odpf/depot/redis/client/entry/RedisKeyValueEntryTest.java rename to src/test/java/org/raystack/depot/redis/client/entry/RedisKeyValueEntryTest.java index df928442..33cdd71d 100644 --- a/src/test/java/io/odpf/depot/redis/client/entry/RedisKeyValueEntryTest.java +++ b/src/test/java/org/raystack/depot/redis/client/entry/RedisKeyValueEntryTest.java @@ -1,11 +1,10 @@ -package io.odpf.depot.redis.client.entry; +package org.raystack.depot.redis.client.entry; - -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.redis.client.response.RedisClusterResponse; -import io.odpf.depot.redis.client.response.RedisStandaloneResponse; -import io.odpf.depot.redis.ttl.DurationTtl; -import io.odpf.depot.redis.ttl.NoRedisTtl; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.redis.client.response.RedisClusterResponse; +import org.raystack.depot.redis.client.response.RedisStandaloneResponse; +import org.raystack.depot.redis.ttl.DurationTtl; +import org.raystack.depot.redis.ttl.NoRedisTtl; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -87,7 +86,6 @@ public void shouldGetEntryToString() { Assert.assertEquals(expected, redisKeyValueEntry.toString()); } - @Test public void shouldSentToRedisForStandAlone() { Response r = Mockito.mock(Response.class); @@ -155,4 +153,3 @@ public void shouldReportFailedForJedisExceptionFromTTLForStandalone() { Assert.assertEquals("jedis error occurred", standaloneResponse.getMessage()); } } - diff --git a/src/test/java/io/odpf/depot/redis/client/entry/RedisListEntryTest.java b/src/test/java/org/raystack/depot/redis/client/entry/RedisListEntryTest.java similarity index 95% rename from src/test/java/io/odpf/depot/redis/client/entry/RedisListEntryTest.java rename to src/test/java/org/raystack/depot/redis/client/entry/RedisListEntryTest.java index 8a69a43e..20211bf1 100644 --- a/src/test/java/io/odpf/depot/redis/client/entry/RedisListEntryTest.java +++ b/src/test/java/org/raystack/depot/redis/client/entry/RedisListEntryTest.java @@ -1,10 +1,10 @@ -package io.odpf.depot.redis.client.entry; +package org.raystack.depot.redis.client.entry; -import io.odpf.depot.metrics.Instrumentation; -import io.odpf.depot.redis.client.response.RedisClusterResponse; -import io.odpf.depot.redis.client.response.RedisStandaloneResponse; -import io.odpf.depot.redis.ttl.DurationTtl; -import io.odpf.depot.redis.ttl.NoRedisTtl; +import org.raystack.depot.metrics.Instrumentation; +import org.raystack.depot.redis.client.response.RedisClusterResponse; +import org.raystack.depot.redis.client.response.RedisStandaloneResponse; +import org.raystack.depot.redis.ttl.DurationTtl; +import org.raystack.depot.redis.ttl.NoRedisTtl; import org.junit.Assert; import org.junit.Before; import org.junit.Test; @@ -86,7 +86,6 @@ public void shouldGetEntryToString() { Assert.assertEquals(expected, redisListEntry.toString()); } - @Test public void shouldSentToRedisForStandAlone() { Response r = Mockito.mock(Response.class); diff --git a/src/test/java/io/odpf/depot/redis/client/response/RedisClusterResponseTest.java b/src/test/java/org/raystack/depot/redis/client/response/RedisClusterResponseTest.java similarity index 93% rename from src/test/java/io/odpf/depot/redis/client/response/RedisClusterResponseTest.java rename to src/test/java/org/raystack/depot/redis/client/response/RedisClusterResponseTest.java index 0f8709f9..fba4182b 100644 --- a/src/test/java/io/odpf/depot/redis/client/response/RedisClusterResponseTest.java +++ 
@@ -1,4 +1,4 @@
-package io.odpf.depot.redis.client.response;
+package org.raystack.depot.redis.client.response;
 
 import org.junit.Assert;
 import org.junit.Test;
diff --git a/src/test/java/io/odpf/depot/redis/client/response/RedisStandaloneResponseTest.java b/src/test/java/org/raystack/depot/redis/client/response/RedisStandaloneResponseTest.java
similarity index 96%
rename from src/test/java/io/odpf/depot/redis/client/response/RedisStandaloneResponseTest.java
rename to src/test/java/org/raystack/depot/redis/client/response/RedisStandaloneResponseTest.java
index 24ced647..02718a84 100644
--- a/src/test/java/io/odpf/depot/redis/client/response/RedisStandaloneResponseTest.java
+++ b/src/test/java/org/raystack/depot/redis/client/response/RedisStandaloneResponseTest.java
@@ -1,4 +1,4 @@
-package io.odpf.depot.redis.client.response;
+package org.raystack.depot.redis.client.response;
 
 import org.junit.Assert;
 import org.junit.Test;
diff --git a/src/test/java/io/odpf/depot/redis/parsers/RedisEntryParserFactoryTest.java b/src/test/java/org/raystack/depot/redis/parsers/RedisEntryParserFactoryTest.java
similarity index 93%
rename from src/test/java/io/odpf/depot/redis/parsers/RedisEntryParserFactoryTest.java
rename to src/test/java/org/raystack/depot/redis/parsers/RedisEntryParserFactoryTest.java
index eaebe1c0..46c6134a 100644
--- a/src/test/java/io/odpf/depot/redis/parsers/RedisEntryParserFactoryTest.java
+++ b/src/test/java/org/raystack/depot/redis/parsers/RedisEntryParserFactoryTest.java
@@ -1,10 +1,10 @@
-package io.odpf.depot.redis.parsers;
+package org.raystack.depot.redis.parsers;
 
-import io.odpf.depot.config.RedisSinkConfig;
-import io.odpf.depot.config.converter.JsonToPropertiesConverter;
-import io.odpf.depot.message.OdpfMessageSchema;
-import io.odpf.depot.metrics.StatsDReporter;
-import io.odpf.depot.redis.enums.RedisSinkDataType;
+import org.raystack.depot.config.RedisSinkConfig;
+import org.raystack.depot.config.converter.JsonToPropertiesConverter;
+import org.raystack.depot.message.MessageSchema;
+import org.raystack.depot.metrics.StatsDReporter;
+import org.raystack.depot.redis.enums.RedisSinkDataType;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -23,7 +23,7 @@ public class RedisEntryParserFactoryTest {
     @Mock
     private StatsDReporter statsDReporter;
     @Mock
-    private OdpfMessageSchema schema;
+    private MessageSchema schema;
 
     @Before
     public void setup() {
diff --git a/src/test/java/io/odpf/depot/redis/parsers/RedisHashSetEntryParserTest.java b/src/test/java/org/raystack/depot/redis/parsers/RedisHashSetEntryParserTest.java
similarity index 58%
rename from src/test/java/io/odpf/depot/redis/parsers/RedisHashSetEntryParserTest.java
rename to src/test/java/org/raystack/depot/redis/parsers/RedisHashSetEntryParserTest.java
index b9de1bc6..6224445e 100644
--- a/src/test/java/io/odpf/depot/redis/parsers/RedisHashSetEntryParserTest.java
+++ b/src/test/java/org/raystack/depot/redis/parsers/RedisHashSetEntryParserTest.java
@@ -1,25 +1,25 @@
-package io.odpf.depot.redis.parsers;
+package org.raystack.depot.redis.parsers;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.protobuf.Descriptors;
 import com.google.protobuf.Struct;
 import com.google.protobuf.Timestamp;
 import com.google.protobuf.Value;
-import io.odpf.depot.TestBookingLogMessage;
-import io.odpf.depot.TestKey;
-import io.odpf.depot.TestLocation;
-import io.odpf.depot.TestMessageBQ;
-import io.odpf.depot.config.RedisSinkConfig;
-import io.odpf.depot.config.converter.JsonToPropertiesConverter;
-import io.odpf.depot.message.OdpfMessage;
-import io.odpf.depot.message.OdpfMessageSchema;
-import io.odpf.depot.message.ParsedOdpfMessage;
-import io.odpf.depot.message.SinkConnectorSchemaMessageMode;
-import io.odpf.depot.message.proto.ProtoOdpfMessageParser;
-import io.odpf.depot.metrics.StatsDReporter;
-import io.odpf.depot.redis.client.entry.RedisEntry;
-import io.odpf.depot.redis.client.entry.RedisHashSetFieldEntry;
-import io.odpf.depot.redis.enums.RedisSinkDataType;
+import org.raystack.depot.redis.client.entry.RedisEntry;
+import org.raystack.depot.redis.client.entry.RedisHashSetFieldEntry;
+import org.raystack.depot.TestBookingLogMessage;
+import org.raystack.depot.TestKey;
+import org.raystack.depot.TestLocation;
+import org.raystack.depot.TestMessageBQ;
+import org.raystack.depot.config.RedisSinkConfig;
+import org.raystack.depot.config.converter.JsonToPropertiesConverter;
+import org.raystack.depot.message.Message;
+import org.raystack.depot.message.MessageSchema;
+import org.raystack.depot.message.ParsedMessage;
+import org.raystack.depot.message.SinkConnectorSchemaMessageMode;
+import org.raystack.depot.message.proto.ProtoMessageParser;
+import org.raystack.depot.metrics.StatsDReporter;
+import org.raystack.depot.redis.enums.RedisSinkDataType;
 import org.aeonbits.owner.ConfigFactory;
 import org.json.JSONArray;
 import org.junit.Assert;
@@ -36,40 +36,44 @@
 @RunWith(MockitoJUnitRunner.class)
 public class RedisHashSetEntryParserTest {
-    private final Map<String, Descriptors.Descriptor> descriptorsMap = new HashMap<String, Descriptors.Descriptor>() {{
-        put(String.format("%s", TestKey.class.getName()), TestKey.getDescriptor());
-        put(String.format("%s", TestBookingLogMessage.class.getName()), TestBookingLogMessage.getDescriptor());
-        put(String.format("%s", TestLocation.class.getName()), TestLocation.getDescriptor());
-        put(String.format("%s", TestBookingLogMessage.TopicMetadata.class.getName()), TestBookingLogMessage.TopicMetadata.getDescriptor());
-        put(String.format("%s", TestMessageBQ.class.getName()), TestMessageBQ.getDescriptor());
-        put("io.odpf.depot.TestMessageBQ.CurrentStateEntry", TestMessageBQ.getDescriptor().getNestedTypes().get(0));
-        put("com.google.protobuf.Struct.FieldsEntry", Struct.getDescriptor().getNestedTypes().get(0));
-        put("com.google.protobuf.Duration", com.google.protobuf.Duration.getDescriptor());
-        put("com.google.type.Date", com.google.type.Date.getDescriptor());
-    }};
+    private final Map<String, Descriptors.Descriptor> descriptorsMap = new HashMap<String, Descriptors.Descriptor>() {
+        {
+            put(String.format("%s", TestKey.class.getName()), TestKey.getDescriptor());
+            put(String.format("%s", TestBookingLogMessage.class.getName()), TestBookingLogMessage.getDescriptor());
+            put(String.format("%s", TestLocation.class.getName()), TestLocation.getDescriptor());
+            put(String.format("%s", TestBookingLogMessage.TopicMetadata.class.getName()),
+                    TestBookingLogMessage.TopicMetadata.getDescriptor());
+            put(String.format("%s", TestMessageBQ.class.getName()), TestMessageBQ.getDescriptor());
+            put("org.raystack.depot.TestMessageBQ.CurrentStateEntry",
+                    TestMessageBQ.getDescriptor().getNestedTypes().get(0));
+            put("com.google.protobuf.Struct.FieldsEntry", Struct.getDescriptor().getNestedTypes().get(0));
+            put("com.google.protobuf.Duration", com.google.protobuf.Duration.getDescriptor());
+            put("com.google.type.Date", com.google.type.Date.getDescriptor());
+        }
+    };
 
     @Mock
     private RedisSinkConfig redisSinkConfig;
     @Mock
     private StatsDReporter statsDReporter;
-    private ParsedOdpfMessage parsedBookingMessage;
-    private ParsedOdpfMessage parsedOdpfKey;
-    private OdpfMessageSchema schemaBooking;
-    private OdpfMessageSchema schemaKey;
+    private ParsedMessage parsedBookingMessage;
+    private ParsedMessage parsedKey;
+    private MessageSchema schemaBooking;
+    private MessageSchema schemaKey;
 
     private void redisSinkSetup(String field) throws IOException {
         when(redisSinkConfig.getSinkRedisDataType()).thenReturn(RedisSinkDataType.HASHSET);
         when(redisSinkConfig.getSinkRedisHashsetFieldToColumnMapping()).thenReturn(new JsonToPropertiesConverter().convert(null, field));
         when(redisSinkConfig.getSinkRedisKeyTemplate()).thenReturn("test-key");
-        String schemaBookingClass = "io.odpf.depot.TestBookingLogMessage";
-        String schemaKeyClass = "io.odpf.depot.TestKey";
+        String schemaBookingClass = "org.raystack.depot.TestBookingLogMessage";
+        String schemaKeyClass = "org.raystack.depot.TestKey";
         TestKey testKey = TestKey.newBuilder().setOrderNumber("ORDER-1-FROM-KEY").build();
         TestBookingLogMessage testBookingLogMessage = TestBookingLogMessage.newBuilder().setOrderNumber("booking-order-1").setCustomerTotalFareWithoutSurge(2000L).setAmountPaidByCash(12.3F).build();
-        OdpfMessage bookingMessage = new OdpfMessage(testKey.toByteArray(), testBookingLogMessage.toByteArray());
-        ProtoOdpfMessageParser odpfMessageParser = new ProtoOdpfMessageParser(redisSinkConfig, statsDReporter, null);
-        parsedBookingMessage = odpfMessageParser.parse(bookingMessage, SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaBookingClass);
-        parsedOdpfKey = odpfMessageParser.parse(bookingMessage, SinkConnectorSchemaMessageMode.LOG_KEY, schemaKeyClass);
-        schemaBooking = odpfMessageParser.getSchema(schemaBookingClass, descriptorsMap);
-        schemaKey = odpfMessageParser.getSchema(schemaKeyClass, descriptorsMap);
+        Message bookingMessage = new Message(testKey.toByteArray(), testBookingLogMessage.toByteArray());
+        ProtoMessageParser protoMessageParser = new ProtoMessageParser(redisSinkConfig, statsDReporter, null);
+        parsedBookingMessage = protoMessageParser.parse(bookingMessage, SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaBookingClass);
+        parsedKey = protoMessageParser.parse(bookingMessage, SinkConnectorSchemaMessageMode.LOG_KEY, schemaKeyClass);
+        schemaBooking = protoMessageParser.getSchema(schemaBookingClass, descriptorsMap);
+        schemaKey = protoMessageParser.getSchema(schemaKeyClass, descriptorsMap);
     }
 
@@ -77,9 +81,8 @@ public void shouldParseComplexProtoType() throws IOException {
         RedisSinkConfig config = ConfigFactory.create(RedisSinkConfig.class, ImmutableMap.of(
                 "SINK_REDIS_DATA_TYPE", "HASHSET",
                 "SINK_REDIS_HASHSET_FIELD_TO_COLUMN_MAPPING", "{\"topics\":\"topics_%s,customer_name\"}",
-                "SINK_REDIS_KEY_TEMPLATE", "subscription:driver:%s,customer_name"
-        ));
-        ProtoOdpfMessageParser odpfMessageParser = new ProtoOdpfMessageParser(config, statsDReporter, null);
+                "SINK_REDIS_KEY_TEMPLATE", "subscription:driver:%s,customer_name"));
+        ProtoMessageParser protoMessageParser = new ProtoMessageParser(config, statsDReporter, null);
         TestBookingLogMessage testBookingLogMessage = TestBookingLogMessage.newBuilder()
                 .setCustomerName("johndoe")
                 .addTopics(TestBookingLogMessage.TopicMetadata.newBuilder()
@@ -89,19 +92,24 @@ public void shouldParseComplexProtoType() throws IOException {
                         .setQos(123)
                         .setTopic("topic2").build())
                 .build();
-        OdpfMessage bookingMessage = new OdpfMessage(null, testBookingLogMessage.toByteArray());
-        String schemaMessageClass = "io.odpf.depot.TestBookingLogMessage";
-        OdpfMessageSchema schema = odpfMessageParser.getSchema(schemaMessageClass, descriptorsMap);
+        Message bookingMessage = new Message(null, testBookingLogMessage.toByteArray());
+        String schemaMessageClass = "org.raystack.depot.TestBookingLogMessage";
+        MessageSchema schema = protoMessageParser.getSchema(schemaMessageClass, descriptorsMap);
 
-        parsedBookingMessage = odpfMessageParser.parse(bookingMessage, SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaMessageClass);
+        parsedBookingMessage = protoMessageParser.parse(bookingMessage, SinkConnectorSchemaMessageMode.LOG_MESSAGE,
+                schemaMessageClass);
 
-        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(config, statsDReporter, schema);
+        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(config, statsDReporter,
+                schema);
         List<RedisEntry> redisEntry = redisHashSetEntryParser.getRedisEntry(parsedBookingMessage);
         assertEquals(1, redisEntry.size());
         RedisHashSetFieldEntry redisHashSetFieldEntry = (RedisHashSetFieldEntry) redisEntry.get(0);
         assertEquals("subscription:driver:johndoe", redisHashSetFieldEntry.getKey());
         assertEquals("topics_johndoe", redisHashSetFieldEntry.getField());
-        assertEquals(new JSONArray("[{\"qos\":1,\"topic\":\"hellowo/rl/dcom.world.partner\"},{\"qos\":123,\"topic\":\"topic2\"}]").toString(),
+        assertEquals(
+                new JSONArray(
+                        "[{\"qos\":1,\"topic\":\"hellowo/rl/dcom.world.partner\"},{\"qos\":123,\"topic\":\"topic2\"}]")
+                        .toString(),
                 new JSONArray(redisHashSetFieldEntry.getValue()).toString());
     }
 
@@ -110,9 +118,8 @@ public void shouldParseRepeatedStruct() throws IOException {
         RedisSinkConfig config = ConfigFactory.create(RedisSinkConfig.class, ImmutableMap.of(
                 "SINK_REDIS_DATA_TYPE", "HASHSET",
                 "SINK_REDIS_HASHSET_FIELD_TO_COLUMN_MAPPING", "{\"attributes\":\"test_order_%s,created_at\"}",
-                "SINK_REDIS_KEY_TEMPLATE", "subscription:order:%s,order_number"
-        ));
-        ProtoOdpfMessageParser odpfMessageParser = new ProtoOdpfMessageParser(config, statsDReporter, null);
+                "SINK_REDIS_KEY_TEMPLATE", "subscription:order:%s,order_number"));
+        ProtoMessageParser protoMessageParser = new ProtoMessageParser(config, statsDReporter, null);
         TestMessageBQ message = TestMessageBQ.newBuilder()
                 .addAttributes(Struct.newBuilder().putFields("name", Value.newBuilder().setStringValue("John").build())
                         .putFields("age", Value.newBuilder().setNumberValue(50).build()).build())
@@ -125,17 +132,20 @@ public void shouldParseRepeatedStruct() throws IOException {
                 .setOrderNumber("test_order")
                 .build();
 
-        OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap);
-        OdpfMessage odpfMessage = new OdpfMessage(null, message.toByteArray());
-        ParsedOdpfMessage parsedMessage = odpfMessageParser.parse(odpfMessage, SinkConnectorSchemaMessageMode.LOG_MESSAGE, "io.odpf.depot.TestMessageBQ");
+        MessageSchema messageSchema = protoMessageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap);
+        Message message1 = new Message(null, message.toByteArray());
+        ParsedMessage parsedMessage = protoMessageParser.parse(message1, SinkConnectorSchemaMessageMode.LOG_MESSAGE,
+                "org.raystack.depot.TestMessageBQ");
 
-        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(config, statsDReporter, odpfMessageSchema);
+        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(config, statsDReporter,
+                messageSchema);
         List<RedisEntry> redisEntry = redisHashSetEntryParser.getRedisEntry(parsedMessage);
         assertEquals(1, redisEntry.size());
         RedisHashSetFieldEntry redisHashSetFieldEntry = (RedisHashSetFieldEntry) redisEntry.get(0);
         assertEquals("subscription:order:test_order", redisHashSetFieldEntry.getKey());
         assertEquals("test_order_2022-11-26T03:29:19Z", redisHashSetFieldEntry.getField());
-        assertEquals("[{\"name\":\"John\",\"age\":50.0},{\"name\":\"John\",\"age\":60.0},{\"name\":\"John\",\"active\":true,\"height\":175.0}]",
+        assertEquals(
+                "[{\"name\":\"John\",\"age\":50.0},{\"name\":\"John\",\"age\":60.0},{\"name\":\"John\",\"active\":true,\"height\":175.0}]",
                 redisHashSetFieldEntry.getValue());
     }
 
@@ -144,67 +154,79 @@ public void shouldThrowExceptionForWrongConfig() throws IOException {
         RedisSinkConfig config = ConfigFactory.create(RedisSinkConfig.class, ImmutableMap.of(
                 "SINK_REDIS_DATA_TYPE", "HASHSET",
                 "SINK_REDIS_HASHSET_FIELD_TO_COLUMN_MAPPING", "{\"does_not_exist\":\"test_order_%s,order_number\"}",
-                "SINK_REDIS_KEY_TEMPLATE", "subscription:order:%s,order_number"
-        ));
+                "SINK_REDIS_KEY_TEMPLATE", "subscription:order:%s,order_number"));
 
-        ProtoOdpfMessageParser odpfMessageParser = new ProtoOdpfMessageParser(config, statsDReporter, null);
+        ProtoMessageParser protoMessageParser = new ProtoMessageParser(config, statsDReporter, null);
         TestMessageBQ message = TestMessageBQ.newBuilder().setOrderNumber("test").build();
-        OdpfMessageSchema odpfMessageSchema = odpfMessageParser.getSchema("io.odpf.depot.TestMessageBQ", descriptorsMap);
-        OdpfMessage odpfMessage = new OdpfMessage(null, message.toByteArray());
-        ParsedOdpfMessage parsedMessage = odpfMessageParser.parse(odpfMessage, SinkConnectorSchemaMessageMode.LOG_MESSAGE, "io.odpf.depot.TestMessageBQ");
-
-        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(config, statsDReporter, odpfMessageSchema);
-        IllegalArgumentException exception = Assert.assertThrows(IllegalArgumentException.class, () -> redisHashSetEntryParser.getRedisEntry(parsedMessage));
+        MessageSchema messageSchema = protoMessageParser.getSchema("org.raystack.depot.TestMessageBQ", descriptorsMap);
+        Message message1 = new Message(null, message.toByteArray());
+        ParsedMessage parsedMessage = protoMessageParser.parse(message1, SinkConnectorSchemaMessageMode.LOG_MESSAGE,
+                "org.raystack.depot.TestMessageBQ");
+
+        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(config, statsDReporter,
+                messageSchema);
+        IllegalArgumentException exception = Assert.assertThrows(IllegalArgumentException.class,
+                () -> redisHashSetEntryParser.getRedisEntry(parsedMessage));
         Assert.assertEquals("Invalid field config : does_not_exist", exception.getMessage());
     }
 
     @Test
     public void shouldParseLongMessageForKey() throws IOException {
         redisSinkSetup("{\"order_number\":\"ORDER_NUMBER_%s,customer_total_fare_without_surge\"}");
-        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig, statsDReporter, schemaBooking);
+        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig,
+                statsDReporter, schemaBooking);
         List<RedisEntry> redisEntries = redisHashSetEntryParser.getRedisEntry(parsedBookingMessage);
-        RedisHashSetFieldEntry expectedEntry = new RedisHashSetFieldEntry("test-key", "ORDER_NUMBER_2000", "booking-order-1", null);
+        RedisHashSetFieldEntry expectedEntry = new RedisHashSetFieldEntry("test-key", "ORDER_NUMBER_2000",
+                "booking-order-1", null);
         assertEquals(Collections.singletonList(expectedEntry), redisEntries);
     }
 
     @Test
     public void shouldParseLongMessageWithSpaceForKey() throws IOException {
         redisSinkSetup("{\"order_number\":\"ORDER_NUMBER_%s, customer_total_fare_without_surge\"}");
-        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig, statsDReporter, schemaBooking);
+        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig,
+                statsDReporter, schemaBooking);
         List<RedisEntry> redisEntries = redisHashSetEntryParser.getRedisEntry(parsedBookingMessage);
-        RedisHashSetFieldEntry expectedEntry = new RedisHashSetFieldEntry("test-key", "ORDER_NUMBER_2000", "booking-order-1", null);
+        RedisHashSetFieldEntry expectedEntry = new RedisHashSetFieldEntry("test-key", "ORDER_NUMBER_2000",
+                "booking-order-1", null);
         assertEquals(Collections.singletonList(expectedEntry), redisEntries);
     }
 
     @Test
     public void shouldParseStringMessageForKey() throws IOException {
         redisSinkSetup("{\"order_number\":\"ORDER_NUMBER_%s,order_number\"}");
-        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig, statsDReporter, schemaBooking);
+        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig,
+                statsDReporter, schemaBooking);
         List<RedisEntry> redisEntries = redisHashSetEntryParser.getRedisEntry(parsedBookingMessage);
-        RedisHashSetFieldEntry expectedEntry = new RedisHashSetFieldEntry("test-key", "ORDER_NUMBER_booking-order-1", "booking-order-1", null);
+        RedisHashSetFieldEntry expectedEntry = new RedisHashSetFieldEntry("test-key", "ORDER_NUMBER_booking-order-1",
+                "booking-order-1", null);
         assertEquals(Collections.singletonList(expectedEntry), redisEntries);
     }
 
     @Test
     public void shouldHandleStaticStringForKey() throws IOException {
         redisSinkSetup("{\"order_number\":\"ORDER_NUMBER\"}");
-        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig, statsDReporter, schemaBooking);
+        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig,
+                statsDReporter, schemaBooking);
         List<RedisEntry> redisEntries = redisHashSetEntryParser.getRedisEntry(parsedBookingMessage);
-        RedisHashSetFieldEntry expectedEntry = new RedisHashSetFieldEntry("test-key", "ORDER_NUMBER", "booking-order-1", null);
+        RedisHashSetFieldEntry expectedEntry = new RedisHashSetFieldEntry("test-key", "ORDER_NUMBER", "booking-order-1",
+                null);
         assertEquals(Collections.singletonList(expectedEntry), redisEntries);
     }
 
     @Test
     public void shouldThrowErrorForInvalidFormatForKey() throws IOException {
         redisSinkSetup("{\"order_details\":\"ORDER_NUMBER%, order_number\"}");
-        IllegalArgumentException e = Assert.assertThrows(IllegalArgumentException.class, () -> RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig, statsDReporter, schemaBooking));
+        IllegalArgumentException e = Assert.assertThrows(IllegalArgumentException.class,
+                () -> RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig, statsDReporter, schemaBooking));
         assertEquals("Template is not valid, variables=1, validArgs=0, values=1", e.getMessage());
     }
 
     @Test
     public void shouldThrowErrorForIncompatibleFormatForKey() throws IOException {
         redisSinkSetup("{\"order_details\":\"order_number-%d, order_number\"}");
-        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig, statsDReporter, schemaBooking);
+        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig,
+                statsDReporter, schemaBooking);
         IllegalFormatConversionException e = Assert.assertThrows(IllegalFormatConversionException.class,
                 () -> redisHashSetEntryParser.getRedisEntry(parsedBookingMessage));
         assertEquals("d != java.lang.String", e.getMessage());
@@ -213,9 +235,11 @@ public void shouldThrowErrorForIncompatibleFormatForKey() throws IOException {
 
     @Test
     public void shouldParseKeyWhenKafkaMessageParseModeSetToKey() throws IOException {
         redisSinkSetup("{\"order_number\":\"ORDER_NUMBER\"}");
-        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig, statsDReporter, schemaKey);
-        List<RedisEntry> redisEntries = redisHashSetEntryParser.getRedisEntry(parsedOdpfKey);
-        RedisHashSetFieldEntry expectedEntry = new RedisHashSetFieldEntry("test-key", "ORDER_NUMBER", "ORDER-1-FROM-KEY", null);
+        RedisEntryParser redisHashSetEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig,
+                statsDReporter, schemaKey);
+        List<RedisEntry> redisEntries = redisHashSetEntryParser.getRedisEntry(parsedKey);
+        RedisHashSetFieldEntry expectedEntry = new RedisHashSetFieldEntry("test-key", "ORDER_NUMBER",
+                "ORDER-1-FROM-KEY", null);
         assertEquals(Collections.singletonList(expectedEntry), redisEntries);
     }
 }
diff --git a/src/test/java/org/raystack/depot/redis/parsers/RedisKeyValueEntryParserTest.java b/src/test/java/org/raystack/depot/redis/parsers/RedisKeyValueEntryParserTest.java
new file mode 100644
index 00000000..d390d883
--- /dev/null
+++ b/src/test/java/org/raystack/depot/redis/parsers/RedisKeyValueEntryParserTest.java
@@ -0,0 +1,84 @@
+package org.raystack.depot.redis.parsers;
+
+import com.google.protobuf.Descriptors;
+import org.raystack.depot.redis.client.entry.RedisEntry;
+import org.raystack.depot.redis.client.entry.RedisKeyValueEntry;
+import org.raystack.depot.TestKey;
+import org.raystack.depot.TestMessage;
+import org.raystack.depot.TestNestedMessage;
+import org.raystack.depot.TestNestedRepeatedMessage;
+import org.raystack.depot.config.RedisSinkConfig;
+import org.raystack.depot.message.Message;
+import org.raystack.depot.message.MessageSchema;
+import org.raystack.depot.message.ParsedMessage;
+import org.raystack.depot.message.SinkConnectorSchemaMessageMode;
+import org.raystack.depot.message.proto.ProtoMessageParser;
+import org.raystack.depot.metrics.StatsDReporter;
+import org.raystack.depot.redis.enums.RedisSinkDataType;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.Mockito.when;
+
+@RunWith(MockitoJUnitRunner.class)
+public class RedisKeyValueEntryParserTest {
+    private final Map<String, Descriptors.Descriptor> descriptorsMap = new HashMap<String, Descriptors.Descriptor>() {
+        {
+            put(String.format("%s", TestKey.class.getName()), TestKey.getDescriptor());
+            put(String.format("%s", TestMessage.class.getName()), TestMessage.getDescriptor());
+            put(String.format("%s", TestNestedMessage.class.getName()), TestNestedMessage.getDescriptor());
+            put(String.format("%s", TestNestedRepeatedMessage.class.getName()),
+                    TestNestedRepeatedMessage.getDescriptor());
+        }
+    };
+    @Mock
+    private RedisSinkConfig redisSinkConfig;
+    @Mock
+    private StatsDReporter statsDReporter;
+    private RedisEntryParser redisKeyValueEntryParser;
+    private MessageSchema schema;
+    private ParsedMessage parsedMessage;
+
+    private void redisSinkSetup(String template, String field) throws IOException {
+        when(redisSinkConfig.getSinkRedisDataType()).thenReturn(RedisSinkDataType.KEYVALUE);
+        when(redisSinkConfig.getSinkRedisKeyValueDataFieldName()).thenReturn(field);
+        when(redisSinkConfig.getSinkRedisKeyTemplate()).thenReturn(template);
+        ProtoMessageParser messageParser = new ProtoMessageParser(redisSinkConfig, statsDReporter, null);
+        String schemaClass = "org.raystack.depot.TestMessage";
+        schema = messageParser.getSchema(schemaClass, descriptorsMap);
+        byte[] logMessage = TestMessage.newBuilder()
+                .setOrderNumber("xyz-order")
+                .setOrderDetails("new-eureka-order")
+                .build()
+                .toByteArray();
+        Message message = new Message(null, logMessage);
+        parsedMessage = messageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaClass);
+        redisKeyValueEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig, statsDReporter, schema);
+    }
+
+    @Test
+    public void shouldConvertParsedMessageToRedisKeyValueEntry() throws IOException {
+        redisSinkSetup("test-key", "order_details");
+        List<RedisEntry> redisDataEntries = redisKeyValueEntryParser.getRedisEntry(parsedMessage);
+        RedisKeyValueEntry expectedEntry = new RedisKeyValueEntry("test-key", "new-eureka-order", null);
+        assertEquals(Collections.singletonList(expectedEntry), redisDataEntries);
+    }
+
+    @Test
+    public void shouldThrowExceptionForInvalidKeyValueDataFieldName() throws IOException {
+        redisSinkSetup("test-key", "random-field");
+        IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
+                () -> redisKeyValueEntryParser.getRedisEntry(parsedMessage));
+        assertEquals("Invalid field config : random-field", exception.getMessage());
+    }
+}
diff --git a/src/test/java/org/raystack/depot/redis/parsers/RedisListEntryParserTest.java b/src/test/java/org/raystack/depot/redis/parsers/RedisListEntryParserTest.java
new file mode 100644
index 00000000..00219152
--- /dev/null
+++ b/src/test/java/org/raystack/depot/redis/parsers/RedisListEntryParserTest.java
@@ -0,0 +1,85 @@
+package org.raystack.depot.redis.parsers;
+
+import com.google.protobuf.Descriptors;
+import org.raystack.depot.redis.client.entry.RedisEntry;
+import org.raystack.depot.redis.client.entry.RedisListEntry;
+import org.raystack.depot.TestKey;
+import org.raystack.depot.TestMessage;
+import org.raystack.depot.TestNestedMessage;
+import org.raystack.depot.TestNestedRepeatedMessage;
+import org.raystack.depot.config.RedisSinkConfig;
+import org.raystack.depot.message.Message;
+import org.raystack.depot.message.MessageSchema;
+import org.raystack.depot.message.ParsedMessage;
+import org.raystack.depot.message.SinkConnectorSchemaMessageMode;
+import org.raystack.depot.message.proto.ProtoMessageParser;
+import org.raystack.depot.metrics.StatsDReporter;
+import org.raystack.depot.redis.enums.RedisSinkDataType;
+
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import java.io.IOException;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThrows;
+import static org.mockito.Mockito.when;
+
+@RunWith(MockitoJUnitRunner.class)
+public class RedisListEntryParserTest {
+    private final Map<String, Descriptors.Descriptor> descriptorsMap = new HashMap<String, Descriptors.Descriptor>() {
+        {
+            put(String.format("%s", TestKey.class.getName()), TestKey.getDescriptor());
+            put(String.format("%s", TestMessage.class.getName()), TestMessage.getDescriptor());
+            put(String.format("%s", TestNestedMessage.class.getName()), TestNestedMessage.getDescriptor());
+            put(String.format("%s", TestNestedRepeatedMessage.class.getName()),
+                    TestNestedRepeatedMessage.getDescriptor());
+        }
+    };
+    @Mock
+    private RedisSinkConfig redisSinkConfig;
+    @Mock
+    private StatsDReporter statsDReporter;
+    private RedisEntryParser redisListEntryParser;
+    private MessageSchema schema;
+    private ParsedMessage parsedMessage;
+
+    private void redisSinkSetup(String template, String field) throws IOException {
+        when(redisSinkConfig.getSinkRedisDataType()).thenReturn(RedisSinkDataType.LIST);
+        when(redisSinkConfig.getSinkRedisListDataFieldName()).thenReturn(field);
+        when(redisSinkConfig.getSinkRedisKeyTemplate()).thenReturn(template);
+        ProtoMessageParser protoMessageParser = new ProtoMessageParser(redisSinkConfig, statsDReporter, null);
+        String schemaClass = "org.raystack.depot.TestMessage";
+        schema = protoMessageParser.getSchema(schemaClass, descriptorsMap);
+        byte[] logMessage = TestMessage.newBuilder()
+                .setOrderNumber("xyz-order")
+                .setOrderDetails("new-eureka-order")
+                .build()
+                .toByteArray();
+        Message message = new Message(null, logMessage);
+        parsedMessage = protoMessageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaClass);
+        redisListEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig, statsDReporter, schema);
+    }
+
+    @Test
+    public void shouldConvertParsedMessageToRedisListEntry() throws IOException {
+        redisSinkSetup("test-key", "order_details");
+        List<RedisEntry> redisDataEntries = redisListEntryParser.getRedisEntry(parsedMessage);
+        RedisListEntry expectedEntry = new RedisListEntry("test-key", "new-eureka-order", null);
+        assertEquals(Collections.singletonList(expectedEntry), redisDataEntries);
+    }
+
+    @Test
+    public void shouldThrowExceptionForInvalidKeyValueDataFieldName() throws IOException {
+        redisSinkSetup("test-key", "random-field");
+        IllegalArgumentException exception = assertThrows(IllegalArgumentException.class,
+                () -> redisListEntryParser.getRedisEntry(parsedMessage));
+        assertEquals("Invalid field config : random-field", exception.getMessage());
+    }
+}
diff --git a/src/test/java/org/raystack/depot/redis/parsers/RedisParserTest.java b/src/test/java/org/raystack/depot/redis/parsers/RedisParserTest.java
new file mode 100644
index 00000000..bb92b1cd
--- /dev/null
+++ b/src/test/java/org/raystack/depot/redis/parsers/RedisParserTest.java
@@ -0,0 +1,152 @@
+package org.raystack.depot.redis.parsers;
+
+import com.google.protobuf.Descriptors;
+import org.raystack.depot.TestKey;
+import org.raystack.depot.TestMessage;
+import org.raystack.depot.TestNestedMessage;
+import org.raystack.depot.TestNestedRepeatedMessage;
+import org.raystack.depot.message.*;
+import org.raystack.depot.redis.client.entry.RedisKeyValueEntry;
+import org.raystack.stencil.Parser;
+import org.raystack.stencil.StencilClientFactory;
+import org.raystack.depot.common.Tuple;
+import org.raystack.depot.config.RedisSinkConfig;
+import org.raystack.depot.config.enums.SinkConnectorSchemaDataType;
+import org.raystack.depot.error.ErrorType;
+import org.raystack.depot.exception.ConfigurationException;
+import org.raystack.depot.message.proto.ProtoMessageParser;
+import org.raystack.depot.message.proto.ProtoParsedMessage;
+import org.raystack.depot.metrics.StatsDReporter;
+import org.raystack.depot.redis.enums.RedisSinkDataType;
+import org.raystack.depot.redis.record.RedisRecord;
+import org.raystack.depot.utils.MessageConfigUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mock;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.Mockito.when;
+
+@RunWith(MockitoJUnitRunner.class)
+public class RedisParserTest {
+    private final List<Message> messages = new ArrayList<>();
+    private final String schemaClass = "org.raystack.depot.TestMessage";
+    private final Map<String, Descriptors.Descriptor> descriptorsMap = new HashMap<String, Descriptors.Descriptor>() {
+        {
+            put(String.format("%s", TestKey.class.getName()), TestKey.getDescriptor());
+            put(String.format("%s", TestMessage.class.getName()), TestMessage.getDescriptor());
+            put(String.format("%s", TestNestedMessage.class.getName()), TestNestedMessage.getDescriptor());
+            put(String.format("%s", TestNestedRepeatedMessage.class.getName()),
+                    TestNestedRepeatedMessage.getDescriptor());
+        }
+    };
+    @Mock
+    private RedisSinkConfig redisSinkConfig;
+    @Mock
+    private ProtoMessageParser protoMessageParser;
+    @Mock
+    private StatsDReporter statsDReporter;
+    private RedisParser redisParser;
+
+    @Before
+    public void setup() throws IOException {
+        when(redisSinkConfig.getSinkRedisDataType()).thenReturn(RedisSinkDataType.KEYVALUE);
+        when(redisSinkConfig.getSinkRedisKeyTemplate()).thenReturn("test-key");
+        when(redisSinkConfig.getSinkRedisKeyValueDataFieldName()).thenReturn("order_number");
+        when(redisSinkConfig.getSinkConnectorSchemaMessageMode()).thenReturn(SinkConnectorSchemaMessageMode.LOG_MESSAGE);
+        when(redisSinkConfig.getSinkConnectorSchemaProtoMessageClass()).thenReturn(schemaClass);
+        when(redisSinkConfig.getSinkConnectorSchemaDataType()).thenReturn(SinkConnectorSchemaDataType.PROTOBUF);
+        TestMessage message1 = TestMessage.newBuilder().setOrderNumber("test-order-1").setOrderDetails("ORDER-DETAILS-1").build();
+        TestMessage message2 = TestMessage.newBuilder().setOrderNumber("test-order-2").setOrderDetails("ORDER-DETAILS-2").build();
+        TestMessage message3 = TestMessage.newBuilder().setOrderNumber("test-order-3").setOrderDetails("ORDER-DETAILS-3").build();
+        TestMessage message4 = TestMessage.newBuilder().setOrderNumber("test-order-4").setOrderDetails("ORDER-DETAILS-4").build();
+        TestMessage message5 = TestMessage.newBuilder().setOrderNumber("test-order-5").setOrderDetails("ORDER-DETAILS-5").build();
+        TestMessage message6 = TestMessage.newBuilder().setOrderNumber("test-order-6").setOrderDetails("ORDER-DETAILS-6").build();
+        messages.add(new Message(null, message1.toByteArray()));
+        messages.add(new Message(null, message2.toByteArray()));
+        messages.add(new Message(null, message3.toByteArray()));
+        messages.add(new Message(null, message4.toByteArray()));
+        messages.add(new Message(null, message5.toByteArray()));
+        messages.add(new Message(null, message6.toByteArray()));
+    }
+
+    public void setupParserResponse() throws IOException {
+        Parser protoParser = StencilClientFactory.getClient().getParser(TestMessage.class.getName());
+        for (Message message : messages) {
+            ParsedMessage parsedMessage = new ProtoParsedMessage(protoParser.parse((byte[]) message.getLogMessage()));
+            when(protoMessageParser.parse(message, SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaClass))
+                    .thenReturn(parsedMessage);
+        }
+        ProtoMessageParser messageParser = (ProtoMessageParser) MessageParserFactory.getParser(redisSinkConfig,
+                statsDReporter);
+        Tuple<SinkConnectorSchemaMessageMode, String> modeAndSchema = MessageConfigUtils
+                .getModeAndSchema(redisSinkConfig);
+        MessageSchema schema = messageParser.getSchema(modeAndSchema.getSecond(), descriptorsMap);
+        RedisEntryParser redisEntryParser = RedisEntryParserFactory.getRedisEntryParser(redisSinkConfig, statsDReporter,
+                schema);
+        redisParser = new RedisParser(this.protoMessageParser, redisEntryParser, modeAndSchema);
+    }
+
+    @Test
+    public void shouldConvertMessageToRedisRecords() throws IOException {
+        setupParserResponse();
+        List<RedisRecord> parsedRecords = redisParser.convert(messages);
+        Map<Boolean, List<RedisRecord>> splitterRecords = parsedRecords.stream()
+                .collect(Collectors.partitioningBy(RedisRecord::isValid));
+        List<RedisRecord> invalidRecords = splitterRecords.get(Boolean.FALSE);
+        List<RedisRecord> validRecords = splitterRecords.get(Boolean.TRUE);
+        assertEquals(6, validRecords.size());
+        assertTrue(invalidRecords.isEmpty());
+        List<RedisRecord> expectedRecords = new ArrayList<>();
+        expectedRecords
+                .add(new RedisRecord(new RedisKeyValueEntry("test-key", "test-order-1", null), 0L, null, "{}", true));
+        expectedRecords
+                .add(new RedisRecord(new RedisKeyValueEntry("test-key", "test-order-2", null), 1L, null, "{}", true));
+        expectedRecords
+                .add(new RedisRecord(new RedisKeyValueEntry("test-key", "test-order-3", null), 2L, null, "{}", true));
+        expectedRecords
+                .add(new RedisRecord(new RedisKeyValueEntry("test-key", "test-order-4", null), 3L, null, "{}", true));
+        expectedRecords
+                .add(new RedisRecord(new RedisKeyValueEntry("test-key", "test-order-5", null), 4L, null, "{}", true));
+        expectedRecords
+                .add(new RedisRecord(new RedisKeyValueEntry("test-key", "test-order-6", null), 5L, null, "{}", true));
+        IntStream.range(0, expectedRecords.size()).forEach(
+                index -> assertEquals(expectedRecords.get(index).toString(), parsedRecords.get(index).toString()));
+    }
+
+    @Test
+    public void shouldReportValidAndInvalidRecords() throws IOException {
+        setupParserResponse();
+        when(protoMessageParser.parse(messages.get(2), SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaClass))
+                .thenThrow(new IOException("Error while parsing protobuf"));
+        when(protoMessageParser.parse(messages.get(3), SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaClass))
+                .thenThrow(new ConfigurationException("Invalid field config : INVALID"));
+        when(protoMessageParser.parse(messages.get(4), SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaClass))
+                .thenThrow(new IllegalArgumentException("Config REDIS_CONFIG is empty"));
+        when(protoMessageParser.parse(messages.get(5), SinkConnectorSchemaMessageMode.LOG_MESSAGE, schemaClass))
+                .thenThrow(new UnsupportedOperationException("some message"));
+        List<RedisRecord> parsedRecords = redisParser.convert(messages);
+        Map<Boolean, List<RedisRecord>> splitterRecords = parsedRecords.stream()
+                .collect(Collectors.partitioningBy(RedisRecord::isValid));
+        List<RedisRecord> invalidRecords = splitterRecords.get(Boolean.FALSE);
+        List<RedisRecord> validRecords = splitterRecords.get(Boolean.TRUE);
+        assertEquals(2, validRecords.size());
+        assertEquals(4, invalidRecords.size());
+        Assert.assertEquals(ErrorType.DESERIALIZATION_ERROR, parsedRecords.get(2).getErrorInfo().getErrorType());
+        Assert.assertEquals(ErrorType.UNKNOWN_FIELDS_ERROR, parsedRecords.get(3).getErrorInfo().getErrorType());
+        Assert.assertEquals(ErrorType.DEFAULT_ERROR, parsedRecords.get(4).getErrorInfo().getErrorType());
+        Assert.assertEquals(ErrorType.INVALID_MESSAGE_ERROR, parsedRecords.get(5).getErrorInfo().getErrorType());
+    }
+}
diff --git a/src/test/java/io/odpf/depot/redis/record/RedisRecordTest.java b/src/test/java/org/raystack/depot/redis/record/RedisRecordTest.java
similarity index 87%
rename from src/test/java/io/odpf/depot/redis/record/RedisRecordTest.java
rename to src/test/java/org/raystack/depot/redis/record/RedisRecordTest.java
index f3be35ea..b475d40f 100644
--- a/src/test/java/io/odpf/depot/redis/record/RedisRecordTest.java
+++ b/src/test/java/org/raystack/depot/redis/record/RedisRecordTest.java
@@ -1,11 +1,11 @@
-package io.odpf.depot.redis.record;
+package org.raystack.depot.redis.record;
 
-import io.odpf.depot.error.ErrorInfo;
-import io.odpf.depot.error.ErrorType;
-import io.odpf.depot.redis.client.entry.RedisEntry;
-import io.odpf.depot.redis.client.response.RedisClusterResponse;
-import io.odpf.depot.redis.client.response.RedisStandaloneResponse;
-import io.odpf.depot.redis.ttl.RedisTtl;
+import org.raystack.depot.redis.client.entry.RedisEntry;
+import org.raystack.depot.redis.client.response.RedisClusterResponse;
+import org.raystack.depot.redis.client.response.RedisStandaloneResponse;
+import org.raystack.depot.error.ErrorInfo;
+import org.raystack.depot.error.ErrorType;
+import org.raystack.depot.redis.ttl.RedisTtl;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
diff --git a/src/test/java/io/odpf/depot/redis/ttl/DurationTTLTest.java b/src/test/java/org/raystack/depot/redis/ttl/DurationTTLTest.java
similarity index 96%
rename from src/test/java/io/odpf/depot/redis/ttl/DurationTTLTest.java
rename to src/test/java/org/raystack/depot/redis/ttl/DurationTTLTest.java
index 45f3c1fa..0c6488b3 100644
--- a/src/test/java/io/odpf/depot/redis/ttl/DurationTTLTest.java
+++ b/src/test/java/org/raystack/depot/redis/ttl/DurationTTLTest.java
@@ -1,4 +1,4 @@
-package io.odpf.depot.redis.ttl;
+package org.raystack.depot.redis.ttl;
 
 import org.junit.Before;
 import org.junit.Test;
diff --git a/src/test/java/io/odpf/depot/redis/ttl/ExactTimeTTLTest.java b/src/test/java/org/raystack/depot/redis/ttl/ExactTimeTTLTest.java
similarity index 96%
rename from src/test/java/io/odpf/depot/redis/ttl/ExactTimeTTLTest.java
rename to src/test/java/org/raystack/depot/redis/ttl/ExactTimeTTLTest.java
index 086abbf5..814cdc91 100644
--- a/src/test/java/io/odpf/depot/redis/ttl/ExactTimeTTLTest.java
+++ b/src/test/java/org/raystack/depot/redis/ttl/ExactTimeTTLTest.java
@@ -1,4 +1,4 @@
-package io.odpf.depot.redis.ttl;
+package org.raystack.depot.redis.ttl;
 
 import org.junit.Before;
 import org.junit.Test;
@@ -10,6 +10,7 @@
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
+
 @RunWith(MockitoJUnitRunner.class)
 public class ExactTimeTTLTest {
 
diff --git a/src/test/java/io/odpf/depot/redis/ttl/RedisTtlFactoryTest.java b/src/test/java/org/raystack/depot/redis/ttl/RedisTtlFactoryTest.java
similarity index 90%
rename from src/test/java/io/odpf/depot/redis/ttl/RedisTtlFactoryTest.java
rename to src/test/java/org/raystack/depot/redis/ttl/RedisTtlFactoryTest.java
index dfb3c776..fabda4f8 100644
--- a/src/test/java/io/odpf/depot/redis/ttl/RedisTtlFactoryTest.java
+++ b/src/test/java/org/raystack/depot/redis/ttl/RedisTtlFactoryTest.java
@@ -1,8 +1,8 @@
-package io.odpf.depot.redis.ttl;
+package org.raystack.depot.redis.ttl;
 
-import io.odpf.depot.config.RedisSinkConfig;
-import io.odpf.depot.exception.ConfigurationException;
-import io.odpf.depot.redis.enums.RedisSinkTtlType;
+import org.raystack.depot.config.RedisSinkConfig;
+import org.raystack.depot.exception.ConfigurationException;
+import org.raystack.depot.redis.enums.RedisSinkTtlType;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Rule;
@@ -13,6 +13,7 @@
 import org.mockito.junit.MockitoJUnitRunner;
 
 import static org.mockito.Mockito.when;
 
+
 @RunWith(MockitoJUnitRunner.class)
 public class RedisTtlFactoryTest {
 
diff --git a/src/test/java/io/odpf/depot/redis/util/RedisSinkUtilsTest.java b/src/test/java/org/raystack/depot/redis/util/RedisSinkUtilsTest.java
similarity index 82%
rename from src/test/java/io/odpf/depot/redis/util/RedisSinkUtilsTest.java
rename to src/test/java/org/raystack/depot/redis/util/RedisSinkUtilsTest.java
index 79f94ed9..4ec0b99a 100644
--- a/src/test/java/io/odpf/depot/redis/util/RedisSinkUtilsTest.java
+++ b/src/test/java/org/raystack/depot/redis/util/RedisSinkUtilsTest.java
@@ -1,13 +1,13 @@
-package io.odpf.depot.redis.util;
+package org.raystack.depot.redis.util;
 
-import io.odpf.depot.error.ErrorInfo;
-import io.odpf.depot.error.ErrorType;
-import io.odpf.depot.metrics.Instrumentation;
-import io.odpf.depot.metrics.StatsDReporter;
-import io.odpf.depot.redis.client.entry.RedisListEntry;
-import io.odpf.depot.redis.client.response.RedisClusterResponse;
-import io.odpf.depot.redis.client.response.RedisResponse;
-import io.odpf.depot.redis.record.RedisRecord;
+import org.raystack.depot.redis.client.entry.RedisListEntry;
+import org.raystack.depot.redis.client.response.RedisClusterResponse;
+import org.raystack.depot.redis.client.response.RedisResponse;
+import org.raystack.depot.error.ErrorInfo;
+import org.raystack.depot.error.ErrorType;
+import org.raystack.depot.metrics.Instrumentation;
+import org.raystack.depot.metrics.StatsDReporter;
+import org.raystack.depot.redis.record.RedisRecord;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;
@@ -38,7 +38,8 @@ public void shouldGetErrorsFromResponse() {
         responses.add(new RedisClusterResponse("FAILED AT 7"));
         responses.add(new RedisClusterResponse("FAILED AT 10"));
         responses.add(new RedisClusterResponse("LPUSH", "OK", null));
-        Map<Long, ErrorInfo> errors = RedisSinkUtils.getErrorsFromResponse(records, responses, new Instrumentation(statsDReporter, RedisSinkUtils.class));
+        Map<Long, ErrorInfo> errors = RedisSinkUtils.getErrorsFromResponse(records, responses,
+                new Instrumentation(statsDReporter, RedisSinkUtils.class));
         Assert.assertEquals(3, errors.size());
         Assert.assertEquals("FAILED AT 4", errors.get(4L).getException().getMessage());
         Assert.assertEquals("FAILED AT 7", errors.get(7L).getException().getMessage());
@@ -65,7 +66,8 @@ public void shouldGetEmptyMapWhenNoErrors() {
         responses.forEach(response -> {
             Mockito.when(response.isFailed()).thenReturn(false);
         });
-        Map<Long, ErrorInfo> errors = RedisSinkUtils.getErrorsFromResponse(records, responses, new Instrumentation(statsDReporter, RedisSinkUtils.class));
+        Map<Long, ErrorInfo> errors = RedisSinkUtils.getErrorsFromResponse(records, responses,
+                new Instrumentation(statsDReporter, RedisSinkUtils.class));
         Assert.assertTrue(errors.isEmpty());
     }
 }
diff --git a/src/test/java/io/odpf/depot/utils/JsonUtilsTest.java b/src/test/java/org/raystack/depot/utils/JsonUtilsTest.java
similarity index 84%
rename from src/test/java/io/odpf/depot/utils/JsonUtilsTest.java
rename to src/test/java/org/raystack/depot/utils/JsonUtilsTest.java
index 37aea662..10a5506a 100644
--- a/src/test/java/io/odpf/depot/utils/JsonUtilsTest.java
+++ b/src/test/java/org/raystack/depot/utils/JsonUtilsTest.java
@@ -1,6 +1,6 @@
-package io.odpf.depot.utils;
+package org.raystack.depot.utils;
 
-import io.odpf.depot.config.OdpfSinkConfig;
+import org.raystack.depot.config.SinkConfig;
 import org.junit.Assert;
 import org.junit.Test;
 import org.json.JSONObject;
@@ -14,10 +14,10 @@
 @RunWith(MockitoJUnitRunner.class)
 public class JsonUtilsTest {
     @Mock
-    private OdpfSinkConfig odpfSinkConfig;
+    private SinkConfig sinkConfig;
 
     void setSinkConfigs(boolean stringModeEnabled) {
-        when(odpfSinkConfig.getSinkConnectorSchemaJsonParserStringModeEnabled()).thenReturn(stringModeEnabled);
+        when(sinkConfig.getSinkConnectorSchemaJsonParserStringModeEnabled()).thenReturn(stringModeEnabled);
     }
 
     @Test
@@ -29,7 +29,7 @@ public void shouldParseSimpleJsonWhenStringModeEnabled() {
         expectedJson.put("balance", "100");
         expectedJson.put("is_vip", "YES");
         byte[] payload = expectedJson.toString().getBytes();
-        JSONObject parsedJson = JsonUtils.getJsonObject(odpfSinkConfig, payload);
+        JSONObject parsedJson = JsonUtils.getJsonObject(sinkConfig, payload);
         Assert.assertTrue(parsedJson.similar(expectedJson));
     }
 
@@ -42,7 +42,7 @@ public void shouldCastAllTypeToStringWhenStringModeEnabled() {
         originalJson.put("balance", new Double(1000.21));
         originalJson.put("is_vip", Boolean.TRUE);
         byte[] payload = originalJson.toString().getBytes();
-        JSONObject parsedJson = JsonUtils.getJsonObject(odpfSinkConfig, payload);
+        JSONObject parsedJson = JsonUtils.getJsonObject(sinkConfig, payload);
         JSONObject stringJson = new JSONObject();
         stringJson.put("name", "foo");
         stringJson.put("num", "100");
@@ -64,7 +64,7 @@ public void shouldThrowExceptionForNestedJsonWhenStringModeEnabled() {
         nestedJson.put("nestedField", nestedJsonField);
         byte[] payload = nestedJson.toString().getBytes();
         UnsupportedOperationException exception = assertThrows(UnsupportedOperationException.class,
-                () -> JsonUtils.getJsonObject(odpfSinkConfig, payload));
+                () -> JsonUtils.getJsonObject(sinkConfig, payload));
         assertEquals("nested json structure not supported yet", exception.getMessage());
     }
 
@@ -77,7 +77,7 @@ public void shouldParseSimpleJsonWhenStringModeDisabled() {
         expectedJson.put("balance", new Double(1000.21));
         expectedJson.put("is_vip", Boolean.TRUE);
         byte[] payload = expectedJson.toString().getBytes();
-        JSONObject parsedJson = JsonUtils.getJsonObject(odpfSinkConfig, payload);
+        JSONObject parsedJson = JsonUtils.getJsonObject(sinkConfig, payload);
         Assert.assertTrue(parsedJson.similar(expectedJson));
     }
 
@@ -93,7 +93,7 @@ public void shouldParseNestedJsonWhenStringModeDisabled() {
         nestedJson.put("ID", 1);
         nestedJson.put("nestedField", nestedJsonField);
         byte[] payload = nestedJson.toString().getBytes();
-        JSONObject parsedJson = JsonUtils.getJsonObject(odpfSinkConfig, payload);
+        JSONObject parsedJson = JsonUtils.getJsonObject(sinkConfig, payload);
         Assert.assertTrue(parsedJson.similar(nestedJson));
     }
 }
diff --git a/src/test/java/org/raystack/depot/utils/ProtoUtilTest.java b/src/test/java/org/raystack/depot/utils/ProtoUtilTest.java
new file mode 100644
index 00000000..9651a538
--- /dev/null
+++ b/src/test/java/org/raystack/depot/utils/ProtoUtilTest.java
@@ -0,0 +1,74 @@
+package org.raystack.depot.utils;
+
+import com.google.protobuf.Descriptors;
+import com.google.protobuf.DynamicMessage;
+import com.google.protobuf.UnknownFieldSet;
+import org.raystack.depot.TestBookingLogMessage;
+import org.raystack.depot.TestLocation;
+import org.junit.Test;
+
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+public class ProtoUtilTest {
+    @Test
+    public void shouldReturnTrueWhenUnknownFieldsExistOnRootLevelFields() {
+        Descriptors.Descriptor bookingLogMessage = TestBookingLogMessage.getDescriptor();
+        Descriptors.Descriptor location = TestLocation.getDescriptor();
+
+        Descriptors.FieldDescriptor fieldDescriptor = bookingLogMessage
+                .findFieldByName("driver_pickup_location");
+        DynamicMessage dynamicMessage = DynamicMessage.newBuilder(bookingLogMessage)
+                .setField(fieldDescriptor, DynamicMessage.newBuilder(location)
+                        .build())
+                .setUnknownFields(UnknownFieldSet.newBuilder()
+                        .addField(1, UnknownFieldSet.Field.getDefaultInstance())
+                        .addField(2, UnknownFieldSet.Field.getDefaultInstance())
+                        .build())
+                .build();
+
+        boolean unknownFieldExist = ProtoUtils.hasUnknownField(dynamicMessage);
+        assertTrue(unknownFieldExist);
+    }
+
+    @Test
+    public void shouldReturnTrueWhenUnknownFieldsExistOnNestedChildFields() {
+        Descriptors.Descriptor bookingLogMessage = TestBookingLogMessage.getDescriptor();
+        Descriptors.Descriptor location = TestLocation.getDescriptor();
+        Descriptors.FieldDescriptor fieldDescriptor = bookingLogMessage
+                .findFieldByName("driver_pickup_location");
+
+        DynamicMessage dynamicMessage = DynamicMessage.newBuilder(bookingLogMessage)
+                .setField(fieldDescriptor, DynamicMessage.newBuilder(location)
+                        .setUnknownFields(UnknownFieldSet.newBuilder()
+                                .addField(1, UnknownFieldSet.Field.getDefaultInstance())
+                                .addField(2, UnknownFieldSet.Field.getDefaultInstance())
+                                .build())
+                        .build())
+                .build();
+
+        boolean unknownFieldExist = ProtoUtils.hasUnknownField(dynamicMessage);
+        assertTrue(unknownFieldExist);
+    }
+
+    @Test
+    public void shouldReturnFalseWhenNoUnknownFieldsExist() {
+        Descriptors.Descriptor bookingLogMessage = TestBookingLogMessage.getDescriptor();
+        Descriptors.Descriptor location = TestLocation.getDescriptor();
+
+        Descriptors.FieldDescriptor fieldDescriptor = bookingLogMessage
+                .findFieldByName("driver_pickup_location");
+        DynamicMessage dynamicMessage = DynamicMessage.newBuilder(bookingLogMessage)
+                .setField(fieldDescriptor, DynamicMessage.newBuilder(location).build())
+                .build();
+
+        boolean unknownFieldExist = ProtoUtils.hasUnknownField(dynamicMessage);
+        assertFalse(unknownFieldExist);
+    }
+
+    @Test
+    public void shouldReturnFalseWhenRootIsNull() {
+        boolean unknownFieldExist = ProtoUtils.hasUnknownField(null);
+        assertFalse(unknownFieldExist);
+    }
+}
diff --git a/src/test/java/io/odpf/depot/utils/StringUtilsTest.java b/src/test/java/org/raystack/depot/utils/StringUtilsTest.java
similarity index 97%
rename from src/test/java/io/odpf/depot/utils/StringUtilsTest.java
rename to src/test/java/org/raystack/depot/utils/StringUtilsTest.java
index b8c94a5b..cc03594f 100644
--- a/src/test/java/io/odpf/depot/utils/StringUtilsTest.java
+++ b/src/test/java/org/raystack/depot/utils/StringUtilsTest.java
@@ -1,4 +1,4 @@
-package io.odpf.depot.utils;
+package org.raystack.depot.utils;
 
 import org.junit.Assert;
 import org.junit.Test;
diff --git a/src/test/proto/TestGrpc.proto b/src/test/proto/TestGrpc.proto
index d43b0cac..173ed313 100644
--- a/src/test/proto/TestGrpc.proto
+++ b/src/test/proto/TestGrpc.proto
@@ -1,9 +1,9 @@
 syntax = "proto3";
 
-package io.odpf.depot;
+package org.raystack.depot;
 
 option java_multiple_files = true;
-option java_package = "io.odpf.depot";
+option java_package = "org.raystack.depot";
 option java_outer_classname = "SampleGrpcServerProto";
 
 service TestServer {
diff --git a/src/test/proto/TestLogMessage.proto b/src/test/proto/TestLogMessage.proto
index 83992fed..4ab5ec69 100644
--- a/src/test/proto/TestLogMessage.proto
+++ b/src/test/proto/TestLogMessage.proto
@@ -1,9 +1,9 @@
 syntax = "proto3";
 
-package io.odpf.depot;
+package org.raystack.depot;
 
 option java_multiple_files = true;
-option java_package = "io.odpf.depot";
+option java_package = "org.raystack.depot";
 option java_outer_classname = "TestLogMessageProto";
 
 import "google/protobuf/timestamp.proto";
diff --git a/src/test/proto/TestMessage.proto b/src/test/proto/TestMessage.proto
index 8e838ecc..fa53e5b4 100644
--- a/src/test/proto/TestMessage.proto
+++ b/src/test/proto/TestMessage.proto
@@ -1,9 +1,9 @@
 syntax = "proto3";
 
-package io.odpf.depot;
+package org.raystack.depot;
 
 option java_multiple_files = true;
-option java_package = "io.odpf.depot";
+option java_package = "org.raystack.depot";
 option java_outer_classname = "TestMessageProto";
 
 import "google/protobuf/timestamp.proto";
diff --git a/src/test/proto/TestMessageBQ.proto b/src/test/proto/TestMessageBQ.proto
index f50ce383..84f1b1c6 100644
--- a/src/test/proto/TestMessageBQ.proto
+++ b/src/test/proto/TestMessageBQ.proto
@@ -1,6 +1,6 @@
 syntax = "proto3";
 
-package odpf.depot;
+package raystack.depot;
 
 import "google/protobuf/timestamp.proto";
 import "google/protobuf/duration.proto";
@@ -8,7 +8,7 @@ import "google/protobuf/struct.proto";
 import "google/type/date.proto";
 
 option java_multiple_files = true;
-option java_package = "io.odpf.depot";
+option java_package = "org.raystack.depot";
 option java_outer_classname = "TestMessageProtoBQ";
 
 message TestKeyBQ {
@@ -34,6 +34,8 @@ message TestMessageBQ {
   repeated google.protobuf.Timestamp updated_at = 15;
   repeated google.protobuf.Struct attributes = 16;
   repeated google.protobuf.Duration intervals = 17;
+  int32 counter = 18;
+  string camelCase = 19;
 }
 
 message TestMessageChildBQ {