diff --git a/.github/workflows/build-report.yml b/.github/workflows/build-report.yml
new file mode 100644
index 000000000..9c2f33843
--- /dev/null
+++ b/.github/workflows/build-report.yml
@@ -0,0 +1,56 @@
+# Copyright © 2024 Cask Data, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+# http://www.apache.org/licenses/LICENSE-2.0
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+# This workflow publishes the unit test report produced by the "Build with unit tests" workflow
+# For more information see: https://help.github.com/actions/language-and-framework-guides/building-and-testing-java-with-maven
+# Note: Any changes to this workflow take effect only after they are merged into develop
+name: Build Unit Tests Report
+
+on:
+ workflow_run:
+ workflows:
+ - Build with unit tests
+ types:
+ - completed
+
+permissions:
+ actions: read # Allows reading workflow run information
+ statuses: write # Required if the action updates commit statuses
+ checks: write # Required if it updates GitHub Checks API
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ if: ${{ github.event.workflow_run.conclusion != 'skipped' }}
+
+ steps:
+ # Pinned 1.0.0 version
+ - uses: marocchino/action-workflow_run-status@54b6e87d6cb552fc5f36dbe9a722a6048725917a
+
+ - name: Download artifact
+ uses: actions/download-artifact@v4
+ with:
+ github-token: ${{ secrets.GITHUB_TOKEN }}
+ run-id: ${{ github.event.workflow_run.id }}
+ path: artifacts/
+
+ - name: Surefire Report
+ # Pinned 3.5.2 version
+ uses: mikepenz/action-junit-report@16a9560bd02f11e7e3bf6b3e2ef6bba6c9d07c32
+ if: always()
+ with:
+ report_paths: '**/target/surefire-reports/TEST-*.xml'
+ github_token: ${{ secrets.GITHUB_TOKEN }}
+ detailed_summary: true
+ commit: ${{ github.event.workflow_run.head_sha }}
+ check_name: Build Test Report
+
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 6ce0eb526..55cd4617e 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -15,28 +15,34 @@
name: Build with unit tests
on:
- workflow_run:
- workflows:
- - Trigger build
- types:
- - completed
+ push:
+ branches: [ develop, release/** ]
+ pull_request:
+ branches: [ develop, release/** ]
+ types: [opened, synchronize, reopened, labeled]
jobs:
build:
runs-on: k8s-runner-build
- if: ${{ github.event.workflow_run.conclusion != 'skipped' }}
-
+ # We allow builds:
+ # 1) When it's a merge into a branch
+ # 2) For PRs that carry the 'build' label and either
+ #    - it's a code change, or
+ #    - the 'build' label was just added
+ # A bit complex, but it prevents builds when unrelated labels are manipulated
+ if: >
+ github.event_name == 'push'
+ || (contains(github.event.pull_request.labels.*.name, 'build')
+ && (github.event.action != 'labeled' || github.event.label.name == 'build')
+ )
steps:
- # Pinned 1.0.0 version
- - uses: haya14busa/action-workflow_run-status@967ed83efa565c257675ed70cfe5231f062ddd94
-
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
ref: ${{ github.event.workflow_run.head_sha }}
- name: Cache
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
path: ~/.m2/repository
key: ${{ runner.os }}-maven-${{ github.workflow }}-${{ hashFiles('**/pom.xml') }}
@@ -47,21 +53,12 @@ jobs:
run: mvn clean test -fae -T 2 -B -V -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.httpconnectionManager.ttlSeconds=25
- name: Archive build artifacts
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
if: always()
with:
- name: Build debug files
+ name: reports-${{ github.run_id }}
path: |
**/target/rat.txt
**/target/surefire-reports/*
- - name: Surefire Report
- # Pinned 3.5.2 version
- uses: mikepenz/action-junit-report@16a9560bd02f11e7e3bf6b3e2ef6bba6c9d07c32
- if: always()
- with:
- report_paths: '**/target/surefire-reports/TEST-*.xml'
- github_token: ${{ secrets.GITHUB_TOKEN }}
- detailed_summary: true
- commit: ${{ github.event.workflow_run.head_sha }}
- check_name: Test Report
\ No newline at end of file
+
diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
index 51223b8c7..846244f41 100644
--- a/.github/workflows/e2e.yml
+++ b/.github/workflows/e2e.yml
@@ -16,9 +16,9 @@ name: Build e2e tests
on:
push:
- branches: [ develop ]
+ branches: [ develop, release/** ]
pull_request:
- branches: [ develop ]
+ branches: [ develop, release/** ]
types: [ opened, synchronize, reopened, labeled ]
workflow_dispatch:
@@ -45,7 +45,7 @@ jobs:
steps:
# Pinned 1.0.0 version
- - uses: actions/checkout@v3
+ - uses: actions/checkout@v4
with:
path: plugin
submodules: 'recursive'
@@ -61,13 +61,14 @@ jobs:
- '${{ matrix.module }}/**/e2e-test/**'
- name: Checkout e2e test repo
- uses: actions/checkout@v3
+ uses: actions/checkout@v4
with:
repository: cdapio/cdap-e2e-tests
path: e2e
+ ref: release/6.10
- name: Cache
- uses: actions/cache@v3
+ uses: actions/cache@v4
with:
path: ~/.m2/repository
key: ${{ runner.os }}-maven-${{ github.workflow }}-${{ hashFiles('**/pom.xml') }}
@@ -95,6 +96,12 @@ jobs:
POSTGRESQL_USERNAME:cdapio-github-builds/POSTGRESQL_USERNAME
POSTGRESQL_PASSWORD:cdapio-github-builds/POSTGRESQL_PASSWORD
POSTGRESQL_PORT:cdapio-github-builds/POSTGRESQL_PORT
+ CLOUDSQL_POSTGRESQL_USERNAME:cdapio-github-builds/CLOUDSQL_POSTGRESQL_USERNAME
+ CLOUDSQL_POSTGRESQL_PASSWORD:cdapio-github-builds/CLOUDSQL_POSTGRESQL_PASSWORD
+ CLOUDSQL_POSTGRESQL_CONNECTION_NAME:cdapio-github-builds/CLOUDSQL_POSTGRESQL_CONNECTION_NAME
+ CLOUDSQL_MYSQL_USERNAME:cdapio-github-builds/CLOUDSQL_MYSQL_USERNAME
+ CLOUDSQL_MYSQL_PASSWORD:cdapio-github-builds/CLOUDSQL_MYSQL_PASSWORD
+ CLOUDSQL_MYSQL_CONNECTION_NAME:cdapio-github-builds/CLOUDSQL_MYSQL_CONNECTION_NAME
- name: Run required e2e tests
if: github.event_name != 'workflow_dispatch' && github.event_name != 'push' && steps.filter.outputs.e2e-test == 'false'
@@ -116,6 +123,12 @@ jobs:
POSTGRESQL_USERNAME: ${{ steps.secrets.outputs.POSTGRESQL_USERNAME }}
POSTGRESQL_PASSWORD: ${{ steps.secrets.outputs.POSTGRESQL_PASSWORD }}
POSTGRESQL_PORT: ${{ steps.secrets.outputs.POSTGRESQL_PORT }}
+ CLOUDSQL_POSTGRESQL_USERNAME: ${{ steps.secrets.outputs.CLOUDSQL_POSTGRESQL_USERNAME }}
+ CLOUDSQL_POSTGRESQL_PASSWORD: ${{ steps.secrets.outputs.CLOUDSQL_POSTGRESQL_PASSWORD }}
+ CLOUDSQL_POSTGRESQL_CONNECTION_NAME: ${{ steps.secrets.outputs.CLOUDSQL_POSTGRESQL_CONNECTION_NAME }}
+ CLOUDSQL_MYSQL_USERNAME: ${{ steps.secrets.outputs.CLOUDSQL_MYSQL_USERNAME }}
+ CLOUDSQL_MYSQL_PASSWORD: ${{ steps.secrets.outputs.CLOUDSQL_MYSQL_PASSWORD }}
+ CLOUDSQL_MYSQL_CONNECTION_NAME: ${{ steps.secrets.outputs.CLOUDSQL_MYSQL_CONNECTION_NAME }}
- name: Run all e2e tests
if: github.event_name == 'workflow_dispatch' || github.event_name == 'push' || steps.filter.outputs.e2e-test == 'true'
@@ -137,25 +150,28 @@ jobs:
POSTGRESQL_USERNAME: ${{ steps.secrets.outputs.POSTGRESQL_USERNAME }}
POSTGRESQL_PASSWORD: ${{ steps.secrets.outputs.POSTGRESQL_PASSWORD }}
POSTGRESQL_PORT: ${{ steps.secrets.outputs.POSTGRESQL_PORT }}
-
- - name: Upload report
- uses: actions/upload-artifact@v3
- if: always()
- with:
- name: Cucumber report - ${{ matrix.module }}
- path: ./**/target/cucumber-reports
+ CLOUDSQL_POSTGRESQL_USERNAME: ${{ steps.secrets.outputs.CLOUDSQL_POSTGRESQL_USERNAME }}
+ CLOUDSQL_POSTGRESQL_PASSWORD: ${{ steps.secrets.outputs.CLOUDSQL_POSTGRESQL_PASSWORD }}
+ CLOUDSQL_POSTGRESQL_CONNECTION_NAME: ${{ steps.secrets.outputs.CLOUDSQL_POSTGRESQL_CONNECTION_NAME }}
+ CLOUDSQL_MYSQL_USERNAME: ${{ steps.secrets.outputs.CLOUDSQL_MYSQL_USERNAME }}
+ CLOUDSQL_MYSQL_PASSWORD: ${{ steps.secrets.outputs.CLOUDSQL_MYSQL_PASSWORD }}
+ CLOUDSQL_MYSQL_CONNECTION_NAME: ${{ steps.secrets.outputs.CLOUDSQL_MYSQL_CONNECTION_NAME }}
- name: Upload debug files
- uses: actions/upload-artifact@v3
+ uses: actions/upload-artifact@v4
if: always()
with:
name: Debug files - ${{ matrix.module }}
path: ./**/target/e2e-debug
- name: Upload files to GCS
- uses: google-github-actions/upload-cloud-storage@v0
+ uses: google-github-actions/upload-cloud-storage@v2
if: always()
with:
path: ./plugin
destination: e2e-tests-cucumber-reports/${{ github.event.repository.name }}/${{ github.ref }}
glob: '**/target/cucumber-reports/**'
+
+ - name: Cucumber Report URL
+ if: always()
+ run: echo "https://storage.googleapis.com/e2e-tests-cucumber-reports/${{ github.event.repository.name }}/${{ github.ref }}/plugin/${{ matrix.module }}/target/cucumber-reports/advanced-reports/cucumber-html-reports/overview-features.html"
diff --git a/.github/workflows/trigger.yml b/.github/workflows/trigger.yml
deleted file mode 100644
index 11db8ac25..000000000
--- a/.github/workflows/trigger.yml
+++ /dev/null
@@ -1,46 +0,0 @@
-# Copyright © 2022 Cask Data, Inc.
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-# http://www.apache.org/licenses/LICENSE-2.0
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-
-# This workflow will trigger build.yml only when needed.
-# This way we don't flood main workflow run list
-# Note that build.yml from develop will be used even for PR builds
-# Also it will have access to the proper GITHUB_SECRET
-
-name: Trigger build
-
-on:
- push:
- branches: [ develop, release/** ]
- pull_request:
- branches: [ develop, release/** ]
- types: [opened, synchronize, reopened, labeled]
- workflow_dispatch:
-
-jobs:
- trigger:
- runs-on: ubuntu-latest
-
- # We allow builds:
- # 1) When triggered manually
- # 2) When it's a merge into a branch
- # 3) For PRs that are labeled as build and
- # - It's a code change
- # - A build label was just added
- # A bit complex, but prevents builds when other labels are manipulated
- if: >
- github.event_name == 'workflow_dispatch'
- || github.event_name == 'push'
- || (contains(github.event.pull_request.labels.*.name, 'build')
- && (github.event.action != 'labeled' || github.event.label.name == 'build')
- )
- steps:
- - name: Trigger build
- run: echo Maven build will be triggered now
\ No newline at end of file
diff --git a/amazon-redshift-plugin/docs/Redshift-batchsource.md b/amazon-redshift-plugin/docs/Redshift-batchsource.md
new file mode 100644
index 000000000..38873b15a
--- /dev/null
+++ b/amazon-redshift-plugin/docs/Redshift-batchsource.md
@@ -0,0 +1,102 @@
+# Amazon Redshift Batch Source
+
+Description
+-----------
+Reads from an Amazon Redshift database using a configurable SQL query.
+Outputs one record for each row returned by the query.
+
+
+Use Case
+--------
+The source is used whenever you need to read from an Amazon Redshift database. For example, you may want
+to create daily snapshots of a database table by using this source and writing to
+a TimePartitionedFileSet.
+
+
+Properties
+----------
+**Reference Name:** Name used to uniquely identify this source for lineage, annotating metadata, etc.
+
+**JDBC Driver name:** Name of the JDBC driver to use.
+
+**Host:** Host URL of the current master instance of the Redshift cluster.
+
+**Port:** Port on which the Redshift master instance is listening.
+
+**Database:** Redshift database name.
+
+**Import Query:** The SELECT query to use to import data from the specified table.
+You can specify an arbitrary number of columns to import, or import all columns using \*. The query should
+contain the '$CONDITIONS' string. For example, 'SELECT * FROM table WHERE $CONDITIONS'.
+The '$CONDITIONS' string will be replaced by limits on the 'splitBy' field derived from the bounding query.
+The '$CONDITIONS' string is not required if numSplits is set to one.
+
+**Bounding Query:** Bounding Query should return the min and max of the values of the 'splitBy' field.
+For example, 'SELECT MIN(id),MAX(id) FROM table'. Not required if numSplits is set to one.
+
+**Split-By Field Name:** Field Name which will be used to generate splits. Not required if numSplits is set to one.
+
+**Number of Splits to Generate:** Number of splits to generate.
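+
+For example, a hypothetical `users` table with a numeric `id` column could be read in four parallel splits
+with a configuration like the following (illustrative values only):
+
+```
+Import Query: "SELECT id, name, email FROM users WHERE $CONDITIONS"
+Bounding Query: "SELECT MIN(id), MAX(id) FROM users"
+Split-By Field Name: "id"
+Number of Splits to Generate: 4
+```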
+
+**Username:** User identity for connecting to the specified database.
+
+**Password:** Password to use to connect to the specified database.
+
+**Connection Arguments:** A list of arbitrary string key/value pairs that will be passed to the JDBC driver
+as connection arguments, for drivers that may need additional configuration.
+
+**Schema:** The schema of records output by the source. This will be used in place of whatever schema comes
+back from the query. However, it must match the schema that comes back from the query,
+except it can mark fields as nullable and can contain a subset of the fields.
+
+**Fetch Size:** The number of rows to fetch at a time per split. A larger fetch size can result in a faster import,
+at the cost of higher memory usage.
+
+Example
+------
+Suppose you want to read data from an Amazon Redshift database named "prod" that is running on
+"redshift.xyz.eu-central-1.redshift.amazonaws.com", port 5439, as user "sa" with password "Test11".
+Ensure that the driver for Redshift is installed (you can also provide the name of a specific driver;
+otherwise "redshift" will be used), then configure the plugin with:
+
+```
+Reference Name: "src1"
+Driver Name: "redshift"
+Host: "redshift.xyz.eu-central-1.redshift.amazonaws.com"
+Port: 5439
+Database: "prod"
+Import Query: "select id, name, email, phone from users;"
+Number of Splits to Generate: 1
+Username: "sa"
+Password: "Test11"
+```
+
+Data Types Mapping
+------------------
+
+Mapping of Redshift types to CDAP schema:
+
+| Redshift Data Type | CDAP Schema Data Type | Comment |
+|-----------------------------------------------------|-----------------------|----------------------------------|
+| bigint | long | |
+| boolean | boolean | |
+| character | string | |
+| character varying | string | |
+| double precision | double | |
+| integer | int | |
+| numeric(precision, scale)/decimal(precision, scale) | decimal | |
+| numeric(with 0 precision) | string | |
+| real | float | |
+| smallint | int | |
+| smallserial | int | |
+| text | string | |
+| date | date | |
+| time [ (p) ] [ without time zone ] | time | |
+| time [ (p) ] with time zone | string | |
+| timestamp [ (p) ] [ without time zone ] | timestamp | |
+| timestamp [ (p) ] with time zone | timestamp | stored in UTC format in database |
+| xml | string | |
+| json | string | |
+| super | string | |
+| geometry | bytes | |
+| hllsketch | string | |
diff --git a/amazon-redshift-plugin/docs/Redshift-connector.md b/amazon-redshift-plugin/docs/Redshift-connector.md
new file mode 100644
index 000000000..368d9e09f
--- /dev/null
+++ b/amazon-redshift-plugin/docs/Redshift-connector.md
@@ -0,0 +1,26 @@
+# Amazon Redshift Connection
+
+Description
+-----------
+Use this connection to access data in an Amazon Redshift database using JDBC.
+
+Properties
+----------
+**Name:** Name of the connection. Connection names must be unique in a namespace.
+
+**Description:** Description of the connection.
+
+**JDBC Driver name:** Name of the JDBC driver to use.
+
+**Host:** Host of the current master instance of the Redshift cluster.
+
+**Port:** Port on which the Redshift master instance is listening.
+
+**Database:** Redshift database name.
+
+**Username:** User identity for connecting to the specified database.
+
+**Password:** Password to use to connect to the specified database.
+
+**Connection Arguments:** A list of arbitrary string key/value pairs that will be passed to the JDBC driver
+as connection arguments, for drivers that may need additional configuration.
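+
+For example, a minimal connection could be configured as shown below (illustrative values; the connector builds
+a JDBC URL of the form jdbc:redshift://<host>:<port>/<database> from these properties):
+
+```
+Name: "redshift-prod"
+JDBC Driver name: "redshift"
+Host: "redshift.xyz.eu-central-1.redshift.amazonaws.com"
+Port: 5439
+Database: "prod"
+Username: "sa"
+Password: "Test11"
+```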
diff --git a/amazon-redshift-plugin/icons/Redshift-batchsource.png b/amazon-redshift-plugin/icons/Redshift-batchsource.png
new file mode 100644
index 000000000..11c334799
Binary files /dev/null and b/amazon-redshift-plugin/icons/Redshift-batchsource.png differ
diff --git a/amazon-redshift-plugin/pom.xml b/amazon-redshift-plugin/pom.xml
new file mode 100644
index 000000000..1d79a6283
--- /dev/null
+++ b/amazon-redshift-plugin/pom.xml
@@ -0,0 +1,139 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+  <parent>
+    <artifactId>database-plugins-parent</artifactId>
+    <groupId>io.cdap.plugin</groupId>
+    <version>1.10.8</version>
+  </parent>
+
+  <name>Amazon Redshift plugin</name>
+  <artifactId>amazon-redshift-plugin</artifactId>
+  <modelVersion>4.0.0</modelVersion>
+
+  <properties>
+    <redshift-jdbc.version>2.1.0.18</redshift-jdbc.version>
+  </properties>
+
+  <repositories>
+    <repository>
+      <id>redshift</id>
+      <url>http://redshift-maven-repository.s3-website-us-east-1.amazonaws.com/release</url>
+    </repository>
+  </repositories>
+
+  <dependencies>
+    <dependency>
+      <groupId>io.cdap.cdap</groupId>
+      <artifactId>cdap-etl-api</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.cdap.plugin</groupId>
+      <artifactId>database-commons</artifactId>
+      <version>${project.version}</version>
+    </dependency>
+    <dependency>
+      <groupId>io.cdap.plugin</groupId>
+      <artifactId>hydrator-common</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.google.guava</groupId>
+      <artifactId>guava</artifactId>
+    </dependency>
+
+    <dependency>
+      <groupId>com.amazon.redshift</groupId>
+      <artifactId>redshift-jdbc42</artifactId>
+      <version>${redshift-jdbc.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>io.cdap.plugin</groupId>
+      <artifactId>database-commons</artifactId>
+      <version>${project.version}</version>
+      <type>test-jar</type>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>io.cdap.cdap</groupId>
+      <artifactId>hydrator-test</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>io.cdap.cdap</groupId>
+      <artifactId>cdap-data-pipeline3_2.12</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>org.mockito</groupId>
+      <artifactId>mockito-core</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>io.cdap.cdap</groupId>
+      <artifactId>cdap-api</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.jetbrains</groupId>
+      <artifactId>annotations</artifactId>
+      <version>RELEASE</version>
+      <scope>compile</scope>
+    </dependency>
+  </dependencies>
+
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>io.cdap</groupId>
+        <artifactId>cdap-maven-plugin</artifactId>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.felix</groupId>
+        <artifactId>maven-bundle-plugin</artifactId>
+        <version>5.1.2</version>
+        <extensions>true</extensions>
+        <configuration>
+          <instructions>
+            <_exportcontents>
+              io.cdap.plugin.amazon.redshift.*;
+              io.cdap.plugin.db.source.*;
+              org.apache.commons.lang;
+              org.apache.commons.logging.*;
+              org.codehaus.jackson.*
+            </_exportcontents>
+            <Embed-Dependency>*;inline=false;scope=compile</Embed-Dependency>
+            <Embed-Transitive>true</Embed-Transitive>
+            <Embed-Directory>lib</Embed-Directory>
+          </instructions>
+        </configuration>
+        <executions>
+          <execution>
+            <phase>package</phase>
+            <goals>
+              <goal>bundle</goal>
+            </goals>
+          </execution>
+        </executions>
+      </plugin>
+    </plugins>
+  </build>
+
+</project>
diff --git a/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftConnector.java b/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftConnector.java
new file mode 100644
index 000000000..fb8cac4a7
--- /dev/null
+++ b/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftConnector.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.amazon.redshift;
+
+import io.cdap.cdap.api.annotation.Category;
+import io.cdap.cdap.api.annotation.Description;
+import io.cdap.cdap.api.annotation.Name;
+import io.cdap.cdap.api.annotation.Plugin;
+import io.cdap.cdap.api.data.format.StructuredRecord;
+import io.cdap.cdap.etl.api.batch.BatchSource;
+import io.cdap.cdap.etl.api.connector.Connector;
+import io.cdap.cdap.etl.api.connector.ConnectorSpec;
+import io.cdap.cdap.etl.api.connector.ConnectorSpecRequest;
+import io.cdap.cdap.etl.api.connector.PluginSpec;
+import io.cdap.plugin.common.Constants;
+import io.cdap.plugin.common.ReferenceNames;
+import io.cdap.plugin.common.db.DBConnectorPath;
+import io.cdap.plugin.common.db.DBPath;
+import io.cdap.plugin.db.SchemaReader;
+import io.cdap.plugin.db.connector.AbstractDBSpecificConnector;
+import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+/**
+ * Amazon Redshift Database Connector that connects to Amazon Redshift database via JDBC.
+ */
+@Plugin(type = Connector.PLUGIN_TYPE)
+@Name(RedshiftConnector.NAME)
+@Description("Connection to access data in Amazon Redshift using JDBC.")
+@Category("Database")
+public class RedshiftConnector extends AbstractDBSpecificConnector<LongWritable, RedshiftDBRecord> {
+ public static final String NAME = RedshiftConstants.PLUGIN_NAME;
+ private final RedshiftConnectorConfig config;
+
+ public RedshiftConnector(RedshiftConnectorConfig config) {
+ super(config);
+ this.config = config;
+ }
+
+ @Override
+ protected DBConnectorPath getDBConnectorPath(String path) throws IOException {
+ return new DBPath(path, true);
+ }
+
+ @Override
+ public boolean supportSchema() {
+ return true;
+ }
+
+ @Override
+ protected Class<? extends DBWritable> getDBRecordType() {
+ return RedshiftDBRecord.class;
+ }
+
+ @Override
+ public StructuredRecord transform(LongWritable longWritable, RedshiftDBRecord redshiftDBRecord) {
+ return redshiftDBRecord.getRecord();
+ }
+
+ @Override
+ protected SchemaReader getSchemaReader(String sessionID) {
+ return new RedshiftSchemaReader(sessionID);
+ }
+
+ @Override
+ protected String getTableName(String database, String schema, String table) {
+ return String.format("\"%s\".\"%s\"", schema, table);
+ }
+
+ @Override
+ protected String getRandomQuery(String tableName, int limit) {
+ return String.format("SELECT * FROM %s\n" +
+ "TABLESAMPLE BERNOULLI (100.0 * %d / (SELECT COUNT(*) FROM %s))",
+ tableName, limit, tableName);
+ }
+
+ @Override
+ protected void setConnectorSpec(ConnectorSpecRequest request, DBConnectorPath path,
+ ConnectorSpec.Builder builder) {
+ Map<String, String> sourceProperties = new HashMap<>();
+ setConnectionProperties(sourceProperties, request);
+ builder
+ .addRelatedPlugin(new PluginSpec(RedshiftConstants.PLUGIN_NAME,
+ BatchSource.PLUGIN_TYPE, sourceProperties));
+
+ String schema = path.getSchema();
+ sourceProperties.put(RedshiftSource.RedshiftSourceConfig.NUM_SPLITS, "1");
+ sourceProperties.put(RedshiftSource.RedshiftSourceConfig.FETCH_SIZE,
+ RedshiftSource.RedshiftSourceConfig.DEFAULT_FETCH_SIZE);
+ String table = path.getTable();
+ if (table == null) {
+ return;
+ }
+ sourceProperties.put(RedshiftSource.RedshiftSourceConfig.IMPORT_QUERY,
+ getTableQuery(path.getDatabase(), schema, table));
+ sourceProperties.put(Constants.Reference.REFERENCE_NAME, ReferenceNames.cleanseReferenceName(table));
+ }
+
+}
diff --git a/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftConnectorConfig.java b/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftConnectorConfig.java
new file mode 100644
index 000000000..bae0013b3
--- /dev/null
+++ b/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftConnectorConfig.java
@@ -0,0 +1,87 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.amazon.redshift;
+
+import io.cdap.cdap.api.annotation.Description;
+import io.cdap.cdap.api.annotation.Macro;
+import io.cdap.cdap.api.annotation.Name;
+import io.cdap.plugin.db.ConnectionConfig;
+import io.cdap.plugin.db.connector.AbstractDBConnectorConfig;
+
+import javax.annotation.Nullable;
+
+/**
+ * Configuration for Redshift connector
+ */
+public class RedshiftConnectorConfig extends AbstractDBConnectorConfig {
+
+ @Name(ConnectionConfig.HOST)
+ @Description(
+ "The endpoint of the Amazon Redshift cluster.")
+ @Macro
+ private String host;
+
+ @Name(ConnectionConfig.PORT)
+ @Description("Database port number")
+ @Macro
+ @Nullable
+ private Integer port;
+
+ @Name(ConnectionConfig.DATABASE)
+ @Description("Database name to connect to")
+ @Macro
+ private String database;
+
+ public RedshiftConnectorConfig(String username, String password, String jdbcPluginName,
+ String connectionArguments, String host,
+ String database, @Nullable Integer port) {
+ this.user = username;
+ this.password = password;
+ this.jdbcPluginName = jdbcPluginName;
+ this.connectionArguments = connectionArguments;
+ this.host = host;
+ this.database = database;
+ this.port = port;
+ }
+
+ public String getDatabase() {
+ return database;
+ }
+
+ public String getHost() {
+ return host;
+ }
+
+ public int getPort() {
+ return port == null ? 5439 : port;
+ }
+
+ @Override
+ public String getConnectionString() {
+ return String.format(
+ RedshiftConstants.REDSHIFT_CONNECTION_STRING_FORMAT,
+ host,
+ getPort(),
+ database);
+ }
+
+ @Override
+ public boolean canConnect() {
+ return super.canConnect() && !containsMacro(ConnectionConfig.HOST) &&
+ !containsMacro(ConnectionConfig.PORT) && !containsMacro(ConnectionConfig.DATABASE);
+ }
+}
diff --git a/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftConstants.java b/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftConstants.java
new file mode 100644
index 000000000..081052fb1
--- /dev/null
+++ b/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftConstants.java
@@ -0,0 +1,27 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.amazon.redshift;
+
+/** Amazon Redshift constants. */
+public final class RedshiftConstants {
+
+ private RedshiftConstants() {
+ }
+
+ public static final String PLUGIN_NAME = "Redshift";
+ public static final String REDSHIFT_CONNECTION_STRING_FORMAT = "jdbc:redshift://%s:%s/%s";
+}
diff --git a/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftDBRecord.java b/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftDBRecord.java
new file mode 100644
index 000000000..38e9140d8
--- /dev/null
+++ b/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftDBRecord.java
@@ -0,0 +1,129 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.amazon.redshift;
+
+import io.cdap.cdap.api.data.format.StructuredRecord;
+import io.cdap.cdap.api.data.schema.Schema;
+import io.cdap.plugin.db.DBRecord;
+import io.cdap.plugin.db.SchemaReader;
+import io.cdap.plugin.util.DBUtils;
+
+import java.math.BigDecimal;
+import java.math.RoundingMode;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Timestamp;
+import java.sql.Types;
+import java.time.OffsetDateTime;
+import java.time.ZoneId;
+import java.time.ZonedDateTime;
+
+/**
+ * Writable class for Redshift Source
+ */
+public class RedshiftDBRecord extends DBRecord {
+
+ /**
+ * Used in map-reduce. Do not remove.
+ */
+ @SuppressWarnings("unused")
+ public RedshiftDBRecord() {
+ }
+
+ @Override
+ protected void handleField(ResultSet resultSet, StructuredRecord.Builder recordBuilder, Schema.Field field,
+ int columnIndex, int sqlType, int sqlPrecision, int sqlScale) throws SQLException {
+ ResultSetMetaData metadata = resultSet.getMetaData();
+ String columnTypeName = metadata.getColumnTypeName(columnIndex);
+ if (isUseSchema(metadata, columnIndex)) {
+ setFieldAccordingToSchema(resultSet, recordBuilder, field, columnIndex);
+ return;
+ }
+
+ // HandleTimestamp
+ if (sqlType == Types.TIMESTAMP && columnTypeName.equalsIgnoreCase("timestamp")) {
+ Timestamp timestamp = resultSet.getTimestamp(columnIndex, DBUtils.PURE_GREGORIAN_CALENDAR);
+ if (timestamp != null) {
+ ZonedDateTime zonedDateTime = OffsetDateTime.of(timestamp.toLocalDateTime(), OffsetDateTime.now().getOffset())
+ .atZoneSameInstant(ZoneId.of("UTC"));
+ Schema nonNullableSchema = field.getSchema().isNullable() ?
+ field.getSchema().getNonNullable() : field.getSchema();
+ setZonedDateTimeBasedOnOutputSchema(recordBuilder, nonNullableSchema.getLogicalType(),
+ field.getName(), zonedDateTime);
+ } else {
+ recordBuilder.set(field.getName(), null);
+ }
+ return;
+ }
+
+ // HandleTimestampTZ
+ if (sqlType == Types.TIMESTAMP && columnTypeName.equalsIgnoreCase("timestamptz")) {
+ OffsetDateTime timestamp = resultSet.getObject(columnIndex, OffsetDateTime.class);
+ if (timestamp != null) {
+ recordBuilder.setTimestamp(field.getName(), timestamp.atZoneSameInstant(ZoneId.of("UTC")));
+ } else {
+ recordBuilder.set(field.getName(), null);
+ }
+ return;
+ }
+
+ // HandleNumeric
+ int columnType = metadata.getColumnType(columnIndex);
+ if (columnType == Types.NUMERIC) {
+ Schema nonNullableSchema = field.getSchema().isNullable() ?
+ field.getSchema().getNonNullable() : field.getSchema();
+ int precision = metadata.getPrecision(columnIndex);
+ if (precision == 0 && Schema.Type.STRING.equals(nonNullableSchema.getType())) {
+ // When the output schema is set to String for precision-less numbers
+ recordBuilder.set(field.getName(), resultSet.getString(columnIndex));
+ } else if (Schema.LogicalType.DECIMAL.equals(nonNullableSchema.getLogicalType())) {
+ BigDecimal originalDecimalValue = resultSet.getBigDecimal(columnIndex);
+ if (originalDecimalValue != null) {
+ BigDecimal newDecimalValue = new BigDecimal(originalDecimalValue.toPlainString())
+ .setScale(nonNullableSchema.getScale(), RoundingMode.HALF_EVEN);
+ recordBuilder.setDecimal(field.getName(), newDecimalValue);
+ }
+ }
+ return;
+ }
+ setField(resultSet, recordBuilder, field, columnIndex, sqlType, sqlPrecision, sqlScale);
+ }
+
+ private void setZonedDateTimeBasedOnOutputSchema(StructuredRecord.Builder recordBuilder,
+ Schema.LogicalType logicalType,
+ String fieldName,
+ ZonedDateTime zonedDateTime) {
+ if (Schema.LogicalType.DATETIME.equals(logicalType)) {
+ recordBuilder.setDateTime(fieldName, zonedDateTime.toLocalDateTime());
+ } else if (Schema.LogicalType.TIMESTAMP_MICROS.equals(logicalType)) {
+ recordBuilder.setTimestamp(fieldName, zonedDateTime);
+ }
+ }
+
+ private static boolean isUseSchema(ResultSetMetaData metadata, int columnIndex) throws SQLException {
+ String columnTypeName = metadata.getColumnTypeName(columnIndex);
+ // If the column Type Name is present in the String mapped Redshift types then return true.
+ return RedshiftSchemaReader.STRING_MAPPED_REDSHIFT_TYPES_NAMES.contains(columnTypeName);
+ }
+
+ @Override
+ protected SchemaReader getSchemaReader() {
+ return new RedshiftSchemaReader();
+ }
+
+}
diff --git a/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftSchemaReader.java b/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftSchemaReader.java
new file mode 100644
index 000000000..df9938a45
--- /dev/null
+++ b/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftSchemaReader.java
@@ -0,0 +1,117 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.amazon.redshift;
+
+import com.google.common.collect.ImmutableSet;
+import com.google.common.collect.Lists;
+import io.cdap.cdap.api.data.schema.Schema;
+import io.cdap.plugin.db.CommonSchemaReader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.List;
+import java.util.Set;
+
+/**
+ * Redshift Schema Reader class
+ */
+public class RedshiftSchemaReader extends CommonSchemaReader {
+
+ private static final Logger LOG = LoggerFactory.getLogger(RedshiftSchemaReader.class);
+
+ public static final Set<String> STRING_MAPPED_REDSHIFT_TYPES_NAMES = ImmutableSet.of(
+ "timetz", "money"
+ );
+
+ private final String sessionID;
+
+ public RedshiftSchemaReader() {
+ this(null);
+ }
+
+ public RedshiftSchemaReader(String sessionID) {
+ super();
+ this.sessionID = sessionID;
+ }
+
+ @Override
+ public Schema getSchema(ResultSetMetaData metadata, int index) throws SQLException {
+ String typeName = metadata.getColumnTypeName(index);
+ int columnType = metadata.getColumnType(index);
+
+ if (STRING_MAPPED_REDSHIFT_TYPES_NAMES.contains(typeName)) {
+ return Schema.of(Schema.Type.STRING);
+ }
+ if (typeName.equalsIgnoreCase("INT")) {
+ return Schema.of(Schema.Type.INT);
+ }
+ if (typeName.equalsIgnoreCase("BIGINT")) {
+ return Schema.of(Schema.Type.LONG);
+ }
+
+ // If it is a numeric type without precision then use the Schema of String to avoid any precision loss
+ if (Types.NUMERIC == columnType) {
+ int precision = metadata.getPrecision(index);
+ if (precision == 0) {
+ LOG.warn(String.format("Field '%s' is a %s type without precision and scale, "
+ + "converting into STRING type to avoid any precision loss.",
+ metadata.getColumnName(index),
+ metadata.getColumnTypeName(index)));
+ return Schema.of(Schema.Type.STRING);
+ }
+ }
+
+ if (typeName.equalsIgnoreCase("timestamp")) {
+ return Schema.of(Schema.LogicalType.DATETIME);
+ }
+
+ return super.getSchema(metadata, index);
+ }
+
+ @Override
+ public boolean shouldIgnoreColumn(ResultSetMetaData metadata, int index) throws SQLException {
+ if (sessionID == null) {
+ return false;
+ }
+ return metadata.getColumnName(index).equals("c_" + sessionID) ||
+ metadata.getColumnName(index).equals("sqn_" + sessionID);
+ }
+
+ @Override
+ public List<Schema.Field> getSchemaFields(ResultSet resultSet) throws SQLException {
+ List<Schema.Field> schemaFields = Lists.newArrayList();
+ ResultSetMetaData metadata = resultSet.getMetaData();
+ // ResultSetMetadata columns are numbered starting with 1
+ for (int i = 1; i <= metadata.getColumnCount(); i++) {
+ if (shouldIgnoreColumn(metadata, i)) {
+ continue;
+ }
+ String columnName = metadata.getColumnName(i);
+ Schema columnSchema = getSchema(metadata, i);
+ // Set the schema as nullable, since the JDBC driver doesn't provide reliable isNullable information.
+ columnSchema = Schema.nullableOf(columnSchema);
+ Schema.Field field = Schema.Field.of(columnName, columnSchema);
+ schemaFields.add(field);
+ }
+ return schemaFields;
+ }
+
+}
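The schema mapping above (timetz and money to string, timestamp to datetime, precision-less numeric to string)
can be exercised against mocked JDBC metadata. The sketch below is not part of this change set; it follows the
Mockito style of the unit tests later in this diff, and the class name, column indexes, and type names are
illustrative.

```java
// Minimal sketch of a unit test for RedshiftSchemaReader's type mapping (illustrative, not part of this PR).
package io.cdap.plugin.amazon.redshift;

import io.cdap.cdap.api.data.schema.Schema;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.Mockito;

import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.sql.Types;

public class RedshiftSchemaReaderSketchTest {

  @Test
  public void mapsRedshiftTypesToCdapSchema() throws SQLException {
    ResultSetMetaData metadata = Mockito.mock(ResultSetMetaData.class);

    // timetz is in STRING_MAPPED_REDSHIFT_TYPES_NAMES, so it maps to STRING.
    Mockito.when(metadata.getColumnTypeName(1)).thenReturn("timetz");
    // timestamp (without time zone) maps to DATETIME.
    Mockito.when(metadata.getColumnTypeName(2)).thenReturn("timestamp");
    Mockito.when(metadata.getColumnType(2)).thenReturn(Types.TIMESTAMP);
    // A numeric column with zero precision falls back to STRING to avoid precision loss.
    Mockito.when(metadata.getColumnTypeName(3)).thenReturn("numeric");
    Mockito.when(metadata.getColumnType(3)).thenReturn(Types.NUMERIC);
    Mockito.when(metadata.getPrecision(3)).thenReturn(0);

    RedshiftSchemaReader reader = new RedshiftSchemaReader();
    Assert.assertEquals(Schema.of(Schema.Type.STRING), reader.getSchema(metadata, 1));
    Assert.assertEquals(Schema.of(Schema.LogicalType.DATETIME), reader.getSchema(metadata, 2));
    Assert.assertEquals(Schema.of(Schema.Type.STRING), reader.getSchema(metadata, 3));
  }
}
```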
diff --git a/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftSource.java b/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftSource.java
new file mode 100644
index 000000000..6a0df3a2d
--- /dev/null
+++ b/amazon-redshift-plugin/src/main/java/io/cdap/plugin/amazon/redshift/RedshiftSource.java
@@ -0,0 +1,136 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.amazon.redshift;
+
+import com.google.common.annotations.VisibleForTesting;
+import io.cdap.cdap.api.annotation.Description;
+import io.cdap.cdap.api.annotation.Macro;
+import io.cdap.cdap.api.annotation.Metadata;
+import io.cdap.cdap.api.annotation.MetadataProperty;
+import io.cdap.cdap.api.annotation.Name;
+import io.cdap.cdap.api.annotation.Plugin;
+import io.cdap.cdap.etl.api.FailureCollector;
+import io.cdap.cdap.etl.api.batch.BatchSource;
+import io.cdap.cdap.etl.api.batch.BatchSourceContext;
+import io.cdap.cdap.etl.api.connector.Connector;
+import io.cdap.plugin.common.Asset;
+import io.cdap.plugin.common.ConfigUtil;
+import io.cdap.plugin.common.LineageRecorder;
+import io.cdap.plugin.db.SchemaReader;
+import io.cdap.plugin.db.config.AbstractDBSpecificSourceConfig;
+import io.cdap.plugin.db.source.AbstractDBSource;
+import io.cdap.plugin.util.DBUtils;
+import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+
+import java.util.Collections;
+import java.util.Map;
+import javax.annotation.Nullable;
+
+/**
+ * Batch source to read from an Amazon Redshift database.
+ */
+@Plugin(type = BatchSource.PLUGIN_TYPE)
+@Name(RedshiftConstants.PLUGIN_NAME)
+@Description(
+ "Reads from a Amazon Redshift database table(s) using a configurable SQL query."
+ + " Outputs one record for each row returned by the query.")
+@Metadata(properties = {@MetadataProperty(key = Connector.PLUGIN_TYPE, value = RedshiftConnector.NAME)})
+public class RedshiftSource
+ extends AbstractDBSource<RedshiftSource.RedshiftSourceConfig> {
+
+ private final RedshiftSourceConfig redshiftSourceConfig;
+
+ public RedshiftSource(RedshiftSourceConfig redshiftSourceConfig) {
+ super(redshiftSourceConfig);
+ this.redshiftSourceConfig = redshiftSourceConfig;
+ }
+
+ @Override
+ protected SchemaReader getSchemaReader() {
+ return new RedshiftSchemaReader();
+ }
+
+ @Override
+ protected Class<? extends DBWritable> getDBRecordType() {
+ return RedshiftDBRecord.class;
+ }
+
+ @Override
+ protected String createConnectionString() {
+ return String.format(
+ RedshiftConstants.REDSHIFT_CONNECTION_STRING_FORMAT,
+ redshiftSourceConfig.connection.getHost(),
+ redshiftSourceConfig.connection.getPort(),
+ redshiftSourceConfig.connection.getDatabase());
+ }
+
+ @Override
+ protected LineageRecorder getLineageRecorder(BatchSourceContext context) {
+ String fqn = DBUtils.constructFQN("redshift", redshiftSourceConfig.getConnection().getHost(),
+ redshiftSourceConfig.getConnection().getPort(),
+ redshiftSourceConfig.getConnection().getDatabase(),
+ redshiftSourceConfig.getReferenceName());
+ Asset.Builder assetBuilder = Asset.builder(redshiftSourceConfig.getReferenceName()).setFqn(fqn);
+ return new LineageRecorder(context, assetBuilder.build());
+ }
+
+ /**
+ * Redshift source config.
+ */
+ public static class RedshiftSourceConfig extends AbstractDBSpecificSourceConfig {
+
+ @Name(ConfigUtil.NAME_USE_CONNECTION)
+ @Nullable
+ @Description("Whether to use an existing connection.")
+ private Boolean useConnection;
+
+ @Name(ConfigUtil.NAME_CONNECTION)
+ @Macro
+ @Nullable
+ @Description("The existing connection to use.")
+ private RedshiftConnectorConfig connection;
+
+ @Override
+ public Map<String, String> getDBSpecificArguments() {
+ return Collections.emptyMap();
+ }
+
+ @VisibleForTesting
+ public RedshiftSourceConfig(@Nullable Boolean useConnection,
+ @Nullable RedshiftConnectorConfig connection) {
+ this.useConnection = useConnection;
+ this.connection = connection;
+ }
+
+ @Override
+ public Integer getFetchSize() {
+ Integer fetchSize = super.getFetchSize();
+ return fetchSize == null ? Integer.parseInt(DEFAULT_FETCH_SIZE) : fetchSize;
+ }
+
+ @Override
+ protected RedshiftConnectorConfig getConnection() {
+ return connection;
+ }
+
+ @Override
+ public void validate(FailureCollector collector) {
+ ConfigUtil.validateConnection(this, useConnection, connection, collector);
+ super.validate(collector);
+ }
+ }
+}
diff --git a/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftConnectorTest.java b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftConnectorTest.java
new file mode 100644
index 000000000..a43eb4302
--- /dev/null
+++ b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftConnectorTest.java
@@ -0,0 +1,39 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.amazon.redshift;
+
+import io.cdap.plugin.db.connector.DBSpecificConnectorBaseTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+/**
+ * Unit tests for {@link RedshiftConnector}
+ */
+public class RedshiftConnectorTest extends DBSpecificConnectorBaseTest {
+
+ private static final String JDBC_DRIVER_CLASS_NAME = "com.amazon.redshift.Driver";
+
+ @Test
+ public void test() throws IOException, ClassNotFoundException, InstantiationException, IllegalAccessException {
+ test(new RedshiftConnector(
+ new RedshiftConnectorConfig(username, password, JDBC_PLUGIN_NAME, connectionArguments, host, database,
+ port)),
+ JDBC_DRIVER_CLASS_NAME, RedshiftConstants.PLUGIN_NAME);
+ }
+}
+
diff --git a/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftConnectorUnitTest.java b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftConnectorUnitTest.java
new file mode 100644
index 000000000..47e8b0a52
--- /dev/null
+++ b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftConnectorUnitTest.java
@@ -0,0 +1,68 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.amazon.redshift;
+
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+/**
+ * Unit tests for {@link RedshiftConnector}
+ */
+public class RedshiftConnectorUnitTest {
+ @Rule
+ public ExpectedException expectedEx = ExpectedException.none();
+
+ private static final RedshiftConnector CONNECTOR = new RedshiftConnector(null);
+
+ /**
+ * Unit test for getTableName()
+ */
+ @Test
+ public void getTableNameTest() {
+ Assert.assertEquals("\"schema\".\"table\"",
+ CONNECTOR.getTableName("db", "schema", "table"));
+ }
+
+ @Test
+ public void getRandomQuery() {
+ Assert.assertEquals("SELECT * FROM TestData\n" +
+ "TABLESAMPLE BERNOULLI (100.0 * 10 / (SELECT COUNT(*) FROM TestData))",
+ CONNECTOR.getRandomQuery("TestData", 10));
+ }
+
+ @Test
+ public void getDBRecordType() {
+ Assert.assertEquals("class io.cdap.plugin.amazon.redshift.RedshiftDBRecord",
+ CONNECTOR.getDBRecordType().toString());
+ }
+
+ /**
+ * Unit test for getRandomQuery() using a table name built by getTableName()
+ */
+ @Test
+ public void getTableQueryTest() {
+ String tableName = CONNECTOR.getTableName("db", "schema", "table");
+
+ // random query
+ Assert.assertEquals(String.format("SELECT * FROM %s\n" +
+ "TABLESAMPLE BERNOULLI (100.0 * %d / (SELECT COUNT(*) FROM %s))",
+ tableName, 100, tableName),
+ CONNECTOR.getRandomQuery(tableName, 100));
+ }
+}
diff --git a/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftDBRecordUnitTest.java b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftDBRecordUnitTest.java
new file mode 100644
index 000000000..4d11004e4
--- /dev/null
+++ b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftDBRecordUnitTest.java
@@ -0,0 +1,155 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.amazon.redshift;
+
+import io.cdap.cdap.api.data.format.StructuredRecord;
+import io.cdap.cdap.api.data.schema.Schema;
+import io.cdap.plugin.util.DBUtils;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mockito;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import java.math.BigDecimal;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Timestamp;
+import java.sql.Types;
+import java.time.OffsetDateTime;
+import java.time.ZoneId;
+import java.time.ZoneOffset;
+
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.when;
+
+/**
+ * Unit test class for {@link RedshiftDBRecord}
+ */
+@RunWith(MockitoJUnitRunner.class)
+public class RedshiftDBRecordUnitTest {
+
+ private static final int DEFAULT_PRECISION = 38;
+
+ /**
+ * Validate the precision less Numbers handling against following use cases.
+ * 1. Ensure that the numeric type with [p,s] set as [38,4] detect as BigDecimal(38,4) in cdap.
+ * 2. Ensure that the numeric type without [p,s] detect as String type in cdap.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void validatePrecisionLessDecimalParsing() throws Exception {
+ Schema.Field field1 = Schema.Field.of("ID1", Schema.decimalOf(DEFAULT_PRECISION, 4));
+ Schema.Field field2 = Schema.Field.of("ID2", Schema.of(Schema.Type.STRING));
+
+ Schema schema = Schema.recordOf(
+ "dbRecord",
+ field1,
+ field2
+ );
+
+ ResultSetMetaData resultSetMetaData = Mockito.mock(ResultSetMetaData.class);
+ Mockito.when(resultSetMetaData.getColumnType(Mockito.eq(1))).thenReturn(Types.NUMERIC);
+ Mockito.when(resultSetMetaData.getPrecision(Mockito.eq(1))).thenReturn(DEFAULT_PRECISION);
+ Mockito.when(resultSetMetaData.getColumnType(eq(2))).thenReturn(Types.NUMERIC);
+ when(resultSetMetaData.getPrecision(eq(2))).thenReturn(0);
+
+ ResultSet resultSet = Mockito.mock(ResultSet.class);
+
+ when(resultSet.getMetaData()).thenReturn(resultSetMetaData);
+ when(resultSet.getBigDecimal(eq(1))).thenReturn(BigDecimal.valueOf(123.4568));
+ when(resultSet.getString(eq(2))).thenReturn("123.4568");
+
+ StructuredRecord.Builder builder = StructuredRecord.builder(schema);
+ RedshiftDBRecord dbRecord = new RedshiftDBRecord();
+ dbRecord.handleField(resultSet, builder, field1, 1, Types.NUMERIC, DEFAULT_PRECISION, 4);
+ dbRecord.handleField(resultSet, builder, field2, 2, Types.NUMERIC, 0, -127);
+
+ StructuredRecord record = builder.build();
+ Assert.assertTrue(record.getDecimal("ID1") instanceof BigDecimal);
+ Assert.assertEquals(record.getDecimal("ID1"), BigDecimal.valueOf(123.4568));
+ Assert.assertTrue(record.get("ID2") instanceof String);
+ Assert.assertEquals(record.get("ID2"), "123.4568");
+ }
+
+ @Test
+ public void validateTimestampType() throws SQLException {
+ OffsetDateTime offsetDateTime = OffsetDateTime.of(2023, 1, 1, 1, 0, 0, 0, ZoneOffset.UTC);
+ ResultSetMetaData metaData = Mockito.mock(ResultSetMetaData.class);
+ when(metaData.getColumnTypeName(eq(0))).thenReturn("timestamp");
+
+ ResultSet resultSet = Mockito.mock(ResultSet.class);
+ when(resultSet.getMetaData()).thenReturn(metaData);
+ when(resultSet.getTimestamp(eq(0), eq(DBUtils.PURE_GREGORIAN_CALENDAR)))
+ .thenReturn(Timestamp.from(offsetDateTime.toInstant()));
+
+ Schema.Field field1 = Schema.Field.of("field1", Schema.of(Schema.LogicalType.DATETIME));
+ Schema schema = Schema.recordOf(
+ "dbRecord",
+ field1
+ );
+ StructuredRecord.Builder builder = StructuredRecord.builder(schema);
+
+ RedshiftDBRecord dbRecord = new RedshiftDBRecord();
+ dbRecord.handleField(resultSet, builder, field1, 0, Types.TIMESTAMP, 0, 0);
+ StructuredRecord record = builder.build();
+ Assert.assertNotNull(record);
+ Assert.assertNotNull(record.getDateTime("field1"));
+ Assert.assertEquals(record.getDateTime("field1").toInstant(ZoneOffset.UTC), offsetDateTime.toInstant());
+
+ // Validate backward compatibility
+
+ field1 = Schema.Field.of("field1", Schema.of(Schema.LogicalType.TIMESTAMP_MICROS));
+ schema = Schema.recordOf(
+ "dbRecord",
+ field1
+ );
+ builder = StructuredRecord.builder(schema);
+ dbRecord.handleField(resultSet, builder, field1, 0, Types.TIMESTAMP, 0, 0);
+ record = builder.build();
+ Assert.assertNotNull(record);
+ Assert.assertNotNull(record.getTimestamp("field1"));
+ Assert.assertEquals(record.getTimestamp("field1").toInstant(), offsetDateTime.toInstant());
+ }
+
+ @Test
+ public void validateTimestampTZType() throws SQLException {
+ OffsetDateTime offsetDateTime = OffsetDateTime.of(2023, 1, 1, 1, 0, 0, 0, ZoneOffset.UTC);
+ ResultSetMetaData metaData = Mockito.mock(ResultSetMetaData.class);
+ when(metaData.getColumnTypeName(eq(0))).thenReturn("timestamptz");
+
+ ResultSet resultSet = Mockito.mock(ResultSet.class);
+ when(resultSet.getMetaData()).thenReturn(metaData);
+ when(resultSet.getObject(eq(0), eq(OffsetDateTime.class))).thenReturn(offsetDateTime);
+
+ Schema.Field field1 = Schema.Field.of("field1", Schema.of(Schema.LogicalType.TIMESTAMP_MICROS));
+ Schema schema = Schema.recordOf(
+ "dbRecord",
+ field1
+ );
+ StructuredRecord.Builder builder = StructuredRecord.builder(schema);
+
+ RedshiftDBRecord dbRecord = new RedshiftDBRecord();
+ dbRecord.handleField(resultSet, builder, field1, 0, Types.TIMESTAMP, 0, 0);
+ StructuredRecord record = builder.build();
+ Assert.assertNotNull(record);
+ Assert.assertNotNull(record.getTimestamp("field1", ZoneId.of("UTC")));
+ Assert.assertEquals(record.getTimestamp("field1", ZoneId.of("UTC")).toInstant(), offsetDateTime.toInstant());
+ }
+}
diff --git a/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftFailedConnectionTest.java b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftFailedConnectionTest.java
new file mode 100644
index 000000000..2d21c4478
--- /dev/null
+++ b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftFailedConnectionTest.java
@@ -0,0 +1,38 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.amazon.redshift;
+
+import io.cdap.plugin.db.connector.DBSpecificFailedConnectionTest;
+import org.junit.Test;
+
+import java.io.IOException;
+
+public class RedshiftFailedConnectionTest extends DBSpecificFailedConnectionTest {
+ private static final String JDBC_DRIVER_CLASS_NAME = "com.amazon.redshift.Driver";
+
+ @Test
+ public void test() throws ClassNotFoundException, IOException {
+
+ RedshiftConnector connector = new RedshiftConnector(
+ new RedshiftConnectorConfig("username", "password", "jdbc", "", "localhost", "db", 5432));
+
+ super.test(JDBC_DRIVER_CLASS_NAME, connector, "Failed to create connection to database via connection string: " +
+ "jdbc:redshift://localhost:5432/db and arguments: " +
+ "{user=username}. Error: ConnectException: Connection refused " +
+ "(Connection refused).");
+ }
+}
diff --git a/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftPluginTestBase.java b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftPluginTestBase.java
new file mode 100644
index 000000000..5df4fb300
--- /dev/null
+++ b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftPluginTestBase.java
@@ -0,0 +1,218 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.amazon.redshift;
+
+import com.google.common.base.Charsets;
+import com.google.common.base.Throwables;
+import com.google.common.collect.Sets;
+import io.cdap.cdap.api.artifact.ArtifactSummary;
+import io.cdap.cdap.api.plugin.PluginClass;
+import io.cdap.cdap.datapipeline.DataPipelineApp;
+import io.cdap.cdap.proto.id.ArtifactId;
+import io.cdap.cdap.proto.id.NamespaceId;
+import io.cdap.plugin.db.ConnectionConfig;
+import io.cdap.plugin.db.DBRecord;
+import io.cdap.plugin.db.batch.DatabasePluginTestBase;
+import io.cdap.plugin.db.sink.ETLDBOutputFormat;
+import io.cdap.plugin.db.source.DataDrivenETLDBInputFormat;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.math.BigDecimal;
+import java.sql.Connection;
+import java.sql.Date;
+import java.sql.Driver;
+import java.sql.DriverManager;
+import java.sql.PreparedStatement;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Time;
+import java.sql.Timestamp;
+import java.util.Arrays;
+import java.util.Calendar;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.TimeZone;
+
+/**
+ * Base test class for Redshift plugins.
+ */
+public abstract class RedshiftPluginTestBase extends DatabasePluginTestBase {
+ private static final Logger LOGGER = LoggerFactory.getLogger(RedshiftPluginTestBase.class);
+ protected static final ArtifactId DATAPIPELINE_ARTIFACT_ID = NamespaceId.DEFAULT.artifact("data-pipeline", "3.2.0");
+ protected static final ArtifactSummary DATAPIPELINE_ARTIFACT = new ArtifactSummary("data-pipeline", "3.2.0");
+ protected static final long CURRENT_TS = System.currentTimeMillis();
+
+ protected static final String JDBC_DRIVER_NAME = "redshift";
+ protected static final Map<String, String> BASE_PROPS = new HashMap<>();
+
+ protected static String connectionUrl;
+ protected static int year;
+ protected static final int PRECISION = 10;
+ protected static final int SCALE = 6;
+ private static int startCount;
+
+ @BeforeClass
+ public static void setupTest() throws Exception {
+ if (startCount++ > 0) {
+ return;
+ }
+
+ getProperties();
+
+ Calendar calendar = Calendar.getInstance();
+ calendar.setTime(new Date(CURRENT_TS));
+ year = calendar.get(Calendar.YEAR);
+
+ setupBatchArtifacts(DATAPIPELINE_ARTIFACT_ID, DataPipelineApp.class);
+
+ addPluginArtifact(NamespaceId.DEFAULT.artifact(JDBC_DRIVER_NAME, "1.0.0"),
+ DATAPIPELINE_ARTIFACT_ID,
+ RedshiftSource.class, DBRecord.class,
+ ETLDBOutputFormat.class, DataDrivenETLDBInputFormat.class, DBRecord.class);
+
+ // add the Redshift JDBC driver as a 3rd party plugin
+ PluginClass redshiftDriver = new PluginClass(ConnectionConfig.JDBC_PLUGIN_TYPE, JDBC_DRIVER_NAME,
+ "redshift driver class", Driver.class.getName(),
+ null, Collections.emptyMap());
+ addPluginArtifact(NamespaceId.DEFAULT.artifact("redshift-jdbc-connector", "1.0.0"),
+ DATAPIPELINE_ARTIFACT_ID,
+ Sets.newHashSet(redshiftDriver), Driver.class);
+
+ TimeZone.setDefault(TimeZone.getTimeZone("UTC"));
+
+ connectionUrl = "jdbc:redshift://" + BASE_PROPS.get(ConnectionConfig.HOST) + ":" +
+ BASE_PROPS.get(ConnectionConfig.PORT) + "/" + BASE_PROPS.get(ConnectionConfig.DATABASE);
+ Connection conn = createConnection();
+ createTestTables(conn);
+ prepareTestData(conn);
+ }
+
+ private static void getProperties() {
+ BASE_PROPS.put(ConnectionConfig.HOST, getPropertyOrSkip("redshift.clusterEndpoint"));
+ BASE_PROPS.put(ConnectionConfig.PORT, getPropertyOrSkip("redshift.port"));
+ BASE_PROPS.put(ConnectionConfig.DATABASE, getPropertyOrSkip("redshift.database"));
+ BASE_PROPS.put(ConnectionConfig.USER, getPropertyOrSkip("redshift.username"));
+ BASE_PROPS.put(ConnectionConfig.PASSWORD, getPropertyOrSkip("redshift.password"));
+ BASE_PROPS.put(ConnectionConfig.JDBC_PLUGIN_NAME, JDBC_DRIVER_NAME);
+ }
+
+ protected static void createTestTables(Connection conn) throws SQLException {
+ try (Statement stmt = conn.createStatement()) {
+      // create a table that the db action will truncate at the end of the run
+ stmt.execute("CREATE TABLE \"dbActionTest\" (x int, day varchar(10))");
+      // create a table that the post action will truncate at the end of the run
+ stmt.execute("CREATE TABLE \"postActionTest\" (x int, day varchar(10))");
+
+ stmt.execute("CREATE TABLE my_table" +
+ "(" +
+ "\"ID\" INT NOT NULL," +
+ "\"NAME\" VARCHAR(40) NOT NULL," +
+ "\"SCORE\" REAL," +
+ "\"GRADUATED\" BOOLEAN," +
+ "\"NOT_IMPORTED\" VARCHAR(30)," +
+ "\"SMALLINT_COL\" SMALLINT," +
+ "\"BIG\" BIGINT," +
+ "\"NUMERIC_COL\" NUMERIC(" + PRECISION + "," + SCALE + ")," +
+ "\"DECIMAL_COL\" DECIMAL(" + PRECISION + "," + SCALE + ")," +
+ "\"DOUBLE_PREC_COL\" DOUBLE PRECISION," +
+ "\"DATE_COL\" DATE," +
+ "\"TIME_COL\" TIME," +
+ "\"TIMESTAMP_COL\" TIMESTAMP(3)," +
+ "\"TEXT_COL\" TEXT," +
+ "\"CHAR_COL\" CHAR(100)," +
+ "\"BYTEA_COL\" BYTEA" +
+ ")");
+ stmt.execute("CREATE TABLE \"MY_DEST_TABLE\" AS " +
+ "SELECT * FROM my_table");
+ stmt.execute("CREATE TABLE your_table AS " +
+ "SELECT * FROM my_table");
+ }
+ }
+
+ protected static void prepareTestData(Connection conn) throws SQLException {
+ try (
+ Statement stmt = conn.createStatement();
+ PreparedStatement pStmt1 =
+ conn.prepareStatement("INSERT INTO my_table " +
+ "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?," +
+ " ?, ?, ?, ?, ?, ?)");
+ PreparedStatement pStmt2 =
+ conn.prepareStatement("INSERT INTO your_table " +
+ "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?," +
+ " ?, ?, ?, ?, ?, ?)")) {
+
+ stmt.execute("insert into \"dbActionTest\" values (1, '1970-01-01')");
+ stmt.execute("insert into \"postActionTest\" values (1, '1970-01-01')");
+
+ populateData(pStmt1, pStmt2);
+ }
+ }
+
+ private static void populateData(PreparedStatement... stmts) throws SQLException {
+ // insert the same data into both tables: my_table and your_table
+ for (PreparedStatement pStmt : stmts) {
+ for (int i = 1; i <= 5; i++) {
+ String name = "user" + i;
+ pStmt.setInt(1, i);
+ pStmt.setString(2, name);
+ pStmt.setDouble(3, 123.45 + i);
+ pStmt.setBoolean(4, (i % 2 == 0));
+ pStmt.setString(5, "random" + i);
+ pStmt.setShort(6, (short) i);
+ pStmt.setLong(7, (long) i);
+ pStmt.setBigDecimal(8, new BigDecimal("123.45").add(new BigDecimal(i)));
+ pStmt.setBigDecimal(9, new BigDecimal("123.45").add(new BigDecimal(i)));
+ pStmt.setDouble(10, 123.45 + i);
+ pStmt.setDate(11, new Date(CURRENT_TS));
+ pStmt.setTime(12, new Time(CURRENT_TS));
+ pStmt.setTimestamp(13, new Timestamp(CURRENT_TS));
+ pStmt.setString(14, name);
+ pStmt.setString(15, "char" + i);
+ pStmt.setBytes(16, name.getBytes(Charsets.UTF_8));
+ pStmt.executeUpdate();
+ }
+ }
+ }
+
+ public static Connection createConnection() {
+ try {
+ Class.forName(Driver.class.getCanonicalName());
+ return DriverManager.getConnection(connectionUrl, BASE_PROPS.get(ConnectionConfig.USER),
+ BASE_PROPS.get(ConnectionConfig.PASSWORD));
+ } catch (Exception e) {
+ throw Throwables.propagate(e);
+ }
+ }
+
+ @AfterClass
+ public static void tearDownDB() {
+ try (Connection conn = createConnection();
+ Statement stmt = conn.createStatement()) {
+ executeCleanup(Arrays.asList(() -> stmt.execute("DROP TABLE my_table"),
+ () -> stmt.execute("DROP TABLE your_table"),
+ () -> stmt.execute("DROP TABLE postActionTest"),
+ () -> stmt.execute("DROP TABLE dbActionTest"),
+ () -> stmt.execute("DROP TABLE MY_DEST_TABLE")), LOGGER);
+ } catch (Exception e) {
+      LOGGER.warn("Failed to tear down.", e);
+ }
+ }
+}
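
The base class above resolves the cluster coordinates from JVM system properties (redshift.clusterEndpoint, redshift.port, redshift.database, redshift.username, redshift.password) and concatenates them into a plain Redshift JDBC URL before creating the test tables. A standalone sketch of that URL assembly, using made-up endpoint, port and database values rather than anything from this change:

// Sketch of the connectionUrl concatenation in RedshiftPluginTestBase#setupTest.
// The endpoint, port and database below are hypothetical examples.
import java.util.HashMap;
import java.util.Map;

public class RedshiftUrlSketch {
  public static void main(String[] args) {
    Map<String, String> props = new HashMap<>();
    props.put("host", "examplecluster.abc123.us-east-1.redshift.amazonaws.com");
    props.put("port", "5439");
    props.put("database", "dev");

    String connectionUrl = "jdbc:redshift://" + props.get("host") + ":"
      + props.get("port") + "/" + props.get("database");
    System.out.println(connectionUrl);
    // -> jdbc:redshift://examplecluster.abc123.us-east-1.redshift.amazonaws.com:5439/dev
  }
}
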
diff --git a/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftPluginTestSuite.java b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftPluginTestSuite.java
new file mode 100644
index 000000000..95ad0938b
--- /dev/null
+++ b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftPluginTestSuite.java
@@ -0,0 +1,31 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.amazon.redshift;
+
+import io.cdap.cdap.common.test.TestSuite;
+import org.junit.runner.RunWith;
+import org.junit.runners.Suite;
+
+/**
+ * This is a test suite that runs all the tests for Redshift plugins.
+ */
+@RunWith(TestSuite.class)
+@Suite.SuiteClasses({
+ RedshiftSourceTestRun.class,
+})
+public class RedshiftPluginTestSuite extends RedshiftPluginTestBase {
+}
diff --git a/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftSchemaReaderTest.java b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftSchemaReaderTest.java
new file mode 100644
index 000000000..206b4ae9f
--- /dev/null
+++ b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftSchemaReaderTest.java
@@ -0,0 +1,131 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.amazon.redshift;
+
+import com.google.common.collect.Lists;
+import io.cdap.cdap.api.data.schema.Schema;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mockito;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Types;
+import java.util.List;
+
+@RunWith(MockitoJUnitRunner.class)
+public class RedshiftSchemaReaderTest {
+
+ @Test
+ public void testGetSchema() throws SQLException {
+ RedshiftSchemaReader schemaReader = new RedshiftSchemaReader();
+
+ ResultSetMetaData metadata = Mockito.mock(ResultSetMetaData.class);
+ Mockito.when(metadata.getColumnTypeName(1)).thenReturn("timetz");
+ Mockito.when(metadata.getColumnType(1)).thenReturn(Types.TIMESTAMP);
+
+ Schema schema = schemaReader.getSchema(metadata, 1);
+
+ Assert.assertEquals(Schema.of(Schema.Type.STRING), schema);
+ }
+
+ @Test
+ public void testGetSchemaWithIntType() throws SQLException {
+ RedshiftSchemaReader schemaReader = new RedshiftSchemaReader();
+ ResultSetMetaData metadata = Mockito.mock(ResultSetMetaData.class);
+ Mockito.when(metadata.getColumnTypeName(1)).thenReturn("INT");
+ Mockito.when(metadata.getColumnType(1)).thenReturn(Types.NUMERIC);
+ Schema schema = schemaReader.getSchema(metadata, 1);
+
+ Assert.assertEquals(Schema.of(Schema.Type.INT), schema);
+ }
+
+ @Test
+ public void testGetSchemaWithNumericTypeWithPrecision() throws SQLException {
+ RedshiftSchemaReader schemaReader = new RedshiftSchemaReader();
+ ResultSetMetaData metadata = Mockito.mock(ResultSetMetaData.class);
+ Mockito.when(metadata.getColumnTypeName(1)).thenReturn("STRING");
+ Mockito.when(metadata.getColumnType(1)).thenReturn(Types.NUMERIC);
+ Mockito.when(metadata.getPrecision(1)).thenReturn(0);
+
+ Schema schema = schemaReader.getSchema(metadata, 1);
+
+ Assert.assertEquals(Schema.of(Schema.Type.STRING), schema);
+ }
+
+ @Test
+ public void testGetSchemaWithOtherTypes() throws SQLException {
+ RedshiftSchemaReader schemaReader = new RedshiftSchemaReader();
+ ResultSetMetaData metadata = Mockito.mock(ResultSetMetaData.class);
+ Mockito.when(metadata.getColumnTypeName(1)).thenReturn("BIGINT");
+ Mockito.when(metadata.getColumnType(1)).thenReturn(Types.BIGINT);
+ Schema schema = schemaReader.getSchema(metadata, 1);
+
+ Assert.assertEquals(Schema.of(Schema.Type.LONG), schema);
+
+ Mockito.when(metadata.getColumnTypeName(2)).thenReturn("timestamp");
+ Mockito.when(metadata.getColumnType(2)).thenReturn(Types.TIMESTAMP);
+
+ schema = schemaReader.getSchema(metadata, 2);
+
+ Assert.assertEquals(Schema.of(Schema.LogicalType.DATETIME), schema);
+ }
+
+ @Test
+ public void testShouldIgnoreColumn() throws SQLException {
+ RedshiftSchemaReader schemaReader = new RedshiftSchemaReader("sessionID");
+ ResultSetMetaData metadata = Mockito.mock(ResultSetMetaData.class);
+ Mockito.when(metadata.getColumnName(1)).thenReturn("c_sessionID");
+ Assert.assertTrue(schemaReader.shouldIgnoreColumn(metadata, 1));
+ Mockito.when(metadata.getColumnName(2)).thenReturn("sqn_sessionID");
+ Assert.assertTrue(schemaReader.shouldIgnoreColumn(metadata, 2));
+ Mockito.when(metadata.getColumnName(3)).thenReturn("columnName");
+ Assert.assertFalse(schemaReader.shouldIgnoreColumn(metadata, 3));
+ }
+
+ @Test
+ public void testGetSchemaFields() throws SQLException {
+ RedshiftSchemaReader schemaReader = new RedshiftSchemaReader();
+
+ ResultSet resultSet = Mockito.mock(ResultSet.class);
+ ResultSetMetaData metadata = Mockito.mock(ResultSetMetaData.class);
+
+ Mockito.when(resultSet.getMetaData()).thenReturn(metadata);
+
+ // Mock two columns with different types
+ Mockito.when(metadata.getColumnCount()).thenReturn(2);
+ Mockito.when(metadata.getColumnTypeName(1)).thenReturn("INT");
+ Mockito.when(metadata.getColumnType(1)).thenReturn(Types.NUMERIC);
+ Mockito.when(metadata.getColumnName(1)).thenReturn("column1");
+
+ Mockito.when(metadata.getColumnTypeName(2)).thenReturn("BIGINT");
+ Mockito.when(metadata.getColumnType(2)).thenReturn(Types.BIGINT);
+ Mockito.when(metadata.getColumnName(2)).thenReturn("column2");
+
+    List<Schema.Field> expectedSchemaFields = Lists.newArrayList();
+ expectedSchemaFields.add(Schema.Field.of("column1", Schema.nullableOf(Schema.of(Schema.Type.INT))));
+ expectedSchemaFields.add(Schema.Field.of("column2", Schema.nullableOf(Schema.of(Schema.Type.LONG))));
+
+    List<Schema.Field> actualSchemaFields = schemaReader.getSchemaFields(resultSet);
+
+ Assert.assertEquals(expectedSchemaFields.get(0).getName(), actualSchemaFields.get(0).getName());
+ Assert.assertEquals(expectedSchemaFields.get(1).getName(), actualSchemaFields.get(1).getName());
+ }
+}
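
Taken together, these assertions pin down the Redshift-to-CDAP type mappings the reader applies: timetz is surfaced as a string, an integer column reported as NUMERIC becomes int, a numeric with precision 0 falls back to string, BIGINT becomes long, timestamp becomes the datetime logical type, and columns matching the configured session-ID suffix are skipped. A simplified, self-contained illustration of those mappings follows; it mirrors only what the tests assert and is not the plugin's actual implementation:

import io.cdap.cdap.api.data.schema.Schema;

// Illustrative only: reproduces the mappings asserted in RedshiftSchemaReaderTest.
public class RedshiftTypeMappingSketch {

  // typeName is the value ResultSetMetaData#getColumnTypeName would report.
  static Schema map(String typeName) {
    switch (typeName.toLowerCase()) {
      case "timetz":
        return Schema.of(Schema.Type.STRING);          // time with time zone -> string
      case "int":
        return Schema.of(Schema.Type.INT);             // integer reported as NUMERIC -> int
      case "bigint":
        return Schema.of(Schema.Type.LONG);            // bigint -> long
      case "timestamp":
        return Schema.of(Schema.LogicalType.DATETIME); // timestamp -> datetime logical type
      default:
        // e.g. a numeric column with no declared precision (reported as 0) falls back to string
        return Schema.of(Schema.Type.STRING);
    }
  }

  public static void main(String[] args) {
    System.out.println(map("timetz"));
    System.out.println(map("BIGINT"));
  }
}
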
diff --git a/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftSourceTest.java b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftSourceTest.java
new file mode 100644
index 000000000..d09de8f0d
--- /dev/null
+++ b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftSourceTest.java
@@ -0,0 +1,98 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.amazon.redshift;
+
+import io.cdap.cdap.etl.api.batch.BatchSourceContext;
+import io.cdap.plugin.common.LineageRecorder;
+import io.cdap.plugin.db.SchemaReader;
+import org.apache.hadoop.mapreduce.lib.db.DBWritable;
+import org.junit.Assert;
+import org.junit.Test;
+import org.junit.runner.RunWith;
+import org.mockito.Mockito;
+import org.mockito.junit.MockitoJUnitRunner;
+
+import java.util.Map;
+
+@RunWith(MockitoJUnitRunner.class)
+public class RedshiftSourceTest {
+
+ @Test
+ public void testGetDBSpecificArguments() {
+ RedshiftConnectorConfig connectorConfig = new RedshiftConnectorConfig("username", "password",
+ "jdbcPluginName", "connectionArguments",
+ "host", "database", 1101);
+ RedshiftSource.RedshiftSourceConfig config = new RedshiftSource.RedshiftSourceConfig(false, connectorConfig);
+    Map<String, String> dbSpecificArguments = config.getDBSpecificArguments();
+ Assert.assertEquals(0, dbSpecificArguments.size());
+ }
+
+ @Test
+ public void testGetFetchSize() {
+ RedshiftConnectorConfig connectorConfig = new RedshiftConnectorConfig("username", "password",
+ "jdbcPluginName", "connectionArguments",
+ "host", "database", 1101);
+ RedshiftSource.RedshiftSourceConfig config = new RedshiftSource.RedshiftSourceConfig(false, connectorConfig);
+ Integer fetchSize = config.getFetchSize();
+ Assert.assertEquals(1000, fetchSize.intValue());
+ }
+
+ @Test
+ public void testGetSchemaReader() {
+ RedshiftConnectorConfig connectorConfig = new RedshiftConnectorConfig("username", "password",
+ "jdbcPluginName", "connectionArguments",
+ "host", "database", 1101);
+ RedshiftSource source = new RedshiftSource(new RedshiftSource.RedshiftSourceConfig(false, connectorConfig));
+ SchemaReader schemaReader = source.getSchemaReader();
+ Assert.assertTrue(schemaReader instanceof RedshiftSchemaReader);
+ }
+
+ @Test
+ public void testGetDBRecordType() {
+ RedshiftConnectorConfig connectorConfig = new RedshiftConnectorConfig("username", "password",
+ "jdbcPluginName", "connectionArguments",
+ "host", "database", 1101);
+ RedshiftSource source = new RedshiftSource(new RedshiftSource.RedshiftSourceConfig(false, connectorConfig));
+    Class<? extends DBWritable> dbRecordType = source.getDBRecordType();
+ Assert.assertEquals(RedshiftDBRecord.class, dbRecordType);
+ }
+
+ @Test
+ public void testCreateConnectionString() {
+ RedshiftConnectorConfig connectorConfig = new RedshiftConnectorConfig("username", "password",
+ "jdbcPluginName", "connectionArguments",
+ "localhost", "test", 5439);
+ RedshiftSource.RedshiftSourceConfig config = new RedshiftSource.RedshiftSourceConfig(false, connectorConfig);
+
+ RedshiftSource source = new RedshiftSource(config);
+ String connectionString = source.createConnectionString();
+ Assert.assertEquals("jdbc:redshift://localhost:5439/test", connectionString);
+ }
+
+ @Test
+ public void testGetLineageRecorder() {
+ BatchSourceContext context = Mockito.mock(BatchSourceContext.class);
+ RedshiftConnectorConfig connectorConfig = new RedshiftConnectorConfig("username", "password",
+ "jdbcPluginName", "connectionArguments",
+ "host", "database", 1101);
+ RedshiftSource.RedshiftSourceConfig config = new RedshiftSource.RedshiftSourceConfig(false, connectorConfig);
+ RedshiftSource source = new RedshiftSource(config);
+
+ LineageRecorder lineageRecorder = source.getLineageRecorder(context);
+ Assert.assertNotNull(lineageRecorder);
+ }
+}
diff --git a/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftSourceTestRun.java b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftSourceTestRun.java
new file mode 100644
index 000000000..1ac41bcd0
--- /dev/null
+++ b/amazon-redshift-plugin/src/test/java/io/cdap/plugin/amazon/redshift/RedshiftSourceTestRun.java
@@ -0,0 +1,332 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package io.cdap.plugin.amazon.redshift;
+
+import com.google.common.collect.ImmutableMap;
+import io.cdap.cdap.api.common.Bytes;
+import io.cdap.cdap.api.data.format.StructuredRecord;
+import io.cdap.cdap.api.dataset.table.Table;
+import io.cdap.cdap.etl.api.batch.BatchSource;
+import io.cdap.cdap.etl.mock.batch.MockSink;
+import io.cdap.cdap.etl.proto.v2.ETLBatchConfig;
+import io.cdap.cdap.etl.proto.v2.ETLPlugin;
+import io.cdap.cdap.etl.proto.v2.ETLStage;
+import io.cdap.cdap.proto.artifact.AppRequest;
+import io.cdap.cdap.proto.id.ApplicationId;
+import io.cdap.cdap.proto.id.NamespaceId;
+import io.cdap.cdap.test.ApplicationManager;
+import io.cdap.cdap.test.DataSetManager;
+import io.cdap.plugin.common.Constants;
+import io.cdap.plugin.db.ConnectionConfig;
+import io.cdap.plugin.db.DBConfig;
+import io.cdap.plugin.db.source.AbstractDBSource;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.math.BigDecimal;
+import java.math.MathContext;
+import java.nio.ByteBuffer;
+import java.sql.Date;
+import java.sql.Time;
+import java.text.SimpleDateFormat;
+import java.time.LocalDate;
+import java.time.LocalTime;
+import java.time.ZoneId;
+import java.time.ZoneOffset;
+import java.time.ZonedDateTime;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+/**
+ * Test for Redshift source plugin.
+ */
+public class RedshiftSourceTestRun extends RedshiftPluginTestBase {
+
+ @Test
+ @SuppressWarnings("ConstantConditions")
+ public void testDBMacroSupport() throws Exception {
+ String importQuery = "SELECT * FROM my_table WHERE \"DATE_COL\" <= '${logicalStartTime(yyyy-MM-dd,1d)}' " +
+ "AND $CONDITIONS";
+ String boundingQuery = "SELECT MIN(ID),MAX(ID) from my_table";
+ String splitBy = "ID";
+
+    ImmutableMap<String, String> sourceProps = ImmutableMap.<String, String>builder()
+ .putAll(BASE_PROPS)
+ .put(AbstractDBSource.DBSourceConfig.IMPORT_QUERY, importQuery)
+ .put(AbstractDBSource.DBSourceConfig.BOUNDING_QUERY, boundingQuery)
+ .put(AbstractDBSource.DBSourceConfig.SPLIT_BY, splitBy)
+ .put(Constants.Reference.REFERENCE_NAME, "DBTestSource").build();
+
+ ETLPlugin sourceConfig = new ETLPlugin(
+ RedshiftConstants.PLUGIN_NAME,
+ BatchSource.PLUGIN_TYPE,
+ sourceProps
+ );
+
+ ETLPlugin sinkConfig = MockSink.getPlugin("macroOutputTable");
+
+ ApplicationManager appManager = deployETL(sourceConfig, sinkConfig,
+ DATAPIPELINE_ARTIFACT, "testDBMacro");
+ runETLOnce(appManager, ImmutableMap.of("logical.start.time", String.valueOf(CURRENT_TS)));
+
+    DataSetManager<Table> outputManager = getDataset("macroOutputTable");
+ Assert.assertTrue(MockSink.readOutput(outputManager).isEmpty());
+ }
+
+ @Test
+ @SuppressWarnings("ConstantConditions")
+ public void testDBSource() throws Exception {
+ String importQuery = "SELECT \"ID\", \"NAME\", \"SCORE\", \"GRADUATED\", \"SMALLINT_COL\", \"BIG\", " +
+ "\"NUMERIC_COL\", \"CHAR_COL\", \"DECIMAL_COL\", \"BYTEA_COL\", \"DATE_COL\", \"TIME_COL\", \"TIMESTAMP_COL\", " +
+ "\"TEXT_COL\", \"DOUBLE_PREC_COL\" FROM my_table " +
+ "WHERE \"ID\" < 3 AND $CONDITIONS";
+ String boundingQuery = "SELECT MIN(\"ID\"),MAX(\"ID\") from my_table";
+ String splitBy = "ID";
+ ETLPlugin sourceConfig = new ETLPlugin(
+ RedshiftConstants.PLUGIN_NAME,
+ BatchSource.PLUGIN_TYPE,
+      ImmutableMap.<String, String>builder()
+ .putAll(BASE_PROPS)
+ .put(AbstractDBSource.DBSourceConfig.IMPORT_QUERY, importQuery)
+ .put(AbstractDBSource.DBSourceConfig.BOUNDING_QUERY, boundingQuery)
+ .put(AbstractDBSource.DBSourceConfig.SPLIT_BY, splitBy)
+ .put(Constants.Reference.REFERENCE_NAME, "DBSourceTest")
+ .build(),
+ null
+ );
+
+ String outputDatasetName = "output-dbsourcetest";
+ ETLPlugin sinkConfig = MockSink.getPlugin(outputDatasetName);
+
+ ApplicationManager appManager = deployETL(sourceConfig, sinkConfig,
+ DATAPIPELINE_ARTIFACT, "testDBSource");
+ runETLOnce(appManager);
+
+    DataSetManager<Table> outputManager = getDataset(outputDatasetName);
+    List<StructuredRecord> outputRecords = MockSink.readOutput(outputManager);
+
+ Assert.assertEquals(2, outputRecords.size());
+ String userid = outputRecords.get(0).get("NAME");
+ StructuredRecord row1 = "user1".equals(userid) ? outputRecords.get(0) : outputRecords.get(1);
+ StructuredRecord row2 = "user1".equals(userid) ? outputRecords.get(1) : outputRecords.get(0);
+
+ // Verify data
+ Assert.assertEquals("user1", row1.get("NAME"));
+ Assert.assertEquals("user2", row2.get("NAME"));
+ Assert.assertEquals("user1", row1.get("TEXT_COL"));
+ Assert.assertEquals("user2", row2.get("TEXT_COL"));
+ Assert.assertEquals("char1", ((String) row1.get("CHAR_COL")).trim());
+ Assert.assertEquals("char2", ((String) row2.get("CHAR_COL")).trim());
+ Assert.assertEquals(124.45f, ((Float) row1.get("SCORE")).doubleValue(), 0.000001);
+ Assert.assertEquals(125.45f, ((Float) row2.get("SCORE")).doubleValue(), 0.000001);
+ Assert.assertEquals(false, row1.get("GRADUATED"));
+ Assert.assertEquals(true, row2.get("GRADUATED"));
+ Assert.assertNull(row1.get("NOT_IMPORTED"));
+ Assert.assertNull(row2.get("NOT_IMPORTED"));
+
+ Assert.assertEquals(1, (int) row1.get("SMALLINT_COL"));
+ Assert.assertEquals(2, (int) row2.get("SMALLINT_COL"));
+ Assert.assertEquals(1, (long) row1.get("BIG"));
+ Assert.assertEquals(2, (long) row2.get("BIG"));
+
+ Assert.assertEquals(new BigDecimal("124.45", new MathContext(PRECISION)).setScale(SCALE),
+ row1.getDecimal("NUMERIC_COL"));
+ Assert.assertEquals(new BigDecimal("125.45", new MathContext(PRECISION)).setScale(SCALE),
+ row2.getDecimal("NUMERIC_COL"));
+ Assert.assertEquals(new BigDecimal("124.45", new MathContext(PRECISION)).setScale(SCALE),
+ row1.getDecimal("DECIMAL_COL"));
+
+ Assert.assertEquals(124.45, (double) row1.get("DOUBLE_PREC_COL"), 0.000001);
+ Assert.assertEquals(125.45, (double) row2.get("DOUBLE_PREC_COL"), 0.000001);
+ // Verify time columns
+ java.util.Date date = new java.util.Date(CURRENT_TS);
+ SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
+ LocalDate expectedDate = Date.valueOf(sdf.format(date)).toLocalDate();
+ sdf = new SimpleDateFormat("H:mm:ss");
+ LocalTime expectedTime = Time.valueOf(sdf.format(date)).toLocalTime();
+ ZonedDateTime expectedTs = date.toInstant().atZone(ZoneId.ofOffset("UTC", ZoneOffset.UTC));
+ Assert.assertEquals(expectedDate, row1.getDate("DATE_COL"));
+ Assert.assertEquals(expectedTime, row1.getTime("TIME_COL"));
+ Assert.assertEquals(expectedTs, row1.getTimestamp("TIMESTAMP_COL", ZoneId.ofOffset("UTC", ZoneOffset.UTC)));
+
+ // verify binary columns
+ Assert.assertEquals("user1", Bytes.toString(((ByteBuffer) row1.get("BYTEA_COL")).array(), 0, 5));
+ Assert.assertEquals("user2", Bytes.toString(((ByteBuffer) row2.get("BYTEA_COL")).array(), 0, 5));
+ }
+
+ @Test
+ public void testDbSourceMultipleTables() throws Exception {
+    String importQuery = "SELECT \"my_table\".\"ID\", \"your_table\".\"NAME\" FROM \"my_table\", \"your_table\" " +
+ "WHERE \"my_table\".\"ID\" < 3 and \"my_table\".\"ID\" = \"your_table\".\"ID\" and $CONDITIONS";
+ String boundingQuery = "SELECT MIN(MIN(\"my_table\".\"ID\"), MIN(\"your_table\".\"ID\")), " +
+ "MAX(MAX(\"my_table\".\"ID\"), MAX(\"your_table\".\"ID\"))";
+ String splitBy = "\"my_table\".\"ID\"";
+ ETLPlugin sourceConfig = new ETLPlugin(
+ RedshiftConstants.PLUGIN_NAME,
+ BatchSource.PLUGIN_TYPE,
+      ImmutableMap.<String, String>builder()
+ .putAll(BASE_PROPS)
+ .put(AbstractDBSource.DBSourceConfig.IMPORT_QUERY, importQuery)
+ .put(AbstractDBSource.DBSourceConfig.BOUNDING_QUERY, boundingQuery)
+ .put(AbstractDBSource.DBSourceConfig.SPLIT_BY, splitBy)
+ .put(Constants.Reference.REFERENCE_NAME, "DBMultipleTest")
+ .build(),
+ null
+ );
+
+ String outputDatasetName = "output-multitabletest";
+ ETLPlugin sinkConfig = MockSink.getPlugin(outputDatasetName);
+
+ ApplicationManager appManager = deployETL(sourceConfig, sinkConfig,
+ DATAPIPELINE_ARTIFACT, "testDBSourceWithMultipleTables");
+ runETLOnce(appManager);
+
+ // records should be written
+    DataSetManager<Table> outputManager = getDataset(outputDatasetName);
+    List<StructuredRecord> outputRecords = MockSink.readOutput(outputManager);
+ Assert.assertEquals(2, outputRecords.size());
+ String userid = outputRecords.get(0).get("NAME");
+ StructuredRecord row1 = "user1".equals(userid) ? outputRecords.get(0) : outputRecords.get(1);
+ StructuredRecord row2 = "user1".equals(userid) ? outputRecords.get(1) : outputRecords.get(0);
+ // Verify data
+ Assert.assertEquals("user1", row1.get("NAME"));
+ Assert.assertEquals("user2", row2.get("NAME"));
+    Assert.assertEquals(1, row1.<Integer>get("ID").intValue());
+    Assert.assertEquals(2, row2.<Integer>get("ID").intValue());
+ }
+
+ @Test
+ public void testUserNamePasswordCombinations() throws Exception {
+ String importQuery = "SELECT * FROM \"my_table\" WHERE $CONDITIONS";
+ String boundingQuery = "SELECT MIN(\"ID\"),MAX(\"ID\") from \"my_table\"";
+ String splitBy = "\"ID\"";
+
+ ETLPlugin sinkConfig = MockSink.getPlugin("outputTable");
+
+    Map<String, String> baseSourceProps = ImmutableMap.<String, String>builder()
+ .put(ConnectionConfig.HOST, BASE_PROPS.get(ConnectionConfig.HOST))
+ .put(ConnectionConfig.PORT, BASE_PROPS.get(ConnectionConfig.PORT))
+ .put(ConnectionConfig.DATABASE, BASE_PROPS.get(ConnectionConfig.DATABASE))
+ .put(ConnectionConfig.JDBC_PLUGIN_NAME, JDBC_DRIVER_NAME)
+ .put(AbstractDBSource.DBSourceConfig.IMPORT_QUERY, importQuery)
+ .put(AbstractDBSource.DBSourceConfig.BOUNDING_QUERY, boundingQuery)
+ .put(AbstractDBSource.DBSourceConfig.SPLIT_BY, splitBy)
+ .put(Constants.Reference.REFERENCE_NAME, "UserPassDBTest")
+ .build();
+
+ ApplicationId appId = NamespaceId.DEFAULT.app("dbTest");
+
+ // null user name, null password. Should succeed.
+ // as source
+ ETLPlugin dbConfig = new ETLPlugin(RedshiftConstants.PLUGIN_NAME, BatchSource.PLUGIN_TYPE,
+ baseSourceProps, null);
+ ETLStage table = new ETLStage("uniqueTableSink", sinkConfig);
+ ETLStage database = new ETLStage("databaseSource", dbConfig);
+ ETLBatchConfig etlConfig = ETLBatchConfig.builder()
+ .addStage(database)
+ .addStage(table)
+ .addConnection(database.getName(), table.getName())
+ .build();
+    AppRequest<ETLBatchConfig> appRequest = new AppRequest<>(DATAPIPELINE_ARTIFACT, etlConfig);
+ deployApplication(appId, appRequest);
+
+ // null user name, non-null password. Should fail.
+ // as source
+    Map<String, String> noUser = new HashMap<>(baseSourceProps);
+ noUser.put(DBConfig.PASSWORD, "password");
+ database = new ETLStage("databaseSource", new ETLPlugin(RedshiftConstants.PLUGIN_NAME,
+ BatchSource.PLUGIN_TYPE, noUser, null));
+ etlConfig = ETLBatchConfig.builder()
+ .addStage(database)
+ .addStage(table)
+ .addConnection(database.getName(), table.getName())
+ .build();
+ assertDeploymentFailure(appId, etlConfig, DATAPIPELINE_ARTIFACT,
+ "Deploying DB Source with null username but non-null password should have failed.");
+
+ // non-null username, non-null, but empty password. Should succeed.
+ // as source
+    Map<String, String> emptyPassword = new HashMap<>(baseSourceProps);
+ emptyPassword.put(DBConfig.USER, "root");
+ emptyPassword.put(DBConfig.PASSWORD, "");
+ database = new ETLStage("databaseSource", new ETLPlugin(RedshiftConstants.PLUGIN_NAME,
+ BatchSource.PLUGIN_TYPE, emptyPassword, null));
+ etlConfig = ETLBatchConfig.builder()
+ .addStage(database)
+ .addStage(table)
+ .addConnection(database.getName(), table.getName())
+ .build();
+ appRequest = new AppRequest<>(DATAPIPELINE_ARTIFACT, etlConfig);
+ deployApplication(appId, appRequest);
+ }
+
+ @Test
+ public void testNonExistentDBTable() throws Exception {
+ // source
+ String importQuery = "SELECT \"ID\", \"NAME\" FROM \"dummy\" WHERE ID < 3 AND $CONDITIONS";
+ String boundingQuery = "SELECT MIN(\"ID\"),MAX(\"ID\") FROM \"dummy\"";
+ String splitBy = "\"ID\"";
+ ETLPlugin sinkConfig = MockSink.getPlugin("table");
+ ETLPlugin sourceBadNameConfig = new ETLPlugin(
+ RedshiftConstants.PLUGIN_NAME,
+ BatchSource.PLUGIN_TYPE,
+      ImmutableMap.<String, String>builder()
+ .putAll(BASE_PROPS)
+ .put(AbstractDBSource.DBSourceConfig.IMPORT_QUERY, importQuery)
+ .put(AbstractDBSource.DBSourceConfig.BOUNDING_QUERY, boundingQuery)
+ .put(AbstractDBSource.DBSourceConfig.SPLIT_BY, splitBy)
+ .put(Constants.Reference.REFERENCE_NAME, "DBNonExistentTest")
+ .build(),
+ null);
+ ETLStage sink = new ETLStage("sink", sinkConfig);
+ ETLStage sourceBadName = new ETLStage("sourceBadName", sourceBadNameConfig);
+
+ ETLBatchConfig etlConfig = ETLBatchConfig.builder()
+ .addStage(sourceBadName)
+ .addStage(sink)
+ .addConnection(sourceBadName.getName(), sink.getName())
+ .build();
+ ApplicationId appId = NamespaceId.DEFAULT.app("dbSourceNonExistingTest");
+ assertDeployAppFailure(appId, etlConfig, DATAPIPELINE_ARTIFACT);
+
+ // Bad connection
+ ETLPlugin sourceBadConnConfig = new ETLPlugin(
+ RedshiftConstants.PLUGIN_NAME,
+ BatchSource.PLUGIN_TYPE,
+      ImmutableMap.<String, String>builder()
+ .put(ConnectionConfig.HOST, BASE_PROPS.get(ConnectionConfig.HOST))
+ .put(ConnectionConfig.PORT, BASE_PROPS.get(ConnectionConfig.PORT))
+ .put(ConnectionConfig.DATABASE, "dumDB")
+ .put(ConnectionConfig.USER, BASE_PROPS.get(ConnectionConfig.USER))
+ .put(ConnectionConfig.PASSWORD, BASE_PROPS.get(ConnectionConfig.PASSWORD))
+ .put(ConnectionConfig.JDBC_PLUGIN_NAME, JDBC_DRIVER_NAME)
+ .put(AbstractDBSource.DBSourceConfig.IMPORT_QUERY, importQuery)
+ .put(AbstractDBSource.DBSourceConfig.BOUNDING_QUERY, boundingQuery)
+ .put(AbstractDBSource.DBSourceConfig.SPLIT_BY, splitBy)
+ .put(Constants.Reference.REFERENCE_NAME, "RedshiftTest")
+ .build(),
+ null);
+ ETLStage sourceBadConn = new ETLStage("sourceBadConn", sourceBadConnConfig);
+ etlConfig = ETLBatchConfig.builder()
+ .addStage(sourceBadConn)
+ .addStage(sink)
+ .addConnection(sourceBadConn.getName(), sink.getName())
+ .build();
+ assertDeployAppFailure(appId, etlConfig, DATAPIPELINE_ARTIFACT);
+ }
+}
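
Every import query above carries a $CONDITIONS placeholder next to a split-by column and a bounding query. At run time the data-driven input format substitutes $CONDITIONS with a predicate over the split-by column derived from the bounding query, so each split reads a disjoint slice of the table. A rough, illustrative sketch of that substitution; the bounds and the string replacement below stand in for the framework's real logic:

// Illustrative only: shows the kind of predicate that takes the place of $CONDITIONS for one split.
public class ConditionsSketch {
  public static void main(String[] args) {
    String importQuery = "SELECT * FROM my_table WHERE \"ID\" < 3 AND $CONDITIONS";
    // Hypothetical bounds for the first of two splits, assuming MIN(ID) = 1 and MAX(ID) = 5.
    String splitPredicate = "ID >= 1 AND ID < 3";
    System.out.println(importQuery.replace("$CONDITIONS", splitPredicate));
    // -> SELECT * FROM my_table WHERE "ID" < 3 AND ID >= 1 AND ID < 3
  }
}
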
diff --git a/amazon-redshift-plugin/widgets/Redshift-batchsource.json b/amazon-redshift-plugin/widgets/Redshift-batchsource.json
new file mode 100644
index 000000000..943e2d24e
--- /dev/null
+++ b/amazon-redshift-plugin/widgets/Redshift-batchsource.json
@@ -0,0 +1,240 @@
+{
+ "metadata": {
+ "spec-version": "1.5"
+ },
+ "display-name": "Redshift",
+ "configuration-groups": [
+ {
+ "label": "Connection",
+ "properties": [
+ {
+ "widget-type": "toggle",
+ "label": "Use connection",
+ "name": "useConnection",
+ "widget-attributes": {
+ "on": {
+ "value": "true",
+ "label": "YES"
+ },
+ "off": {
+ "value": "false",
+ "label": "NO"
+ },
+ "default": "false"
+ }
+ },
+ {
+ "widget-type": "connection-select",
+ "label": "Connection",
+ "name": "connection",
+ "widget-attributes": {
+ "connectionType": "Redshift"
+ }
+ },
+ {
+ "widget-type": "plugin-list",
+ "label": "JDBC Driver name",
+ "name": "jdbcPluginName",
+ "widget-attributes": {
+ "plugin-type": "jdbc"
+ }
+ },
+ {
+ "widget-type": "textbox",
+ "label": "Host",
+ "name": "host",
+ "widget-attributes": {
+ "placeholder": "Redshift endpoint host name."
+ }
+ },
+ {
+ "widget-type": "number",
+ "label": "Port",
+ "name": "port",
+ "widget-attributes": {
+ "default": "5439"
+ }
+ },
+ {
+ "widget-type": "textbox",
+ "label": "Username",
+ "name": "user"
+ },
+ {
+ "widget-type": "password",
+ "label": "Password",
+ "name": "password"
+ },
+ {
+ "widget-type": "keyvalue",
+ "label": "Connection Arguments",
+ "name": "connectionArguments",
+ "widget-attributes": {
+ "showDelimiter": "false",
+ "key-placeholder": "Key",
+ "value-placeholder": "Value",
+ "kv-delimiter" : "=",
+ "delimiter" : ";"
+ }
+ }
+ ]
+ },
+ {
+ "label": "Basic",
+ "properties": [
+ {
+ "widget-type": "textbox",
+ "label": "Reference Name",
+ "name": "referenceName",
+ "widget-attributes": {
+ "placeholder": "Name used to identify this source for lineage. Typically, the name of the table/view."
+ }
+ },
+ {
+ "widget-type": "textbox",
+ "label": "Database",
+ "name": "database"
+ },
+ {
+ "widget-type": "connection-browser",
+ "widget-category": "plugin",
+ "widget-attributes": {
+ "connectionType": "Redshift",
+ "label": "Browse Database"
+ }
+ }
+ ]
+ },
+ {
+ "label": "SQL Query",
+ "properties": [
+ {
+ "widget-type": "textarea",
+ "label": "Import Query",
+ "name": "importQuery",
+ "widget-attributes": {
+ "rows": "4"
+ }
+ },
+ {
+ "widget-type": "get-schema",
+ "widget-category": "plugin"
+ }
+ ]
+ },
+ {
+ "label": "Advanced",
+ "properties": [
+ {
+ "widget-type": "textarea",
+ "label": "Bounding Query",
+ "name": "boundingQuery",
+ "widget-attributes": {
+ "rows": "4"
+ }
+ },
+ {
+ "widget-type": "textbox",
+ "label": "Split-By Field Name",
+ "name": "splitBy"
+ },
+ {
+ "widget-type": "textbox",
+ "label": "Number of Splits",
+ "name": "numSplits",
+ "widget-attributes": {
+ "default": "1"
+ }
+ },
+ {
+ "widget-type": "number",
+ "label": "Fetch Size",
+ "name": "fetchSize",
+ "widget-attributes": {
+ "default": "1000",
+ "minimum": "0"
+ }
+ }
+ ]
+ }
+ ],
+ "outputs": [
+ {
+ "name": "schema",
+ "widget-type": "schema",
+ "widget-attributes": {
+ "schema-types": [
+ "boolean",
+ "int",
+ "long",
+ "float",
+ "double",
+ "bytes",
+ "string"
+ ],
+ "schema-default-type": "string"
+ }
+ }
+ ],
+ "filters": [
+ {
+ "name": "showConnectionProperties ",
+ "condition": {
+ "expression": "useConnection == false"
+ },
+ "show": [
+ {
+ "type": "property",
+ "name": "jdbcPluginName"
+ },
+ {
+ "type": "property",
+ "name": "instanceType"
+ },
+ {
+ "type": "property",
+ "name": "host"
+ },
+ {
+ "type": "property",
+ "name": "port"
+ },
+ {
+ "type": "property",
+ "name": "user"
+ },
+ {
+ "type": "property",
+ "name": "password"
+ },
+ {
+ "type": "property",
+ "name": "database"
+ },
+ {
+ "type": "property",
+ "name": "connectionArguments"
+ }
+ ]
+ },
+ {
+ "name": "showConnectionId",
+ "condition": {
+ "expression": "useConnection == true"
+ },
+ "show": [
+ {
+ "type": "property",
+ "name": "connection"
+ }
+ ]
+    }
+  ],
+ "jump-config": {
+ "datasets": [
+ {
+ "ref-property-name": "referenceName"
+ }
+ ]
+ }
+}
diff --git a/amazon-redshift-plugin/widgets/Redshift-connector.json b/amazon-redshift-plugin/widgets/Redshift-connector.json
new file mode 100644
index 000000000..3a2af8e01
--- /dev/null
+++ b/amazon-redshift-plugin/widgets/Redshift-connector.json
@@ -0,0 +1,75 @@
+{
+ "metadata": {
+ "spec-version": "1.0"
+ },
+ "display-name": "Redshift",
+ "configuration-groups": [
+ {
+ "label": "Basic",
+ "properties": [
+ {
+ "widget-type": "plugin-list",
+ "label": "JDBC Driver name",
+ "name": "jdbcPluginName",
+ "widget-attributes": {
+ "plugin-type": "jdbc"
+ }
+ },
+ {
+ "widget-type": "textbox",
+ "label": "Host",
+ "name": "host",
+ "widget-attributes": {
+ "default": "localhost"
+ }
+ },
+ {
+ "widget-type": "number",
+ "label": "Port",
+ "name": "port",
+ "widget-attributes": {
+ "default": "5439"
+ }
+ },
+ {
+ "widget-type": "textbox",
+ "label": "Database",
+ "name": "database"
+ }
+ ]
+ },
+ {
+ "label": "Credentials",
+ "properties": [
+ {
+ "widget-type": "textbox",
+ "label": "Username",
+ "name": "user"
+ },
+ {
+ "widget-type": "password",
+ "label": "Password",
+ "name": "password"
+ }
+ ]
+ },
+ {
+ "label": "Advanced",
+ "properties": [
+ {
+ "widget-type": "keyvalue",
+ "label": "Connection Arguments",
+ "name": "connectionArguments",
+ "widget-attributes": {
+ "showDelimiter": "false",
+ "key-placeholder": "Key",
+ "value-placeholder": "Value",
+ "kv-delimiter": "=",
+ "delimiter": ";"
+ }
+ }
+ ]
+ }
+ ],
+ "outputs": []
+}
diff --git a/aurora-mysql-plugin/pom.xml b/aurora-mysql-plugin/pom.xml
index 3562d56e0..f5629873c 100644
--- a/aurora-mysql-plugin/pom.xml
+++ b/aurora-mysql-plugin/pom.xml
@@ -20,7 +20,7 @@
  <parent>
    <artifactId>database-plugins-parent</artifactId>
    <groupId>io.cdap.plugin</groupId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>1.10.8</version>
  </parent>

  <name>Aurora DB MySQL plugin</name>
diff --git a/aurora-postgresql-plugin/pom.xml b/aurora-postgresql-plugin/pom.xml
index 07e2b60f1..35df3a6cf 100644
--- a/aurora-postgresql-plugin/pom.xml
+++ b/aurora-postgresql-plugin/pom.xml
@@ -20,7 +20,7 @@
  <parent>
    <artifactId>database-plugins-parent</artifactId>
    <groupId>io.cdap.plugin</groupId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>1.10.8</version>
  </parent>

  <name>Aurora DB PostgreSQL plugin</name>
diff --git a/cloudsql-mysql-plugin/docs/CloudSQLMySQL-action.md b/cloudsql-mysql-plugin/docs/CloudSQLMySQL-action.md
index b105453bf..a72ec526f 100644
--- a/cloudsql-mysql-plugin/docs/CloudSQLMySQL-action.md
+++ b/cloudsql-mysql-plugin/docs/CloudSQLMySQL-action.md
@@ -23,6 +23,8 @@ Properties
**Connection Name:** The CloudSQL instance to connect to in the format <project-id>:\<region>:<instance-name>.
Can be found in the instance overview page.
+**Port:** Port that MySQL is running on.
+
**CloudSQL Instance Type:** Whether the CloudSQL instance to connect to is private or public. Defaults to 'Public'.
**Username:** User identity for connecting to the specified database.
diff --git a/cloudsql-mysql-plugin/docs/CloudSQLMySQL-batchsink.md b/cloudsql-mysql-plugin/docs/CloudSQLMySQL-batchsink.md
index 2fa240ddf..eaf9e5535 100644
--- a/cloudsql-mysql-plugin/docs/CloudSQLMySQL-batchsink.md
+++ b/cloudsql-mysql-plugin/docs/CloudSQLMySQL-batchsink.md
@@ -32,6 +32,8 @@ You also can use the macro function ${conn(connection-name)}.
**Connection Name:** The CloudSQL instance to connect to in the format <project-id>:\<region>:<instance-name>.
Can be found in the instance overview page.
+**Port:** Port that MySQL is running on.
+
**CloudSQL Instance Type:** Whether the CloudSQL instance to connect to is private or public. Defaults to 'Public'.
**Table Name:** Name of the table to export to. Table must exist prior to running the pipeline.
diff --git a/cloudsql-mysql-plugin/docs/CloudSQLMySQL-batchsource.md b/cloudsql-mysql-plugin/docs/CloudSQLMySQL-batchsource.md
index 1c656ad51..52a5945e7 100644
--- a/cloudsql-mysql-plugin/docs/CloudSQLMySQL-batchsource.md
+++ b/cloudsql-mysql-plugin/docs/CloudSQLMySQL-batchsource.md
@@ -31,6 +31,8 @@ You also can use the macro function ${conn(connection-name)}.
**Connection Name:** The CloudSQL instance to connect to in the format <project-id>:\<region>:<instance-name>.
Can be found in the instance overview page.
+**Port:** Port that MySQL is running on.
+
**CloudSQL Instance Type:** Whether the CloudSQL instance to connect to is private or public. Defaults to 'Public'.
**Import Query:** The SELECT query to use to import data from the specified table.
diff --git a/cloudsql-mysql-plugin/docs/CloudSQLMySQL-connector.md b/cloudsql-mysql-plugin/docs/CloudSQLMySQL-connector.md
index 7ddcaedbb..3197760e0 100644
--- a/cloudsql-mysql-plugin/docs/CloudSQLMySQL-connector.md
+++ b/cloudsql-mysql-plugin/docs/CloudSQLMySQL-connector.md
@@ -18,6 +18,8 @@ Properties
**Connection Name:** The CloudSQL instance to connect to in the format <project-id>:\<region>:<instance-name>.
Can be found in the instance overview page.
+**Port:** Port that MySQL is running on.
+
**Database:** MySQL database name.
**Username:** User identity for connecting to the specified database. Required for databases that need
diff --git a/cloudsql-mysql-plugin/pom.xml b/cloudsql-mysql-plugin/pom.xml
index d5eaabd78..33b0d06f4 100644
--- a/cloudsql-mysql-plugin/pom.xml
+++ b/cloudsql-mysql-plugin/pom.xml
@@ -20,7 +20,7 @@
  <parent>
    <artifactId>database-plugins-parent</artifactId>
    <groupId>io.cdap.plugin</groupId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>1.10.8</version>
  </parent>

  <name>CloudSQL MySQL plugin</name>
@@ -45,7 +45,7 @@
      <groupId>io.cdap.plugin</groupId>
      <artifactId>mysql-plugin</artifactId>
-      <version>1.10.0-SNAPSHOT</version>
+      <version>${project.version}</version>
diff --git a/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLAction.java b/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLAction.java
index 551102118..0608edb75 100644
--- a/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLAction.java
+++ b/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLAction.java
@@ -18,11 +18,13 @@
import com.google.common.collect.ImmutableMap;
import io.cdap.cdap.api.annotation.Description;
+import io.cdap.cdap.api.annotation.Macro;
import io.cdap.cdap.api.annotation.Name;
import io.cdap.cdap.api.annotation.Plugin;
import io.cdap.cdap.etl.api.FailureCollector;
import io.cdap.cdap.etl.api.PipelineConfigurer;
import io.cdap.cdap.etl.api.action.Action;
+import io.cdap.plugin.db.ConnectionConfig;
import io.cdap.plugin.db.action.AbstractDBAction;
import io.cdap.plugin.db.action.QueryConfig;
import io.cdap.plugin.util.CloudSQLUtil;
@@ -48,11 +50,13 @@ public CloudSQLMySQLAction(CloudSQLMySQLActionConfig cloudsqlMysqlActionConfig)
@Override
public void configurePipeline(PipelineConfigurer pipelineConfigurer) {
FailureCollector failureCollector = pipelineConfigurer.getStageConfigurer().getFailureCollector();
-
- CloudSQLUtil.checkConnectionName(
- failureCollector,
- cloudsqlMysqlActionConfig.instanceType,
- cloudsqlMysqlActionConfig.connectionName);
+
+ if (cloudsqlMysqlActionConfig.canConnect()) {
+ CloudSQLUtil.checkConnectionName(
+ failureCollector,
+ cloudsqlMysqlActionConfig.instanceType,
+ cloudsqlMysqlActionConfig.connectionName);
+ }
super.configurePipeline(pipelineConfigurer);
}
@@ -69,10 +73,18 @@ public CloudSQLMySQLActionConfig() {
"The CloudSQL instance to connect to. For a public instance, the connection string should be in the format "
      + "<project-id>:<region>:<instance-name> which can be found in the instance overview page. For a private "
+ "instance, enter the internal IP address of the Compute Engine VM cloudsql proxy is running on.")
+ @Macro
public String connectionName;
+ @Name(ConnectionConfig.PORT)
+ @Description("Database port number")
+ @Macro
+ @Nullable
+ private Integer port;
+
@Name(DATABASE)
@Description("Database name to connect to")
+ @Macro
public String database;
@Name(CloudSQLMySQLConstants.CONNECTION_TIMEOUT)
@@ -94,6 +106,7 @@ public String getConnectionString() {
return String.format(
CloudSQLMySQLConstants.PRIVATE_CLOUDSQL_MYSQL_CONNECTION_STRING_FORMAT,
connectionName,
+ getPort(),
database);
}
@@ -103,10 +116,19 @@ public String getConnectionString() {
connectionName);
}
+ public int getPort() {
+ return port == null ? 3306 : port;
+ }
+
@Override
  public Map<String, String> getDBSpecificArguments() {
return ImmutableMap.of(
CloudSQLMySQLConstants.CONNECTION_TIMEOUT, String.valueOf(connectionTimeout));
}
+
+ public boolean canConnect() {
+ return !containsMacro(CloudSQLUtil.CONNECTION_NAME) && !containsMacro(ConnectionConfig.PORT) &&
+ !containsMacro(DATABASE);
+ }
}
}
diff --git a/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLConnectorConfig.java b/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLConnectorConfig.java
index 42b3227ab..1e89d5a95 100644
--- a/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLConnectorConfig.java
+++ b/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLConnectorConfig.java
@@ -17,6 +17,7 @@
package io.cdap.plugin.cloudsql.mysql;
import io.cdap.cdap.api.annotation.Description;
+import io.cdap.cdap.api.annotation.Macro;
import io.cdap.cdap.api.annotation.Name;
import io.cdap.plugin.db.ConnectionConfig;
import io.cdap.plugin.db.connector.AbstractDBConnectorConfig;
@@ -38,10 +39,18 @@ public class CloudSQLMySQLConnectorConfig extends AbstractDBConnectorConfig {
"The CloudSQL instance to connect to. For a public instance, the connection string should be in the format "
      + "<project-id>:<region>:<instance-name> which can be found in the instance overview page. For a private "
+ "instance, enter the internal IP address of the Compute Engine VM cloudsql proxy is running on.")
+ @Macro
private String connectionName;
+ @Name(ConnectionConfig.PORT)
+ @Description("Database port number")
+ @Macro
+ @Nullable
+ private Integer port;
+
@Name(ConnectionConfig.DATABASE)
@Description("Database name to connect to")
+ @Macro
private String database;
@Name(CloudSQLUtil.INSTANCE_TYPE)
@@ -49,7 +58,8 @@ public class CloudSQLMySQLConnectorConfig extends AbstractDBConnectorConfig {
private String instanceType;
public CloudSQLMySQLConnectorConfig(String user, String password, String jdbcPluginName, String connectionArguments,
- String instanceType, String connectionName, String database) {
+ String instanceType, String connectionName, String database,
+ @Nullable Integer port) {
this.user = user;
this.password = password;
this.jdbcPluginName = jdbcPluginName;
@@ -57,6 +67,7 @@ public CloudSQLMySQLConnectorConfig(String user, String password, String jdbcPlu
this.instanceType = instanceType;
this.connectionName = connectionName;
this.database = database;
+ this.port = port;
}
public String getDatabase() {
@@ -71,12 +82,17 @@ public String getConnectionName() {
return connectionName;
}
+ public int getPort() {
+ return port == null ? 3306 : port;
+ }
+
@Override
public String getConnectionString() {
if (CloudSQLUtil.PRIVATE_INSTANCE.equalsIgnoreCase(instanceType)) {
return String.format(
CloudSQLMySQLConstants.PRIVATE_CLOUDSQL_MYSQL_CONNECTION_STRING_FORMAT,
connectionName,
+ getPort(),
database);
}
@@ -93,4 +109,10 @@ public Properties getConnectionArgumentsProperties() {
properties.put(JDBC_PROPERTY_SOCKET_TIMEOUT_MILLIS, "20000");
return properties;
}
+
+ @Override
+ public boolean canConnect() {
+ return super.canConnect() && !containsMacro(CloudSQLUtil.CONNECTION_NAME) &&
+ !containsMacro(ConnectionConfig.PORT) && !containsMacro(ConnectionConfig.DATABASE);
+ }
}
diff --git a/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLConstants.java b/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLConstants.java
index ae8a34c6a..c4b0d3b0f 100644
--- a/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLConstants.java
+++ b/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLConstants.java
@@ -26,5 +26,5 @@ private CloudSQLMySQLConstants() {
public static final String CONNECTION_TIMEOUT = "connectionTimeout";
public static final String PUBLIC_CLOUDSQL_MYSQL_CONNECTION_STRING_FORMAT =
"jdbc:mysql:///%s?cloudSqlInstance=%s&socketFactory=com.google.cloud.sql.mysql.SocketFactory";
- public static final String PRIVATE_CLOUDSQL_MYSQL_CONNECTION_STRING_FORMAT = "jdbc:mysql://%s/%s";
+ public static final String PRIVATE_CLOUDSQL_MYSQL_CONNECTION_STRING_FORMAT = "jdbc:mysql://%s:%s/%s";
}
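
With the private-instance format string now taking a port, the generated URL is host:port/database instead of always relying on the driver default. A small sketch of the resulting connection string, using a made-up internal IP and the 3306 fallback that getPort() returns when no port is configured:

// Sketch of the URL produced by the updated private-instance format string.
public class PrivateMySqlUrlSketch {
  private static final String PRIVATE_CLOUDSQL_MYSQL_CONNECTION_STRING_FORMAT = "jdbc:mysql://%s:%s/%s";

  public static void main(String[] args) {
    String connectionName = "10.0.0.15"; // example internal IP of the VM running the Cloud SQL proxy
    int port = 3306;                     // default used when no port is configured
    String database = "testdb";          // example database name
    System.out.println(String.format(PRIVATE_CLOUDSQL_MYSQL_CONNECTION_STRING_FORMAT,
                                     connectionName, port, database));
    // -> jdbc:mysql://10.0.0.15:3306/testdb
  }
}
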
diff --git a/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLSink.java b/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLSink.java
index ad7f63b1e..271012f7e 100644
--- a/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLSink.java
+++ b/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLSink.java
@@ -91,7 +91,8 @@ protected LineageRecorder getLineageRecorder(BatchSinkContext context) {
host = connectionParams[2];
location = connectionParams[1];
}
- String fqn = DBUtils.constructFQN("mysql", host, 3306,
+ String fqn = DBUtils.constructFQN("mysql", host,
+ cloudsqlMysqlSinkConfig.getConnection().getPort(),
cloudsqlMysqlSinkConfig.getConnection().getDatabase(),
cloudsqlMysqlSinkConfig.getReferenceName());
Asset.Builder assetBuilder = Asset.builder(cloudsqlMysqlSinkConfig.getReferenceName()).setFqn(fqn);
diff --git a/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLSource.java b/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLSource.java
index 50e2c3b7a..b8b6fbf27 100644
--- a/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLSource.java
+++ b/cloudsql-mysql-plugin/src/main/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLSource.java
@@ -86,6 +86,7 @@ protected String createConnectionString() {
return String.format(
CloudSQLMySQLConstants.PRIVATE_CLOUDSQL_MYSQL_CONNECTION_STRING_FORMAT,
cloudsqlMysqlSourceConfig.connection.getConnectionName(),
+ cloudsqlMysqlSourceConfig.connection.getPort(),
cloudsqlMysqlSourceConfig.connection.getDatabase());
}
@@ -108,7 +109,8 @@ protected LineageRecorder getLineageRecorder(BatchSourceContext context) {
host = connectionParams[2];
location = connectionParams[1];
}
- String fqn = DBUtils.constructFQN("mysql", host, 3306,
+ String fqn = DBUtils.constructFQN("mysql", host,
+ cloudsqlMysqlSourceConfig.getConnection().getPort(),
cloudsqlMysqlSourceConfig.getConnection().getDatabase(),
cloudsqlMysqlSourceConfig.getReferenceName());
Asset.Builder assetBuilder = Asset.builder(cloudsqlMysqlSourceConfig.getReferenceName()).setFqn(fqn);
diff --git a/cloudsql-mysql-plugin/src/test/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLConnectorTest.java b/cloudsql-mysql-plugin/src/test/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLConnectorTest.java
index f8fc20b70..610b138dd 100644
--- a/cloudsql-mysql-plugin/src/test/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLConnectorTest.java
+++ b/cloudsql-mysql-plugin/src/test/java/io/cdap/plugin/cloudsql/mysql/CloudSQLMySQLConnectorTest.java
@@ -54,7 +54,7 @@ public void test() throws IOException, ClassNotFoundException, InstantiationExce
test(
new CloudSQLMySQLConnector(
new CloudSQLMySQLConnectorConfig(username, password, JDBC_PLUGIN_NAME, connectionArguments, instanceType,
- connectionName, database)
+ connectionName, database, null)
),
JDBC_DRIVER_CLASS_NAME,
CloudSQLMySQLConstants.PLUGIN_NAME
diff --git a/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-action.json b/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-action.json
index 42405fcce..66d6ebb85 100644
--- a/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-action.json
+++ b/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-action.json
@@ -74,11 +74,11 @@
}
},
{
- "widget-type": "textbox",
- "label": "Instance Name",
- "name": "instanceName",
+ "widget-type": "number",
+ "label": "Port",
+ "name": "port",
"widget-attributes": {
- "placeholder": "CloudSQL instance connection name"
+ "default": "3306"
}
},
{
@@ -113,5 +113,19 @@
}
]
}
+ ],
+ "filters": [
+ {
+ "name": "showPrivateInstanceProperties ",
+ "condition": {
+ "expression": "instanceType == 'private'"
+ },
+ "show": [
+ {
+ "type": "property",
+ "name": "port"
+ }
+ ]
+ }
]
}
diff --git a/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-batchsink.json b/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-batchsink.json
index fd628cf14..f68ebe6ff 100644
--- a/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-batchsink.json
+++ b/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-batchsink.json
@@ -66,6 +66,14 @@
"placeholder": "CloudSQL instance connection name"
}
},
+ {
+ "widget-type": "number",
+ "label": "Port",
+ "name": "port",
+ "widget-attributes": {
+ "default": "3306"
+ }
+ },
{
"widget-type": "textbox",
"label": "Username",
@@ -201,6 +209,18 @@
"name": "connection"
}
]
+ },
+ {
+ "name": "showPrivateInstanceProperties ",
+ "condition": {
+ "expression": "instanceType == 'private'"
+ },
+ "show": [
+ {
+ "type": "property",
+ "name": "port"
+ }
+ ]
}
],
"outputs": [],
diff --git a/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-batchsource.json b/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-batchsource.json
index ef75a8a29..4ac7747f4 100644
--- a/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-batchsource.json
+++ b/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-batchsource.json
@@ -66,6 +66,14 @@
"placeholder": "CloudSQL instance connection name"
}
},
+ {
+ "widget-type": "number",
+ "label": "Port",
+ "name": "port",
+ "widget-attributes": {
+ "default": "3306"
+ }
+ },
{
"widget-type": "textbox",
"label": "Username",
@@ -231,6 +239,18 @@
"name": "connection"
}
]
+ },
+ {
+ "name": "showPrivateInstanceProperties ",
+ "condition": {
+ "expression": "instanceType == 'private'"
+ },
+ "show": [
+ {
+ "type": "property",
+ "name": "port"
+ }
+ ]
}
],
"jump-config": {
diff --git a/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-connector.json b/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-connector.json
index 1f8551605..b5c2c9993 100644
--- a/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-connector.json
+++ b/cloudsql-mysql-plugin/widgets/CloudSQLMySQL-connector.json
@@ -46,6 +46,14 @@
"widget-attributes": {
"placeholder": "CloudSQL instance connection name"
}
+ },
+ {
+ "widget-type": "number",
+ "label": "Port",
+ "name": "port",
+ "widget-attributes": {
+ "default": "3306"
+ }
}
]
},
@@ -88,5 +96,19 @@
]
}
],
- "outputs": []
+ "outputs": [],
+ "filters": [
+ {
+ "name": "showPrivateInstanceProperties ",
+ "condition": {
+ "expression": "instanceType == 'private'"
+ },
+ "show": [
+ {
+ "type": "property",
+ "name": "port"
+ }
+ ]
+ }
+ ]
}
diff --git a/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-action.md b/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-action.md
index 7ad2f51b4..c8aefd58a 100644
--- a/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-action.md
+++ b/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-action.md
@@ -23,6 +23,8 @@ Properties
**Connection Name:** The CloudSQL instance to connect to in the format <project-id>:\<region>:<instance-name>.
Can be found in the instance overview page.
+**Port:** Port that PostgreSQL is running on.
+
**CloudSQL Instance Type:** Whether the CloudSQL instance to connect to is private or public. Defaults to 'Public'.
**Username:** User identity for connecting to the specified database.
diff --git a/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-batchsink.md b/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-batchsink.md
index 079d5df32..338a67c9e 100644
--- a/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-batchsink.md
+++ b/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-batchsink.md
@@ -32,6 +32,8 @@ You also can use the macro function ${conn(connection-name)}.
**Connection Name:** The CloudSQL instance to connect to in the format <project-id>:\<region>:<instance-name>.
Can be found in the instance overview page.
+**Port:** Port that PostgreSQL is running on.
+
**CloudSQL Instance Type:** Whether the CloudSQL instance to connect to is private or public. Defaults to 'Public'.
**Table Name:** Name of the table to export to.
@@ -148,6 +150,7 @@ Please, refer to PostgreSQL data types documentation to figure out proper format
| double precision | double | |
| integer | int | |
| numeric(precision, scale)/decimal(precision, scale) | decimal | |
+| numeric/decimal (precision not specified) | string | |
| real | float | |
| smallint | int | |
| text | string | |
diff --git a/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-batchsource.md b/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-batchsource.md
index 3c3bd989e..8d9ad7171 100644
--- a/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-batchsource.md
+++ b/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-batchsource.md
@@ -31,6 +31,8 @@ You also can use the macro function ${conn(connection-name)}.
**Connection Name:** The CloudSQL instance to connect to in the format <project-id>:\<region>:<instance-name>.
Can be found in the instance overview page.
+**Port:** Port that PostgreSQL is running on.
+
**CloudSQL Instance Type:** Whether the CloudSQL instance to connect to is private or public. Defaults to 'Public'.
**Import Query:** The SELECT query to use to import data from the specified table.
@@ -172,6 +174,7 @@ Please, refer to PostgreSQL data types documentation to figure out proper format
| double precision | double | |
| integer | int | |
| numeric(precision, scale)/decimal(precision, scale) | decimal | |
+| numeric/decimal (precision not specified) | string | |
| real | float | |
| smallint | int | |
| smallserial | int | |
diff --git a/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-connector.md b/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-connector.md
index 4bee117af..0e502fefd 100644
--- a/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-connector.md
+++ b/cloudsql-postgresql-plugin/docs/CloudSQLPostgreSQL-connector.md
@@ -18,6 +18,8 @@ Properties
**Connection Name:** The CloudSQL instance to connect to in the format <project-id>:\<region>:<instance-name>.
Can be found in the instance overview page.
+**Port:** Port that PostgreSQL is running on.
+
**Database:** CloudSQL PostgreSQL database name.
**Username:** User identity for connecting to the specified database. Required for databases that need
diff --git a/cloudsql-postgresql-plugin/pom.xml b/cloudsql-postgresql-plugin/pom.xml
index 5107aba66..078ec8c4d 100644
--- a/cloudsql-postgresql-plugin/pom.xml
+++ b/cloudsql-postgresql-plugin/pom.xml
@@ -20,7 +20,7 @@
  <parent>
    <artifactId>database-plugins-parent</artifactId>
    <groupId>io.cdap.plugin</groupId>
-    <version>1.10.0-SNAPSHOT</version>
+    <version>1.10.8</version>
  </parent>

  <name>CloudSQL PostgreSQL plugin</name>
diff --git a/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLAction.java b/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLAction.java
index 072ff2c8f..1a3f8ad7b 100644
--- a/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLAction.java
+++ b/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLAction.java
@@ -18,11 +18,13 @@
import com.google.common.collect.ImmutableMap;
import io.cdap.cdap.api.annotation.Description;
+import io.cdap.cdap.api.annotation.Macro;
import io.cdap.cdap.api.annotation.Name;
import io.cdap.cdap.api.annotation.Plugin;
import io.cdap.cdap.etl.api.FailureCollector;
import io.cdap.cdap.etl.api.PipelineConfigurer;
import io.cdap.cdap.etl.api.action.Action;
+import io.cdap.plugin.db.ConnectionConfig;
import io.cdap.plugin.db.action.AbstractDBAction;
import io.cdap.plugin.db.action.QueryConfig;
import io.cdap.plugin.util.CloudSQLUtil;
@@ -48,11 +50,13 @@ public CloudSQLPostgreSQLAction(CloudSQLPostgreSQLActionConfig cloudsqlPostgresq
@Override
public void configurePipeline(PipelineConfigurer pipelineConfigurer) {
FailureCollector failureCollector = pipelineConfigurer.getStageConfigurer().getFailureCollector();
-
- CloudSQLUtil.checkConnectionName(
- failureCollector,
- cloudsqlPostgresqlActionConfig.instanceType,
- cloudsqlPostgresqlActionConfig.connectionName);
+
+ if (cloudsqlPostgresqlActionConfig.canConnect()) {
+ CloudSQLUtil.checkConnectionName(
+ failureCollector,
+ cloudsqlPostgresqlActionConfig.instanceType,
+ cloudsqlPostgresqlActionConfig.connectionName);
+ }
super.configurePipeline(pipelineConfigurer);
}
@@ -69,10 +73,18 @@ public CloudSQLPostgreSQLActionConfig() {
"The CloudSQL instance to connect to. For a public instance, the connection string should be in the format "
+ ":: which can be found in the instance overview page. For a private "
+ "instance, enter the internal IP address of the Compute Engine VM cloudsql proxy is running on.")
+ @Macro
public String connectionName;
+ @Name(ConnectionConfig.PORT)
+ @Description("Database port number")
+ @Macro
+ @Nullable
+ private Integer port;
+
@Name(DATABASE)
@Description("Database name to connect to")
+ @Macro
public String database;
@Name(CloudSQLPostgreSQLConstants.CONNECTION_TIMEOUT)
@@ -94,6 +106,7 @@ public String getConnectionString() {
return String.format(
CloudSQLPostgreSQLConstants.PRIVATE_CLOUDSQL_POSTGRES_CONNECTION_STRING_FORMAT,
connectionName,
+ getPort(),
database);
}
@@ -103,10 +116,19 @@ public String getConnectionString() {
connectionName);
}
+ public int getPort() {
+ return port == null ? 5432 : port;
+ }
+
@Override
public Map getDBSpecificArguments() {
return ImmutableMap.of(
CloudSQLPostgreSQLConstants.CONNECTION_TIMEOUT, String.valueOf(connectionTimeout));
}
+
+ public boolean canConnect() {
+ return !containsMacro(CloudSQLUtil.CONNECTION_NAME) && !containsMacro(ConnectionConfig.PORT) &&
+ !containsMacro(DATABASE);
+ }
}
}
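The `canConnect()` guard above skips the connection-name check whenever the connection name, port, or database is supplied as a macro and is therefore unknown at configure time. A plain-Java sketch of that gating logic, with a `macroFields` set standing in for CDAP's `containsMacro` checks (an illustrative simplification, not the plugin API):

```java
import java.util.Set;

public class MacroGateSketch {
  public static void main(String[] args) {
    // Fields whose values are macros, i.e. only resolved at pipeline runtime.
    Set<String> macroFields = Set.of("port");

    // Mirrors canConnect(): validate only when none of the fields is a macro.
    boolean canConnect = !macroFields.contains("connectionName")
        && !macroFields.contains("port")
        && !macroFields.contains("database");

    if (canConnect) {
      System.out.println("Running design-time connection name validation");
    } else {
      System.out.println("Skipping validation until macros are resolved");
    }
  }
}
```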
diff --git a/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLConnectorConfig.java b/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLConnectorConfig.java
index d3ec0849b..30effd350 100644
--- a/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLConnectorConfig.java
+++ b/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLConnectorConfig.java
@@ -17,6 +17,7 @@
package io.cdap.plugin.cloudsql.postgres;
import io.cdap.cdap.api.annotation.Description;
+import io.cdap.cdap.api.annotation.Macro;
import io.cdap.cdap.api.annotation.Name;
import io.cdap.plugin.db.ConnectionConfig;
import io.cdap.plugin.db.connector.AbstractDBConnectorConfig;
@@ -34,10 +35,18 @@ public class CloudSQLPostgreSQLConnectorConfig extends AbstractDBConnectorConfig
"The CloudSQL instance to connect to. For a public instance, the connection string should be in the format "
+ ":: which can be found in the instance overview page. For a private "
+ "instance, enter the internal IP address of the Compute Engine VM cloudsql proxy is running on.")
+ @Macro
private String connectionName;
+ @Name(ConnectionConfig.PORT)
+ @Description("Database port number")
+ @Macro
+ @Nullable
+ private Integer port;
+
@Name(ConnectionConfig.DATABASE)
@Description("Database name to connect to")
+ @Macro
private String database;
@Name(CloudSQLUtil.INSTANCE_TYPE)
@@ -46,7 +55,7 @@ public class CloudSQLPostgreSQLConnectorConfig extends AbstractDBConnectorConfig
public CloudSQLPostgreSQLConnectorConfig(String username, String password, String jdbcPluginName,
String connectionArguments, String instanceType,
- String connectionName, String database) {
+ String connectionName, String database, @Nullable Integer port) {
this.user = username;
this.password = password;
this.jdbcPluginName = jdbcPluginName;
@@ -54,6 +63,7 @@ public CloudSQLPostgreSQLConnectorConfig(String username, String password, Strin
this.instanceType = instanceType;
this.connectionName = connectionName;
this.database = database;
+ this.port = port;
}
public String getDatabase() {
@@ -68,12 +78,17 @@ public String getConnectionName() {
return connectionName;
}
+ public int getPort() {
+ return port == null ? 5432 : port;
+ }
+
@Override
public String getConnectionString() {
if (CloudSQLUtil.PRIVATE_INSTANCE.equalsIgnoreCase(instanceType)) {
return String.format(
CloudSQLPostgreSQLConstants.PRIVATE_CLOUDSQL_POSTGRES_CONNECTION_STRING_FORMAT,
connectionName,
+ getPort(),
database);
}
@@ -82,4 +97,10 @@ public String getConnectionString() {
database,
connectionName);
}
+
+ @Override
+ public boolean canConnect() {
+ return super.canConnect() && !containsMacro(CloudSQLUtil.CONNECTION_NAME) &&
+ !containsMacro(ConnectionConfig.PORT) && !containsMacro(ConnectionConfig.DATABASE);
+ }
}
diff --git a/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLConstants.java b/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLConstants.java
index 946171102..8296ed344 100644
--- a/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLConstants.java
+++ b/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLConstants.java
@@ -26,5 +26,5 @@ private CloudSQLPostgreSQLConstants() {
public static final String CONNECTION_TIMEOUT = "connectionTimeout";
public static final String PUBLIC_CLOUDSQL_POSTGRES_CONNECTION_STRING_FORMAT =
"jdbc:postgresql:///%s?cloudSqlInstance=%s&socketFactory=com.google.cloud.sql.postgres.SocketFactory";
- public static final String PRIVATE_CLOUDSQL_POSTGRES_CONNECTION_STRING_FORMAT = "jdbc:postgresql://%s/%s";
+ public static final String PRIVATE_CLOUDSQL_POSTGRES_CONNECTION_STRING_FORMAT = "jdbc:postgresql://%s:%s/%s";
}
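To illustrate the change to the private format above, a plain-Java sketch (hypothetical instance, host, and database names) of what the two constants expand to, including the new port placeholder and its 5432 default:

```java
public class ConnectionStringSketch {
  private static final String PUBLIC_FORMAT =
      "jdbc:postgresql:///%s?cloudSqlInstance=%s&socketFactory=com.google.cloud.sql.postgres.SocketFactory";
  private static final String PRIVATE_FORMAT = "jdbc:postgresql://%s:%s/%s";

  public static void main(String[] args) {
    // Public instance: database name plus the instance connection name.
    System.out.println(String.format(PUBLIC_FORMAT, "mydb", "my-project:us-central1:my-instance"));

    // Private instance: proxy host, the configurable port, and database.
    Integer configuredPort = null;                              // user left the field empty
    int port = configuredPort == null ? 5432 : configuredPort;  // same fallback as getPort()
    System.out.println(String.format(PRIVATE_FORMAT, "10.0.0.5", port, "mydb"));
  }
}
```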
diff --git a/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLSink.java b/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLSink.java
index 4451073d6..c3a5ee92f 100644
--- a/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLSink.java
+++ b/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLSink.java
@@ -126,7 +126,8 @@ protected LineageRecorder getLineageRecorder(BatchSinkContext context) {
host = connectionParams[2];
location = connectionParams[1];
}
- String fqn = DBUtils.constructFQN("postgres", host, 5432,
+ String fqn = DBUtils.constructFQN("postgres", host,
+ cloudsqlPostgresqlSinkConfig.getConnection().getPort(),
cloudsqlPostgresqlSinkConfig.getConnection().getDatabase(),
cloudsqlPostgresqlSinkConfig.getReferenceName());
Asset.Builder assetBuilder = Asset.builder(cloudsqlPostgresqlSinkConfig.getReferenceName()).setFqn(fqn);
diff --git a/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLSource.java b/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLSource.java
index 6eb1765da..6d6ba29f8 100644
--- a/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLSource.java
+++ b/cloudsql-postgresql-plugin/src/main/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLSource.java
@@ -93,6 +93,7 @@ protected String createConnectionString() {
return String.format(
CloudSQLPostgreSQLConstants.PRIVATE_CLOUDSQL_POSTGRES_CONNECTION_STRING_FORMAT,
cloudsqlPostgresqlSourceConfig.connection.getConnectionName(),
+ cloudsqlPostgresqlSourceConfig.connection.getPort(),
cloudsqlPostgresqlSourceConfig.connection.getDatabase());
}
@@ -116,7 +117,8 @@ protected LineageRecorder getLineageRecorder(BatchSourceContext context) {
host = connectionParams[2];
location = connectionParams[1];
}
- String fqn = DBUtils.constructFQN("postgres", host, 5432,
+ String fqn = DBUtils.constructFQN("postgres", host,
+ cloudsqlPostgresqlSourceConfig.getConnection().getPort(),
cloudsqlPostgresqlSourceConfig.getConnection().getDatabase(),
cloudsqlPostgresqlSourceConfig.getReferenceName());
Asset.Builder assetBuilder = Asset.builder(cloudsqlPostgresqlSourceConfig.getReferenceName()).setFqn(fqn);
diff --git a/cloudsql-postgresql-plugin/src/test/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLConnectorTest.java b/cloudsql-postgresql-plugin/src/test/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLConnectorTest.java
index dce4ed443..215417114 100644
--- a/cloudsql-postgresql-plugin/src/test/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLConnectorTest.java
+++ b/cloudsql-postgresql-plugin/src/test/java/io/cdap/plugin/cloudsql/postgres/CloudSQLPostgreSQLConnectorTest.java
@@ -54,7 +54,7 @@ public void test() throws IOException, ClassNotFoundException, InstantiationExce
test(
new CloudSQLPostgreSQLConnector(
new CloudSQLPostgreSQLConnectorConfig(username, password, JDBC_PLUGIN_NAME, connectionArguments, instanceType,
- connectionName, database)
+ connectionName, database, null)
),
JDBC_DRIVER_CLASS_NAME,
CloudSQLPostgreSQLConstants.PLUGIN_NAME
diff --git a/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-action.json b/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-action.json
index 9e9f124b9..eab240679 100644
--- a/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-action.json
+++ b/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-action.json
@@ -73,6 +73,14 @@
"placeholder": "CloudSQL instance connection name"
}
},
+ {
+ "widget-type": "number",
+ "label": "Port",
+ "name": "port",
+ "widget-attributes": {
+ "default": "5432"
+ }
+ },
{
"widget-type": "textarea",
"label": "Database Command",
@@ -105,5 +113,19 @@
}
]
}
+ ],
+ "filters": [
+ {
+ "name": "showPrivateInstanceProperties ",
+ "condition": {
+ "expression": "instanceType == 'private'"
+ },
+ "show": [
+ {
+ "type": "property",
+ "name": "port"
+ }
+ ]
+ }
]
}
diff --git a/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-batchsink.json b/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-batchsink.json
index 8b51ff2c4..bf4bde01d 100644
--- a/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-batchsink.json
+++ b/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-batchsink.json
@@ -66,6 +66,14 @@
"placeholder": "CloudSQL instance connection name"
}
},
+ {
+ "widget-type": "number",
+ "label": "Port",
+ "name": "port",
+ "widget-attributes": {
+ "default": "5432"
+ }
+ },
{
"widget-type": "textbox",
"label": "Username",
@@ -206,6 +214,18 @@
"name": "connection"
}
]
+ },
+ {
+ "name": "showPrivateInstanceProperties ",
+ "condition": {
+ "expression": "instanceType == 'private'"
+ },
+ "show": [
+ {
+ "type": "property",
+ "name": "port"
+ }
+ ]
}
],
"jump-config": {
diff --git a/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-batchsource.json b/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-batchsource.json
index 825d5714c..96ea97ac2 100644
--- a/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-batchsource.json
+++ b/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-batchsource.json
@@ -66,6 +66,14 @@
"placeholder": "CloudSQL instance connection name"
}
},
+ {
+ "widget-type": "number",
+ "label": "Port",
+ "name": "port",
+ "widget-attributes": {
+ "default": "5432"
+ }
+ },
{
"widget-type": "textbox",
"label": "Username",
@@ -235,6 +243,18 @@
"name": "connection"
}
]
+ },
+ {
+ "name": "showPrivateInstanceProperties ",
+ "condition": {
+ "expression": "instanceType == 'private'"
+ },
+ "show": [
+ {
+ "type": "property",
+ "name": "port"
+ }
+ ]
}
],
"jump-config": {
diff --git a/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-connector.json b/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-connector.json
index de3af0795..9824f91bd 100644
--- a/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-connector.json
+++ b/cloudsql-postgresql-plugin/widgets/CloudSQLPostgreSQL-connector.json
@@ -46,6 +46,14 @@
"widget-attributes": {
"placeholder": "CloudSQL instance connection name"
}
+ },
+ {
+ "widget-type": "number",
+ "label": "Port",
+ "name": "port",
+ "widget-attributes": {
+ "default": "5432"
+ }
}
]
},
@@ -88,5 +96,19 @@
]
}
],
- "outputs": []
+ "outputs": [],
+ "filters": [
+ {
+ "name": "showPrivateInstanceProperties ",
+ "condition": {
+ "expression": "instanceType == 'private'"
+ },
+ "show": [
+ {
+ "type": "property",
+ "name": "port"
+ }
+ ]
+ }
+ ]
}
diff --git a/database-commons/pom.xml b/database-commons/pom.xml
index 8a4dd3f5d..b904887c5 100644
--- a/database-commons/pom.xml
+++ b/database-commons/pom.xml
@@ -20,7 +20,7 @@
<artifactId>database-plugins-parent</artifactId>
<groupId>io.cdap.plugin</groupId>
- <version>1.10.0-SNAPSHOT</version>
+ <version>1.10.8</version>
<name>Database Commons</name>
diff --git a/database-commons/src/main/java/io/cdap/plugin/db/ConnectionConfig.java b/database-commons/src/main/java/io/cdap/plugin/db/ConnectionConfig.java
index 588ed78b8..c5320e25e 100644
--- a/database-commons/src/main/java/io/cdap/plugin/db/ConnectionConfig.java
+++ b/database-commons/src/main/java/io/cdap/plugin/db/ConnectionConfig.java
@@ -45,6 +45,7 @@ public abstract class ConnectionConfig extends PluginConfig implements DatabaseC
public static final String CONNECTION_ARGUMENTS = "connectionArguments";
public static final String JDBC_PLUGIN_NAME = "jdbcPluginName";
public static final String JDBC_PLUGIN_TYPE = "jdbc";
+ public static final String TRANSACTION_ISOLATION_LEVEL = "transactionIsolationLevel";
@Name(JDBC_PLUGIN_NAME)
@Description("Name of the JDBC driver to use. This is the value of the 'jdbcPluginName' key defined in the JSON " +
diff --git a/database-commons/src/main/java/io/cdap/plugin/db/DBRecord.java b/database-commons/src/main/java/io/cdap/plugin/db/DBRecord.java
index 1d1743177..5507ab6b4 100644
--- a/database-commons/src/main/java/io/cdap/plugin/db/DBRecord.java
+++ b/database-commons/src/main/java/io/cdap/plugin/db/DBRecord.java
@@ -219,7 +219,7 @@ public void write(PreparedStatement stmt) throws SQLException {
}
}
- private Schema getNonNullableSchema(Schema.Field field) {
+ protected Schema getNonNullableSchema(Schema.Field field) {
Schema schema = field.getSchema();
if (field.getSchema().isNullable()) {
schema = field.getSchema().getNonNullable();
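Widening `getNonNullableSchema` from private to protected lets `DBRecord` subclasses reuse this nullable-schema unwrapping. A small sketch of what the unwrapping does, using the same CDAP `Schema` calls that appear in the method body (assumes `io.cdap.cdap.api.data.schema.Schema` is on the classpath):

```java
import io.cdap.cdap.api.data.schema.Schema;

public class NullableSchemaSketch {
  public static void main(String[] args) {
    // A nullable string field, as produced for a nullable database column.
    Schema fieldSchema = Schema.nullableOf(Schema.of(Schema.Type.STRING));

    // Same unwrapping as getNonNullableSchema(): strip the union with null.
    Schema effective = fieldSchema.isNullable() ? fieldSchema.getNonNullable() : fieldSchema;
    System.out.println(effective.getType()); // STRING
  }
}
```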
diff --git a/database-commons/src/main/java/io/cdap/plugin/db/connector/AbstractDBSpecificConnectorConfig.java b/database-commons/src/main/java/io/cdap/plugin/db/connector/AbstractDBSpecificConnectorConfig.java
index 5c6b08031..8de0e4d70 100644
--- a/database-commons/src/main/java/io/cdap/plugin/db/connector/AbstractDBSpecificConnectorConfig.java
+++ b/database-commons/src/main/java/io/cdap/plugin/db/connector/AbstractDBSpecificConnectorConfig.java
@@ -20,8 +20,9 @@
import io.cdap.cdap.api.annotation.Macro;
import io.cdap.cdap.api.annotation.Name;
import io.cdap.plugin.db.ConnectionConfig;
+import io.cdap.plugin.db.TransactionIsolationLevel;
-import java.util.Collections;
+import java.util.HashMap;
import java.util.Map;
import javax.annotation.Nullable;
@@ -42,6 +43,12 @@ public abstract class AbstractDBSpecificConnectorConfig extends AbstractDBConnec
@Nullable
protected Integer port;
+ @Name(ConnectionConfig.TRANSACTION_ISOLATION_LEVEL)
+ @Description("The transaction isolation level for the database session.")
+ @Macro
+ @Nullable
+ protected String transactionIsolationLevel;
+
public String getHost() {
return host;
}
@@ -55,4 +62,21 @@ public int getPort() {
public boolean canConnect() {
return super.canConnect() && !containsMacro(ConnectionConfig.HOST) && !containsMacro(ConnectionConfig.PORT);
}
+
+ @Override
+ public Map getAdditionalArguments() {
+ Map additionalArguments = new HashMap<>();
+ if (getTransactionIsolationLevel() != null) {
+ additionalArguments.put(TransactionIsolationLevel.CONF_KEY, getTransactionIsolationLevel());
+ }
+ return additionalArguments;
+ }
+
+ public String getTransactionIsolationLevel() {
+ if (transactionIsolationLevel == null) {
+ return null;
+ }
+ return TransactionIsolationLevel.Level.valueOf(transactionIsolationLevel).name();
+ }
}
+
diff --git a/database-commons/src/main/java/io/cdap/plugin/db/sink/ETLDBOutputFormat.java b/database-commons/src/main/java/io/cdap/plugin/db/sink/ETLDBOutputFormat.java
index 246aaefda..3917f514a 100644
--- a/database-commons/src/main/java/io/cdap/plugin/db/sink/ETLDBOutputFormat.java
+++ b/database-commons/src/main/java/io/cdap/plugin/db/sink/ETLDBOutputFormat.java
@@ -89,8 +89,8 @@ public void close(TaskAttemptContext context) throws IOException {
try {
if (!emptyData) {
getStatement().executeBatch();
- getConnection().commit();
}
+ getConnection().commit();
} catch (SQLException e) {
try {
getConnection().rollback();
@@ -127,6 +127,7 @@ public void write(K key, V value) throws IOException {
// This is done to reduce memory usage in the worker, as processed records can now be GC'd.
if (batchSize > 0 && numWrittenRecords % batchSize == 0) {
getStatement().executeBatch();
+ emptyData = true;
}
} catch (SQLException e) {
throw new IOException(e);
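The two fixes above flush a trailing batch only when one is pending, commit unconditionally in `close()`, and mark the batch as drained after every mid-stream `executeBatch()`. A generic JDBC sketch of that write pattern (hypothetical table and connection details, not the plugin's own output format):

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;

public class BatchWriteSketch {
  public static void main(String[] args) throws Exception {
    int batchSize = 100;
    try (Connection conn = DriverManager.getConnection(
        "jdbc:postgresql://localhost:5432/testdb", "user", "password")) {
      conn.setAutoCommit(false);
      try (PreparedStatement stmt =
               conn.prepareStatement("INSERT INTO events (id) VALUES (?)")) {
        boolean emptyBatch = true;
        for (int i = 0; i < 250; i++) {
          stmt.setInt(1, i);
          stmt.addBatch();
          emptyBatch = false;
          // Flush a full batch and note that nothing is pending any more.
          if ((i + 1) % batchSize == 0) {
            stmt.executeBatch();
            emptyBatch = true;
          }
        }
        // Flush only a non-empty trailing batch, but always commit,
        // mirroring the corrected close() logic above.
        if (!emptyBatch) {
          stmt.executeBatch();
        }
        conn.commit();
      } catch (Exception e) {
        conn.rollback();
        throw e;
      }
    }
  }
}
```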
diff --git a/database-commons/src/main/java/io/cdap/plugin/util/DBUtils.java b/database-commons/src/main/java/io/cdap/plugin/util/DBUtils.java
index 4a7d979c6..584c7bb3f 100644
--- a/database-commons/src/main/java/io/cdap/plugin/util/DBUtils.java
+++ b/database-commons/src/main/java/io/cdap/plugin/util/DBUtils.java
@@ -59,7 +59,7 @@
public final class DBUtils {
private static final Logger LOG = LoggerFactory.getLogger(DBUtils.class);
- private static final Calendar PURE_GREGORIAN_CALENDAR = createPureGregorianCalender();
+ public static final Calendar PURE_GREGORIAN_CALENDAR = createPureGregorianCalender();
// Java by default uses October 15, 1582 as a Gregorian cut over date.
// Any timestamp created with time less than this cut over date is treated as Julian date.
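`PURE_GREGORIAN_CALENDAR` becomes public so other classes can reuse it when handling dates before the default Julian/Gregorian cutover. A short sketch of what a pure Gregorian calendar typically looks like with standard `java.util` APIs (an assumption about `createPureGregorianCalender()`, whose body is not shown in this diff):

```java
import java.util.Date;
import java.util.GregorianCalendar;
import java.util.TimeZone;

public class PureGregorianSketch {
  public static void main(String[] args) {
    GregorianCalendar pureGregorian = new GregorianCalendar(TimeZone.getTimeZone("UTC"));
    // Push the Julian/Gregorian cutover infinitely far into the past so that
    // dates before October 15, 1582 are interpreted as proleptic Gregorian
    // dates rather than Julian dates.
    pureGregorian.setGregorianChange(new Date(Long.MIN_VALUE));
    System.out.println(pureGregorian.getGregorianChange());
  }
}
```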
diff --git a/db2-plugin/pom.xml b/db2-plugin/pom.xml
index eaee6afd2..88a65a403 100644
--- a/db2-plugin/pom.xml
+++ b/db2-plugin/pom.xml
@@ -20,7 +20,7 @@
<artifactId>database-plugins-parent</artifactId>
<groupId>io.cdap.plugin</groupId>
- <version>1.10.0-SNAPSHOT</version>
+ <version>1.10.8</version>
<name>IBM DB2 plugin</name>
@@ -98,7 +98,7 @@
<_exportcontents>
- io.cdap.plugin.db2.*
+ io.cdap.plugin.db2.*;
io.cdap.plugin.db.source.*;
io.cdap.plugin.db.sink.*;
org.apache.commons.lang;
diff --git a/generic-database-plugin/pom.xml b/generic-database-plugin/pom.xml
index 81fe1a175..a68a30a3c 100644
--- a/generic-database-plugin/pom.xml
+++ b/generic-database-plugin/pom.xml
@@ -20,7 +20,7 @@
<artifactId>database-plugins-parent</artifactId>
<groupId>io.cdap.plugin</groupId>
- <version>1.10.0-SNAPSHOT</version>
+ <version>1.10.8</version>
<name>Generic database plugin</name>
diff --git a/generic-db-argument-setter/pom.xml b/generic-db-argument-setter/pom.xml
index 1e9bc559d..3452fd788 100644
--- a/generic-db-argument-setter/pom.xml
+++ b/generic-db-argument-setter/pom.xml
@@ -20,7 +20,7 @@
<artifactId>database-plugins-parent</artifactId>
<groupId>io.cdap.plugin</groupId>
- <version>1.10.0-SNAPSHOT</version>
+ <version>1.10.8</version>
<name>Generic database argument setter plugin</name>
diff --git a/mariadb-plugin/pom.xml b/mariadb-plugin/pom.xml
index 9a3f11b20..13a3001a5 100644
--- a/mariadb-plugin/pom.xml
+++ b/mariadb-plugin/pom.xml
@@ -20,7 +20,7 @@
<artifactId>database-plugins-parent</artifactId>
<groupId>io.cdap.plugin</groupId>
- <version>1.10.0-SNAPSHOT</version>
+ <version>1.10.8</version>
<name>Maria DB plugin</name>
diff --git a/memsql-plugin/pom.xml b/memsql-plugin/pom.xml
index 183b8608b..06e576ab0 100644
--- a/memsql-plugin/pom.xml
+++ b/memsql-plugin/pom.xml
@@ -20,7 +20,7 @@
<artifactId>database-plugins-parent</artifactId>
<groupId>io.cdap.plugin</groupId>
- <version>1.10.0-SNAPSHOT</version>
+ <version>1.10.8</version>
<name>Memsql plugin</name>
diff --git a/mssql-plugin/docs/SQL Server-connector.md b/mssql-plugin/docs/SQL Server-connector.md
index cb72161f5..6f0038715 100644
--- a/mssql-plugin/docs/SQL Server-connector.md
+++ b/mssql-plugin/docs/SQL Server-connector.md
@@ -22,6 +22,14 @@ authentication. Optional for databases that do not require authentication.
**Password:** Password to use to connect to the specified database.
+**Transaction Isolation Level:** The transaction isolation level of the database connection.
+- TRANSACTION_READ_COMMITTED: No dirty reads. Non-repeatable reads and phantom reads are possible.
+- TRANSACTION_SERIALIZABLE: No dirty reads. Non-repeatable and phantom reads are prevented.
+- TRANSACTION_REPEATABLE_READ: No dirty reads. Prevents non-repeatable reads, but phantom reads are still possible.
+- TRANSACTION_READ_UNCOMMITTED: Allows dirty reads (reading uncommitted changes from other transactions). Non-repeatable reads and phantom reads are possible.
+
+For more details on the Transaction Isolation Levels supported in SQL Server, refer to the [SQL Server documentation](https://learn.microsoft.com/en-us/sql/t-sql/statements/set-transaction-isolation-level-transact-sql?view=sql-server-ver16)
+
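The level names above correspond to the standard `java.sql.Connection` isolation constants. A minimal sketch (hypothetical SQL Server connection details) of applying one of them directly through JDBC; the plugin instead passes the selected name through its configuration:

```java
import java.sql.Connection;
import java.sql.DriverManager;

public class IsolationLevelSketch {
  public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection(
        "jdbc:sqlserver://localhost:1433;databaseName=testdb", "user", "password")) {
      // TRANSACTION_READ_COMMITTED: no dirty reads; non-repeatable and phantom
      // reads remain possible, as described above.
      conn.setTransactionIsolation(Connection.TRANSACTION_READ_COMMITTED);
      System.out.println("isolation=" + conn.getTransactionIsolation());
    }
  }
}
```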
**Authentication Type:** Indicates which authentication method will be used for the connection. Use 'SQL Login' to
connect to a SQL Server using username and password properties. Use 'Active Directory Password' to connect to an Azure
SQL Database/Data Warehouse using an Azure AD principal name and password.
diff --git a/mssql-plugin/docs/SqlServer-batchsink.md b/mssql-plugin/docs/SqlServer-batchsink.md
index 1347ff8d3..cad8fabd6 100644
--- a/mssql-plugin/docs/SqlServer-batchsink.md
+++ b/mssql-plugin/docs/SqlServer-batchsink.md
@@ -46,6 +46,14 @@ an Azure SQL Database/Data Warehouse using an Azure AD principal name and passwo
**Password:** Password to use to connect to the specified database.
+**Transaction Isolation Level:** The transaction isolation level of the database connection.
+- TRANSACTION_READ_COMMITTED: No dirty reads. Non-repeatable reads and phantom reads are possible.
+- TRANSACTION_SERIALIZABLE: No dirty reads. Non-repeatable and phantom reads are prevented.
+- TRANSACTION_REPEATABLE_READ: No dirty reads. Prevents non-repeatable reads, but phantom reads are still possible.
+- TRANSACTION_READ_UNCOMMITTED: Allows dirty reads (reading uncommitted changes from other transactions). Non-repeatable reads and phantom reads are possible.
+
+For more details on the Transaction Isolation Levels supported in SQL Server, refer to the [SQL Server documentation](https://learn.microsoft.com/en-us/sql/t-sql/statements/set-transaction-isolation-level-transact-sql?view=sql-server-ver16)
+
**Instance Name:** SQL Server instance name to connect to. When it is not specified, a
connection is made to the default instance. For the case where both the instanceName and port are specified,
see the notes for port. If you specify a Virtual Network Name in the Server connection property, you cannot
diff --git a/mssql-plugin/docs/SqlServer-batchsource.md b/mssql-plugin/docs/SqlServer-batchsource.md
index 4e091dc1b..bc0fa784c 100644
--- a/mssql-plugin/docs/SqlServer-batchsource.md
+++ b/mssql-plugin/docs/SqlServer-batchsource.md
@@ -56,6 +56,14 @@ an Azure SQL Database/Data Warehouse using an Azure AD principal name and passwo
**Password:** Password to use to connect to the specified database.
+**Transaction Isolation Level:** The transaction isolation level of the database connection.
+- TRANSACTION_READ_COMMITTED: No dirty reads. Non-repeatable reads and phantom reads are possible.
+- TRANSACTION_SERIALIZABLE: No dirty reads. Non-repeatable and phantom reads are prevented.
+- TRANSACTION_REPEATABLE_READ: No dirty reads. Prevents non-repeatable reads, but phantom reads are still possible.
+- TRANSACTION_READ_UNCOMMITTED: Allows dirty reads (reading uncommitted changes from other transactions). Non-repeatable reads and phantom reads are possible.
+
+For more details on the Transaction Isolation Levels supported in SQL Server, refer to the [SQL Server documentation](https://learn.microsoft.com/en-us/sql/t-sql/statements/set-transaction-isolation-level-transact-sql?view=sql-server-ver16)
+
**Instance Name:** SQL Server instance name to connect to. When it is not specified, a
connection is made to the default instance. For the case where both the instanceName and port are specified,
see the notes for port. If you specify a Virtual Network Name in the Server connection property, you cannot
diff --git a/mssql-plugin/pom.xml b/mssql-plugin/pom.xml
index 2bbbee989..996639ece 100644
--- a/mssql-plugin/pom.xml
+++ b/mssql-plugin/pom.xml
@@ -20,7 +20,7 @@
<artifactId>database-plugins-parent</artifactId>
<groupId>io.cdap.plugin</groupId>
- <version>1.10.0-SNAPSHOT</version>
+ <version>1.10.8</version>
<name>Microsoft SQL Server plugin</name>
diff --git a/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSink.java b/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSink.java
index 0fa8991c5..7b749cdc5 100644
--- a/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSink.java
+++ b/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSink.java
@@ -167,6 +167,11 @@ public Map getDBSpecificArguments() {
packetSize, queryTimeout);
}
+ @Override
+ public String getTransactionIsolationLevel() {
+ return connection.getTransactionIsolationLevel();
+ }
+
@Override
public String getConnectionString() {
return String.format(SqlServerConstants.SQL_SERVER_CONNECTION_STRING_FORMAT,
diff --git a/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSource.java b/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSource.java
index a5f36215b..f4e1c95f9 100644
--- a/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSource.java
+++ b/mssql-plugin/src/main/java/io/cdap/plugin/mssql/SqlServerSource.java
@@ -188,6 +188,11 @@ public List getInitQueries() {
return Collections.emptyList();
}
+ @Override
+ public String getTransactionIsolationLevel() {
+ return connection.getTransactionIsolationLevel();
+ }
+
@Override
public void validate(FailureCollector collector) {
ConfigUtil.validateConnection(this, useConnection, connection, collector);
diff --git a/mssql-plugin/widgets/SQL Server-connector.json b/mssql-plugin/widgets/SQL Server-connector.json
index 171076295..c326cd81d 100644
--- a/mssql-plugin/widgets/SQL Server-connector.json
+++ b/mssql-plugin/widgets/SQL Server-connector.json
@@ -64,6 +64,20 @@
"widget-type": "password",
"label": "Password",
"name": "password"
+ },
+ {
+ "widget-type": "select",
+ "label": "Transaction Isolation Level",
+ "name": "transactionIsolationLevel",
+ "widget-attributes": {
+ "values": [
+ "TRANSACTION_READ_UNCOMMITTED",
+ "TRANSACTION_READ_COMMITTED",
+ "TRANSACTION_REPEATABLE_READ",
+ "TRANSACTION_SERIALIZABLE"
+ ],
+ "default": "TRANSACTION_SERIALIZABLE"
+ }
}
]
},
diff --git a/mssql-plugin/widgets/SqlServer-batchsink.json b/mssql-plugin/widgets/SqlServer-batchsink.json
index 02f8326db..91d3bbbd7 100644
--- a/mssql-plugin/widgets/SqlServer-batchsink.json
+++ b/mssql-plugin/widgets/SqlServer-batchsink.json
@@ -84,6 +84,20 @@
"label": "Password",
"name": "password"
},
+ {
+ "widget-type": "select",
+ "label": "Transaction Isolation Level",
+ "name": "transactionIsolationLevel",
+ "widget-attributes": {
+ "values": [
+ "TRANSACTION_READ_UNCOMMITTED",
+ "TRANSACTION_READ_COMMITTED",
+ "TRANSACTION_REPEATABLE_READ",
+ "TRANSACTION_SERIALIZABLE"
+ ],
+ "default": "TRANSACTION_SERIALIZABLE"
+ }
+ },
{
"widget-type": "keyvalue",
"label": "Connection Arguments",
@@ -267,6 +281,10 @@
{
"type": "property",
"name": "connectionArguments"
+ },
+ {
+ "type": "property",
+ "name": "transactionIsolationLevel"
}
]
},
diff --git a/mssql-plugin/widgets/SqlServer-batchsource.json b/mssql-plugin/widgets/SqlServer-batchsource.json
index dad5f4708..b3494e485 100644
--- a/mssql-plugin/widgets/SqlServer-batchsource.json
+++ b/mssql-plugin/widgets/SqlServer-batchsource.json
@@ -84,6 +84,20 @@
"label": "Password",
"name": "password"
},
+ {
+ "widget-type": "select",
+ "label": "Transaction Isolation Level",
+ "name": "transactionIsolationLevel",
+ "widget-attributes": {
+ "values": [
+ "TRANSACTION_READ_UNCOMMITTED",
+ "TRANSACTION_READ_COMMITTED",
+ "TRANSACTION_REPEATABLE_READ",
+ "TRANSACTION_SERIALIZABLE"
+ ],
+ "default": "TRANSACTION_SERIALIZABLE"
+ }
+ },
{
"widget-type": "keyvalue",
"label": "Connection Arguments",
@@ -316,6 +330,10 @@
{
"type": "property",
"name": "connectionArguments"
+ },
+ {
+ "type": "property",
+ "name": "transactionIsolationLevel"
}
]
},
diff --git a/mysql-plugin/docs/MySQL-connector.md b/mysql-plugin/docs/MySQL-connector.md
index fb5c1fbb8..f586084c1 100644
--- a/mysql-plugin/docs/MySQL-connector.md
+++ b/mysql-plugin/docs/MySQL-connector.md
@@ -22,6 +22,14 @@ authentication. Optional for databases that do not require authentication.
**Password:** Password to use to connect to the specified database.
+**Transaction Isolation Level:** The transaction isolation level of the database connection.
+- TRANSACTION_READ_COMMITTED: No dirty reads. Non-repeatable reads and phantom reads are possible.
+- TRANSACTION_SERIALIZABLE: No dirty reads. Non-repeatable and phantom reads are prevented.
+- TRANSACTION_REPEATABLE_READ: No dirty reads. Prevents non-repeatable reads, but phantom reads are still possible.
+- TRANSACTION_READ_UNCOMMITTED: Allows dirty reads (reading uncommitted changes from other transactions). Non-repeatable reads and phantom reads are possible.
+
+For more details on the Transaction Isolation Levels supported in MySQL, refer to the [MySQL documentation](https://dev.mysql.com/doc/refman/8.4/en/innodb-transaction-isolation-levels.html)
+
**Connection Arguments:** A list of arbitrary string tag/value pairs as connection arguments. These arguments
will be passed to the JDBC driver, as connection arguments, for JDBC drivers that may need additional configurations.
This is a semicolon-separated list of key-value pairs, where each pair is separated by a equals '=' and specifies
diff --git a/mysql-plugin/docs/Mysql-batchsink.md b/mysql-plugin/docs/Mysql-batchsink.md
index da86f4259..1c31e56cf 100644
--- a/mysql-plugin/docs/Mysql-batchsink.md
+++ b/mysql-plugin/docs/Mysql-batchsink.md
@@ -39,6 +39,14 @@ You also can use the macro function ${conn(connection-name)}.
**Password:** Password to use to connect to the specified database.
+**Transaction Isolation Level:** The transaction isolation level of the database connection.
+- TRANSACTION_READ_COMMITTED: No dirty reads. Non-repeatable reads and phantom reads are possible.
+- TRANSACTION_SERIALIZABLE: No dirty reads. Non-repeatable and phantom reads are prevented.
+- TRANSACTION_REPEATABLE_READ: No dirty reads. Prevents non-repeatable reads, but phantom reads are still possible.
+- TRANSACTION_READ_UNCOMMITTED: Allows dirty reads (reading uncommitted changes from other transactions). Non-repeatable reads and phantom reads are possible.
+
+For more details on the Transaction Isolation Levels supported in MySQL, refer to the [MySQL documentation](https://dev.mysql.com/doc/refman/8.4/en/innodb-transaction-isolation-levels.html)
+
**Connection Arguments:** A list of arbitrary string key/value pairs as connection arguments. These arguments
will be passed to the JDBC driver as connection arguments for JDBC drivers that may need additional configurations.
diff --git a/mysql-plugin/docs/Mysql-batchsource.md b/mysql-plugin/docs/Mysql-batchsource.md
index 30a729cd2..44f94643c 100644
--- a/mysql-plugin/docs/Mysql-batchsource.md
+++ b/mysql-plugin/docs/Mysql-batchsource.md
@@ -49,6 +49,14 @@ For example, 'SELECT MIN(id),MAX(id) FROM table'. Not required if numSplits is s
**Password:** Password to use to connect to the specified database.
+**Transaction Isolation Level:** The transaction isolation level of the database connection.
+- TRANSACTION_READ_COMMITTED: No dirty reads. Non-repeatable reads and phantom reads are possible.
+- TRANSACTION_SERIALIZABLE: No dirty reads. Non-repeatable and phantom reads are prevented.
+- TRANSACTION_REPEATABLE_READ: No dirty reads. Prevents non-repeatable reads, but phantom reads are still possible.
+- TRANSACTION_READ_UNCOMMITTED: Allows dirty reads (reading uncommitted changes from other transactions). Non-repeatable reads and phantom reads are possible.
+
+For more details on the Transaction Isolation Levels supported in MySQL, refer to the [MySQL documentation](https://dev.mysql.com/doc/refman/8.4/en/innodb-transaction-isolation-levels.html)
+
**Connection Arguments:** A list of arbitrary string key/value pairs as connection arguments. These arguments
will be passed to the JDBC driver as connection arguments for JDBC drivers that may need additional configurations.
diff --git a/mysql-plugin/pom.xml b/mysql-plugin/pom.xml
index e753f4a29..b2d928684 100644
--- a/mysql-plugin/pom.xml
+++ b/mysql-plugin/pom.xml
@@ -20,7 +20,7 @@
<artifactId>database-plugins-parent</artifactId>
<groupId>io.cdap.plugin</groupId>
- <version>1.10.0-SNAPSHOT</version>
+ <version>1.10.8</version>
<name>Mysql plugin</name>
diff --git a/mysql-plugin/src/e2e-test/java/io/cdap/plugin/MysqlClient.java b/mysql-plugin/src/e2e-test/java/io/cdap/plugin/MysqlClient.java
index 8fee5fa6e..05ea354fc 100644
--- a/mysql-plugin/src/e2e-test/java/io/cdap/plugin/MysqlClient.java
+++ b/mysql-plugin/src/e2e-test/java/io/cdap/plugin/MysqlClient.java
@@ -16,6 +16,7 @@
package io.cdap.plugin;
+import com.google.common.base.Strings;
import io.cdap.e2e.utils.PluginPropertyUtils;
import org.junit.Assert;
@@ -162,9 +163,13 @@ public static void createSourceDatatypesTable(String sourceTable) throws SQLExce
statement.executeUpdate(createSourceTableQuery);
// Insert dummy data.
- String datatypesValues = PluginPropertyUtils.pluginProp("datatypesValues");
- String datatypesColumnsList = PluginPropertyUtils.pluginProp("datatypesColumnsList");
- statement.executeUpdate("INSERT INTO " + sourceTable + " " + datatypesColumnsList + " " + datatypesValues);
+ int rowCount = 1;
+ while (!Strings.isNullOrEmpty(PluginPropertyUtils.pluginProp("datatypesValue" + rowCount))) {
+ String datatypesValues = PluginPropertyUtils.pluginProp("datatypesValue" + rowCount);
+ String datatypesColumnsList = PluginPropertyUtils.pluginProp("datatypesColumnsList");
+ statement.executeUpdate("INSERT INTO " + sourceTable + " " + datatypesColumnsList + " " + datatypesValues);
+ rowCount++;
+ }
}
}
diff --git a/mysql-plugin/src/e2e-test/resources/pluginParameters.properties b/mysql-plugin/src/e2e-test/resources/pluginParameters.properties
index b5669cb76..09e8fafaf 100644
--- a/mysql-plugin/src/e2e-test/resources/pluginParameters.properties
+++ b/mysql-plugin/src/e2e-test/resources/pluginParameters.properties
@@ -13,25 +13,40 @@ datatypesColumns=(ID varchar(100) PRIMARY KEY, COL1 bigint(20), COL2 bigint(20)
COL12 enum('A','B','C'), COL13 float, COL14 int(11), COL15 int(10) unsigned, COL16 mediumblob, COL17 mediumtext, \
COL18 longblob, COL19 longtext, COL20 mediumint(9), COL21 mediumint(8) unsigned, COL22 set('X','y','Z'), \
COL23 smallint(6), COL24 smallint(5) unsigned, COL25 text, COL26 time, COL27 timestamp, COL28 tinyblob, \
- COL29 tinyint(4), COL30 tinyint(3) unsigned, COL31 tinytext, COL32 varbinary(100), COL33 json)
+ COL29 tinyint(4), COL30 tinyint(3) unsigned, COL31 tinytext, COL32 varbinary(100), COL33 json, COL34 year)
datatypesColumnsList=(ID,COL1,COL2,COL3,COL4,COL5,COL6,COL7,COL8,COL9,COL10,COL11,COL12,COL13,COL14,COL15,COL16,COL17,\
- COL18,COL19,COL20,COL21,COL22,COL23,COL24,COL25,COL26,COL27,COL28,COL29,COL30,COL31,COL32,COL33)
-datatypesValues=VALUES ('User1',1000000000000000000,1000000000000000000,1,1,\
- HEX('27486920546869732069732061206C6F6E6720746578742E27'),1,'A','2023-01-01','2023-01-01 00:00:00',1234,\
- 1234.5678,'A',22.0,-1234,1234,HEX('27486920546869732069732061206C6F6E6720746578742E27'),\
+ COL18,COL19,COL20,COL21,COL22,COL23,COL24,COL25,COL26,COL27,COL28,COL29,COL30,COL31,COL32,COL33,COL34)
+datatypesValue1=VALUES ('User1',-9223372036854775808,null,1,1,\
+ HEX('27486920546869732069732061206C6F6E6720746578742E27'),-1,'A','2023-01-01','2023-01-01 00:00:00',1234,\
+ 1234.5678,'A',22.0,-2147483648,0,HEX('27486920546869732069732061206C6F6E6720746578742E27'),\
'This is a test message',HEX('27486920546869732069732061206C6F6E6720746578742E27'),\
- 'This is a test message\n\n',-1234,1234,'X',-1234,1234,'This is a test message','00:00:00','2023-01-01 00:00:00',\
- HEX('27486920546869732069732061206C6F6E6720746578742E27'),-100,100,'This is a test message',1,\
- '{"key1": "value1", "key2": "value2"}')
+ 'This is a test message\n\n',null,0,'X',-32768,0,'This is a test message','00:00:00','2023-01-01 00:00:00',\
+ HEX('27486920546869732069732061206C6F6E6720746578742E27'),-128,0,'This is a test message',1,\
+ '{"key1": "value1", "key2": "value2"}',2023)
+datatypesValue2=VALUES ('User2',9223372036854775807,18446744073709551615,1,1,\
+ HEX('27486920546869732069732061206C6F6E6720746578742E27'),127,'A','2023-01-01','2023-01-01 00:00:00',1234,\
+ 1234.5678,'A',22.0,2147483647,4294967295,HEX('27486920546869732069732061206C6F6E6720746578742E27'),\
+ 'This is a test message',HEX('27486920546869732069732061206C6F6E6720746578742E27'),\
+ 'This is a test message\n\n',8388607,16777215,'X',32767,65535,'This is a test message','00:00:00','2023-01-01 00:00:00',\
+ HEX('27486920546869732069732061206C6F6E6720746578742E27'),127,255,'This is a test message',1,\
+ '{"key1": "value1", "key2": "value2"}',0)
+datatypesValue3=VALUES ('User3',null,0,1,1,\
+ HEX('27486920546869732069732061206C6F6E6720746578742E27'),null,'A','2023-01-01','2023-01-01 00:00:00',1234,\
+ 1234.5678,'A',22.0,null,null,HEX('27486920546869732069732061206C6F6E6720746578742E27'),\
+ 'This is a test message',HEX('27486920546869732069732061206C6F6E6720746578742E27'),\
+ 'This is a test message\n\n',-8388608,null,'X',null,null,'This is a test message','00:00:00','2023-01-01 00:00:00',\
+ HEX('27486920546869732069732061206C6F6E6720746578742E27'),null,null,'This is a test message',1,\
+ '{"key1": "value1", "key2": "value2"}',null)
datatypesSchema=[{"key":"ID","value":"string"},{"key":"COL1","value":"long"},{"key":"COL2","value":"decimal"},\
{"key":"COL3","value":"bytes"},{"key":"COL4","value":"boolean"},{"key":"COL5","value":"bytes"},\
- {"key":"COL6","value":"boolean"},{"key":"COL7","value":"string"},{"key":"COL8","value":"date"},\
+ {"key":"COL6","value":"int"},{"key":"COL7","value":"string"},{"key":"COL8","value":"date"},\
{"key":"COL9","value":"timestamp"},{"key":"COL10","value":"decimal"},{"key":"COL11","value":"double"},\
{"key":"COL12","value":"string"},{"key":"COL13","value":"float"},{"key":"COL14","value":"int"},\
{"key":"COL15","value":"long"},{"key":"COL16","value":"bytes"},{"key":"COL17","value":"string"},\
{"key":"COL18","value":"bytes"},{"key":"COL19","value":"string"},{"key":"COL20","value":"int"},\
- {"key":"COL21","value":"long"},{"key":"COL22","value":"string"},{"key":"COL23","value":"int"},\
+ {"key":"COL21","value":"int"},{"key":"COL22","value":"string"},{"key":"COL23","value":"int"},\
{"key":"COL24","value":"int"},{"key":"COL25","value":"string"},{"key":"COL26","value":"time"},\
{"key":"COL27","value":"timestamp"},{"key":"COL28","value":"bytes"},{"key":"COL29","value":"int"},\
{"key":"COL30","value":"int"},{"key":"COL31","value":"string"},{"key":"COL32","value":"bytes"},\
- {"key":"COL33","value":"string"}]
+ {"key":"COL33","value":"string"},{"key":"COL34","value":"int"}]
+{"key":"COL33","value":"string"}]
diff --git a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSink.java b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSink.java
index 11a41e877..dcd2b9eda 100644
--- a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSink.java
+++ b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSink.java
@@ -148,6 +148,11 @@ public Map getDBSpecificArguments() {
trustCertificateKeyStorePassword, false);
}
+ @Override
+ public String getTransactionIsolationLevel() {
+ return connection.getTransactionIsolationLevel();
+ }
+
@Override
public MysqlConnectorConfig getConnection() {
return connection;
diff --git a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSource.java b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSource.java
index e773b2469..00addbb28 100644
--- a/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSource.java
+++ b/mysql-plugin/src/main/java/io/cdap/plugin/mysql/MysqlSource.java
@@ -180,6 +180,11 @@ public MysqlConnectorConfig getConnection() {
return connection;
}
+ @Override
+ public String getTransactionIsolationLevel() {
+ return connection.getTransactionIsolationLevel();
+ }
+
@Override
public void validate(FailureCollector collector) {
ConfigUtil.validateConnection(this, useConnection, connection, collector);
diff --git a/mysql-plugin/widgets/MySQL-connector.json b/mysql-plugin/widgets/MySQL-connector.json
index 9064d1bf6..f60f5526f 100644
--- a/mysql-plugin/widgets/MySQL-connector.json
+++ b/mysql-plugin/widgets/MySQL-connector.json
@@ -30,6 +30,20 @@
"widget-attributes": {
"default": "3306"
}
+ },
+ {
+ "widget-type": "select",
+ "label": "Transaction Isolation Level",
+ "name": "transactionIsolationLevel",
+ "widget-attributes": {
+ "values": [
+ "TRANSACTION_READ_UNCOMMITTED",
+ "TRANSACTION_READ_COMMITTED",
+ "TRANSACTION_REPEATABLE_READ",
+ "TRANSACTION_SERIALIZABLE"
+ ],
+ "default": "TRANSACTION_SERIALIZABLE"
+ }
}
]
},
diff --git a/mysql-plugin/widgets/Mysql-batchsink.json b/mysql-plugin/widgets/Mysql-batchsink.json
index 80e58d251..5f85b3435 100644
--- a/mysql-plugin/widgets/Mysql-batchsink.json
+++ b/mysql-plugin/widgets/Mysql-batchsink.json
@@ -65,6 +65,20 @@
"label": "Password",
"name": "password"
},
+ {
+ "widget-type": "select",
+ "label": "Transaction Isolation Level",
+ "name": "transactionIsolationLevel",
+ "widget-attributes": {
+ "values": [
+ "TRANSACTION_READ_UNCOMMITTED",
+ "TRANSACTION_READ_COMMITTED",
+ "TRANSACTION_REPEATABLE_READ",
+ "TRANSACTION_SERIALIZABLE"
+ ],
+ "default": "TRANSACTION_SERIALIZABLE"
+ }
+ },
{
"widget-type": "keyvalue",
"label": "Connection Arguments",
@@ -212,6 +226,10 @@
"type": "property",
"name": "password"
},
+ {
+ "type": "property",
+ "name": "transactionIsolationLevel"
+ },
{
"type": "property",
"name": "host"
diff --git a/mysql-plugin/widgets/Mysql-batchsource.json b/mysql-plugin/widgets/Mysql-batchsource.json
index 9175bd5ed..506e837f7 100644
--- a/mysql-plugin/widgets/Mysql-batchsource.json
+++ b/mysql-plugin/widgets/Mysql-batchsource.json
@@ -65,6 +65,20 @@
"label": "Password",
"name": "password"
},
+ {
+ "widget-type": "select",
+ "label": "Transaction Isolation Level",
+ "name": "transactionIsolationLevel",
+ "widget-attributes": {
+ "values": [
+ "TRANSACTION_READ_UNCOMMITTED",
+ "TRANSACTION_READ_COMMITTED",
+ "TRANSACTION_REPEATABLE_READ",
+ "TRANSACTION_SERIALIZABLE"
+ ],
+ "default": "TRANSACTION_SERIALIZABLE"
+ }
+ },
{
"widget-type": "keyvalue",
"label": "Connection Arguments",
@@ -277,6 +291,10 @@
"type": "property",
"name": "password"
},
+ {
+ "type": "property",
+ "name": "transactionIsolationLevel"
+ },
{
"type": "property",
"name": "host"
diff --git a/netezza-plugin/pom.xml b/netezza-plugin/pom.xml
index 86101960d..1d8783944 100644
--- a/netezza-plugin/pom.xml
+++ b/netezza-plugin/pom.xml
@@ -20,7 +20,7 @@
<artifactId>database-plugins-parent</artifactId>
<groupId>io.cdap.plugin</groupId>
- <version>1.10.0-SNAPSHOT</version>
+ <version>1.10.8</version>
<name>Netezza plugin</name>
diff --git a/oracle-plugin/pom.xml b/oracle-plugin/pom.xml
index c4f660cf6..0e649a464 100644
--- a/oracle-plugin/pom.xml
+++ b/oracle-plugin/pom.xml
@@ -20,7 +20,7 @@
<artifactId>database-plugins-parent</artifactId>
<groupId>io.cdap.plugin</groupId>
- <version>1.10.0-SNAPSHOT</version>
+ <version>1.10.8</version>
<name>Oracle plugin</name>
diff --git a/oracle-plugin/src/e2e-test/features/oracle/Datatype.feature b/oracle-plugin/src/e2e-test/features/oracle/Datatype.feature
index 8ce515ca8..d19e77d85 100644
--- a/oracle-plugin/src/e2e-test/features/oracle/Datatype.feature
+++ b/oracle-plugin/src/e2e-test/features/oracle/Datatype.feature
@@ -16,7 +16,7 @@
@Oracle
Feature: Oracle - Verify Oracle source data transfer for multiple datatypes
- @ORACLE_SOURCE_DATATYPES_TEST @ORACLE_SINK_TEST @Oracle_Required
+ @ORACLE_SOURCE_DATATYPES_TEST @ORACLE_TARGET_DATATYPES_TEST @Oracle_Required
# Oracle Sanity test to transfer table data containing multiple datatypes
Scenario: To verify data is getting transferred from Oracle to Oracle successfully
Given Open Datafusion Project to configure pipeline
@@ -32,7 +32,7 @@ Feature: Oracle - Verify Oracle source data transfer for multiple datatypes
Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
Then Select radio button plugin property: "connectionType" with value: "service"
- Then Select radio button plugin property: "role" with value: "sysdba"
+ Then Select radio button plugin property: "role" with value: "normal"
Then Enter input plugin property: "referenceName" with value: "sourceRef"
Then Replace input plugin property: "database" with value: "databaseName"
Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
@@ -51,7 +51,7 @@ Feature: Oracle - Verify Oracle source data transfer for multiple datatypes
Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
Then Enter input plugin property: "referenceName" with value: "targetRef"
Then Select radio button plugin property: "connectionType" with value: "service"
- Then Select radio button plugin property: "role" with value: "sysdba"
+ Then Select radio button plugin property: "role" with value: "normal"
Then Validate "Oracle2" plugin properties
Then Close the Plugin Properties page
Then Save the pipeline
diff --git a/oracle-plugin/src/e2e-test/features/oracle/DatatypeLong.feature b/oracle-plugin/src/e2e-test/features/oracle/DatatypeLong.feature
index b471be94b..1eed6713f 100644
--- a/oracle-plugin/src/e2e-test/features/oracle/DatatypeLong.feature
+++ b/oracle-plugin/src/e2e-test/features/oracle/DatatypeLong.feature
@@ -16,7 +16,7 @@
@Oracle
Feature: Oracle - Verify Oracle source data transfer of type LONG
- @ORACLE_SOURCE_DATATYPES_TEST2 @ORACLE_SINK_TEST @Oracle_Required
+ @ORACLE_SOURCE_DATATYPES_TEST2 @ORACLE_TARGET_DATATYPES_TEST2 @Oracle_Required
# Oracle Sanity test to transfer table data containing LONG
Scenario: To verify data is getting transferred from Oracle to Oracle successfully
Given Open Datafusion Project to configure pipeline
@@ -32,7 +32,7 @@ Feature: Oracle - Verify Oracle source data transfer of type LONG
Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
Then Select radio button plugin property: "connectionType" with value: "service"
- Then Select radio button plugin property: "role" with value: "sysdba"
+ Then Select radio button plugin property: "role" with value: "normal"
Then Enter input plugin property: "referenceName" with value: "sourceRef"
Then Replace input plugin property: "database" with value: "databaseName"
Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
@@ -51,7 +51,7 @@ Feature: Oracle - Verify Oracle source data transfer of type LONG
Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
Then Enter input plugin property: "referenceName" with value: "targetRef"
Then Select radio button plugin property: "connectionType" with value: "service"
- Then Select radio button plugin property: "role" with value: "sysdba"
+ Then Select radio button plugin property: "role" with value: "normal"
Then Validate "Oracle2" plugin properties
Then Close the Plugin Properties page
Then Save the pipeline
diff --git a/oracle-plugin/src/e2e-test/features/oracle/DatatypeLongRaw.feature b/oracle-plugin/src/e2e-test/features/oracle/DatatypeLongRaw.feature
index 15dfe4e15..05436f1d8 100644
--- a/oracle-plugin/src/e2e-test/features/oracle/DatatypeLongRaw.feature
+++ b/oracle-plugin/src/e2e-test/features/oracle/DatatypeLongRaw.feature
@@ -16,7 +16,7 @@
@Oracle
Feature: Oracle - Verify Oracle source data transfer of type LONG RAW
- @ORACLE_SOURCE_LONGRAW_TEST @ORACLE_SINK_TEST @Oracle_Required
+ @ORACLE_SOURCE_LONGRAW_TEST @ORACLE_TARGET_LONGRAW_TEST @Oracle_Required
# Oracle Sanity test to transfer table data containing LONG RAW
Scenario: To verify data is getting transferred from Oracle to Oracle successfully
Given Open Datafusion Project to configure pipeline
@@ -32,7 +32,7 @@ Feature: Oracle - Verify Oracle source data transfer of type LONG RAW
Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
Then Select radio button plugin property: "connectionType" with value: "service"
- Then Select radio button plugin property: "role" with value: "sysdba"
+ Then Select radio button plugin property: "role" with value: "normal"
Then Enter input plugin property: "referenceName" with value: "sourceRef"
Then Replace input plugin property: "database" with value: "databaseName"
Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
@@ -51,7 +51,7 @@ Feature: Oracle - Verify Oracle source data transfer of type LONG RAW
Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
Then Enter input plugin property: "referenceName" with value: "targetRef"
Then Select radio button plugin property: "connectionType" with value: "service"
- Then Select radio button plugin property: "role" with value: "sysdba"
+ Then Select radio button plugin property: "role" with value: "normal"
Then Validate "Oracle2" plugin properties
Then Close the Plugin Properties page
Then Save the pipeline
diff --git a/oracle-plugin/src/e2e-test/features/oracle/DatatypeLongVarchar.feature b/oracle-plugin/src/e2e-test/features/oracle/DatatypeLongVarchar.feature
index e6fc1dd8d..dd75e445b 100644
--- a/oracle-plugin/src/e2e-test/features/oracle/DatatypeLongVarchar.feature
+++ b/oracle-plugin/src/e2e-test/features/oracle/DatatypeLongVarchar.feature
@@ -16,7 +16,7 @@
@Oracle
Feature: Oracle - Verify Oracle source data transfer of type LONG VARCHAR
- @ORACLE_SOURCE_DATATYPES_TEST4 @ORACLE_SINK_TEST @Oracle_Required
+ @ORACLE_SOURCE_DATATYPES_TEST4 @ORACLE_TARGET_DATATYPES_TEST4 @Oracle_Required
# Oracle Sanity test to transfer table data containing LONG VARCHAR
Scenario: To verify data is getting transferred from Oracle to Oracle successfully
Given Open Datafusion Project to configure pipeline
@@ -32,7 +32,7 @@ Feature: Oracle - Verify Oracle source data transfer of type LONG VARCHAR
Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
Then Select radio button plugin property: "connectionType" with value: "service"
- Then Select radio button plugin property: "role" with value: "sysdba"
+ Then Select radio button plugin property: "role" with value: "normal"
Then Enter input plugin property: "referenceName" with value: "sourceRef"
Then Replace input plugin property: "database" with value: "databaseName"
Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
@@ -51,7 +51,7 @@ Feature: Oracle - Verify Oracle source data transfer of type LONG VARCHAR
Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
Then Enter input plugin property: "referenceName" with value: "targetRef"
Then Select radio button plugin property: "connectionType" with value: "service"
- Then Select radio button plugin property: "role" with value: "sysdba"
+ Then Select radio button plugin property: "role" with value: "normal"
Then Validate "Oracle2" plugin properties
Then Close the Plugin Properties page
Then Save the pipeline
diff --git a/oracle-plugin/src/e2e-test/features/oracle/DatatypeTimestamp.feature b/oracle-plugin/src/e2e-test/features/oracle/DatatypeTimestamp.feature
new file mode 100644
index 000000000..1f0954a10
--- /dev/null
+++ b/oracle-plugin/src/e2e-test/features/oracle/DatatypeTimestamp.feature
@@ -0,0 +1,69 @@
+#
+# Copyright © 2023 Cask Data, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+
+@Oracle
+Feature: Oracle - Verify Oracle source data transfer for all Timestamp types
+ @ORACLE_SOURCE_DATATYPE_TIMESTAMP @ORACLE_TARGET_DATATYPE_TIMESTAMP @Oracle_Required
+ Scenario: To verify data is getting transferred from Oracle to Oracle successfully
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "Oracle2" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Select dropdown plugin property: "select-transactionIsolationLevel" with option value: "TRANSACTION_READ_COMMITTED"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "outputTimestampDatatypesSchema"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "Oracle2"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Replace input plugin property: "tableName" with value: "targetTable"
+ Then Replace input plugin property: "dbSchemaName" with value: "schema"
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Select dropdown plugin property: "select-transactionIsolationLevel" with option value: "TRANSACTION_READ_COMMITTED"
+ Then Validate "Oracle2" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Verify the preview of pipeline is "success"
+ Then Click on preview data for Oracle sink
+ Then Verify preview output schema matches the outputSchema captured in properties
+ Then Close the preview data
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Validate the values of records transferred to target table is equal to the values from source table
diff --git a/oracle-plugin/src/e2e-test/features/oracle/Oracle.feature b/oracle-plugin/src/e2e-test/features/oracle/Oracle.feature
index f780ec6a5..3aecd62b9 100644
--- a/oracle-plugin/src/e2e-test/features/oracle/Oracle.feature
+++ b/oracle-plugin/src/e2e-test/features/oracle/Oracle.feature
@@ -16,7 +16,7 @@
@Oracle
Feature: Oracle - Verify Oracle source data transfer
- @ORACLE_SOURCE_TEST @ORACLE_SINK_TEST @Oracle_Required
+ @ORACLE_SOURCE_TEST @ORACLE_TARGET_TEST @Oracle_Required

Scenario: To verify data is getting transferred from Oracle to Oracle successfully
Given Open Datafusion Project to configure pipeline
When Expand Plugin group in the LHS plugins list: "Source"
@@ -31,7 +31,7 @@ Feature: Oracle - Verify Oracle source data transfer
Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
Then Select radio button plugin property: "connectionType" with value: "service"
- Then Select radio button plugin property: "role" with value: "sysdba"
+ Then Select radio button plugin property: "role" with value: "normal"
Then Enter input plugin property: "referenceName" with value: "sourceRef"
Then Replace input plugin property: "database" with value: "databaseName"
Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
@@ -50,7 +50,7 @@ Feature: Oracle - Verify Oracle source data transfer
Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
Then Enter input plugin property: "referenceName" with value: "targetRef"
Then Select radio button plugin property: "connectionType" with value: "service"
- Then Select radio button plugin property: "role" with value: "sysdba"
+ Then Select radio button plugin property: "role" with value: "normal"
Then Validate "Oracle2" plugin properties
Then Close the Plugin Properties page
Then Save the pipeline
diff --git a/oracle-plugin/src/e2e-test/features/sink/OracleDesignTime.feature b/oracle-plugin/src/e2e-test/features/sink/OracleDesignTime.feature
new file mode 100644
index 000000000..0a229b2a4
--- /dev/null
+++ b/oracle-plugin/src/e2e-test/features/sink/OracleDesignTime.feature
@@ -0,0 +1,76 @@
+#
+# Copyright © 2023 Cask Data, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+
+@Oracle @Oracle_Required
+Feature: Oracle sink - Verify Oracle sink plugin design time scenarios
+
+ Scenario: To verify Oracle sink plugin validation with connection and basic details for connectivity
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Replace input plugin property: "tableName" with value: "targetTable"
+ Then Replace input plugin property: "dbSchemaName" with value: "schema"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+
+ Scenario: To verify Oracle sink plugin validation with connection arguments
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Replace input plugin property: "tableName" with value: "targetTable"
+ Then Replace input plugin property: "dbSchemaName" with value: "schema"
+ Then Enter key value pairs for plugin property: "connectionArguments" with values from json: "connectionArgumentsList"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+
+ Scenario: To verify Oracle sink plugin validation with advanced details and batch value
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Replace input plugin property: "tableName" with value: "targetTable"
+ Then Replace input plugin property: "defaultBatchValue" with value: "batchValue"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
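> Note: the sink design-time scenarios above all choose the "service" value for the `connectionType` radio button. A hedged sketch of how a service-name versus SID connection type typically maps to an Oracle thin-driver URL (host, port and names below are placeholders, not values from the test configuration):

```java
/** Illustrative only; the plugin builds its own connection string internally. */
public class OracleUrlSketch {
    static String serviceUrl(String host, int port, String serviceName) {
        // Service-name form: jdbc:oracle:thin:@//host:port/service
        return String.format("jdbc:oracle:thin:@//%s:%d/%s", host, port, serviceName);
    }

    static String sidUrl(String host, int port, String sid) {
        // SID form: jdbc:oracle:thin:@host:port:SID
        return String.format("jdbc:oracle:thin:@%s:%d:%s", host, port, sid);
    }

    public static void main(String[] args) {
        System.out.println(serviceUrl("db.example.com", 1521, "ORCLPDB1"));
        System.out.println(sidUrl("db.example.com", 1521, "ORCL"));
    }
}
```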
diff --git a/oracle-plugin/src/e2e-test/features/sink/OracleDesignTimeValidation.feature b/oracle-plugin/src/e2e-test/features/sink/OracleDesignTimeValidation.feature
new file mode 100644
index 000000000..936802e66
--- /dev/null
+++ b/oracle-plugin/src/e2e-test/features/sink/OracleDesignTimeValidation.feature
@@ -0,0 +1,175 @@
+#
+# Copyright © 2023 Cask Data, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+
+@Oracle @Oracle_Required
+Feature: Oracle sink - Verify Oracle sink plugin design time validation scenarios
+
+ Scenario: To verify Oracle sink plugin validation errors for mandatory fields
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Click on the Validate button
+ Then Verify mandatory property error for below listed properties:
+ | jdbcPluginName |
+ | database |
+ | referenceName |
+ | tableName |
+
+ Scenario: To verify Oracle sink plugin validation error message with invalid reference test data
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "invalidRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Replace input plugin property: "tableName" with value: "targetTable"
+ Then Click on the Validate button
+ Then Verify that the Plugin Property: "referenceName" is displaying an in-line error message: "errorMessageOracleInvalidReferenceName"
+
+ @ORACLE_SOURCE_TEST @ORACLE_TARGET_TEST
+ Scenario: To verify Oracle sink plugin validation error message with invalid database
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "Oracle2" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "outputSchema"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "Oracle2"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "TNS"
+ Then Replace input plugin property: "database" with value: "invalidDatabaseName"
+ Then Replace input plugin property: "tableName" with value: "targetTable"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Click on the Validate button
+ Then Verify that the Plugin is displaying an error message: "errorMessageInvalidSinkDatabase" on the header
+
+ @ORACLE_SOURCE_TEST @ORACLE_TARGET_TEST
+ Scenario: To verify Oracle sink plugin validation error message with invalid table name
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "Oracle2" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "outputSchema"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "Oracle2"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "TNS"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Replace input plugin property: "tableName" with value: "invalidTable"
+ Then Click on the Validate button
+ Then Verify that the Plugin is displaying an error message: "errorMessageInvalidTableName" on the header
+
+ Scenario: To verify Oracle sink plugin validation error message with blank username
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Replace input plugin property: "tableName" with value: "targetTable"
+ Then Click on the Validate button
+ Then Verify that the Plugin Property: "user" is displaying an in-line error message: "errorMessageBlankUsername"
+
+ @ORACLE_SOURCE_TEST @ORACLE_TARGET_TEST
+ Scenario: To verify Oracle sink plugin validation error message with invalid Host
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "Oracle2" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "outputSchema"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "Oracle2"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "invalidHost" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "TNS"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Replace input plugin property: "tableName" with value: "invalidTable"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Click on the Validate button
+ Then Verify that the Plugin is displaying an error message: "errorMessageInvalidHost" on the header
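> Note: several of the validation scenarios above assert an in-line error for an invalid `referenceName`. A minimal sketch of such a check, assuming an alphanumeric/underscore/hyphen pattern; the assumed pattern is for illustration only and the real rule lives in the plugin/framework validation:

```java
import java.util.regex.Pattern;

public class ReferenceNameCheckSketch {
    // Assumed pattern for illustration; not the plugin's actual validation rule.
    private static final Pattern REFERENCE_NAME = Pattern.compile("[A-Za-z0-9_-]+");

    static boolean isValid(String referenceName) {
        return referenceName != null && REFERENCE_NAME.matcher(referenceName).matches();
    }

    public static void main(String[] args) {
        System.out.println(isValid("targetRef"));    // true
        System.out.println(isValid("invalid ref!")); // false under the assumed pattern
    }
}
```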
diff --git a/oracle-plugin/src/e2e-test/features/sink/OracleDesignTimeWithMacro.feature b/oracle-plugin/src/e2e-test/features/sink/OracleDesignTimeWithMacro.feature
new file mode 100644
index 000000000..9b5d86cd0
--- /dev/null
+++ b/oracle-plugin/src/e2e-test/features/sink/OracleDesignTimeWithMacro.feature
@@ -0,0 +1,56 @@
+#
+# Copyright © 2023 Cask Data, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+
+@Oracle @Oracle_Required
+Feature: Oracle sink - Verify Oracle sink plugin design time macro scenarios
+
+ Scenario: To verify Oracle sink plugin validation with macro enabled fields for connection section
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Click on the Macro button of Property: "jdbcPluginName" and set the value to: "oracleDriverName"
+ Then Click on the Macro button of Property: "host" and set the value to: "oracleHost"
+ Then Click on the Macro button of Property: "port" and set the value to: "oraclePort"
+ Then Click on the Macro button of Property: "user" and set the value to: "oracleUser"
+ Then Click on the Macro button of Property: "password" and set the value to: "oraclePassword"
+ Then Click on the Macro button of Property: "transactionIsolationLevel" and set the value to: "oracleTransactionLevel"
+ Then Click on the Macro button of Property: "connectionArguments" and set the value to: "oracleConnectionArguments"
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Replace input plugin property: "tableName" with value: "targetTable"
+ Then Replace input plugin property: "dbSchemaName" with value: "schema"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+
+ Scenario: To verify Oracle sink plugin validation with macro enabled fields for basic section
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Click on the Macro button of Property: "database" and set the value to: "oracleDatabaseName"
+ Then Click on the Macro button of Property: "tableName" and set the value to: "oracleTableName"
+ Then Click on the Macro button of Property: "dbSchemaName" and set the value to: "oracleSchemaName"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
diff --git a/oracle-plugin/src/e2e-test/features/sink/OracleRunTime.feature b/oracle-plugin/src/e2e-test/features/sink/OracleRunTime.feature
new file mode 100644
index 000000000..67293700b
--- /dev/null
+++ b/oracle-plugin/src/e2e-test/features/sink/OracleRunTime.feature
@@ -0,0 +1,169 @@
+#
+# Copyright © 2023 Cask Data, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+
+@Oracle
+Feature: Oracle - Verify data transfer from BigQuery source to Oracle sink
+
+ @BQ_SOURCE_TEST @ORACLE_TEST_TABLE
+ Scenario: To verify data is getting transferred from BigQuery source to Oracle sink successfully
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "BigQuery" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Connect plugins: "BigQuery" and "Oracle" to establish connection
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Replace input plugin property: "project" with value: "projectId"
+ Then Enter input plugin property: "datasetProject" with value: "projectId"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Enter input plugin property: "dataset" with value: "dataset"
+ Then Enter input plugin property: "table" with value: "bqSourceTable"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "bqOutputDatatypesSchema"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Replace input plugin property: "tableName" with value: "targetTable"
+ Then Replace input plugin property: "dbSchemaName" with value: "schema"
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Verify the preview of pipeline is "success"
+ Then Click on preview data for Oracle sink
+ Then Close the preview data
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Validate records transferred to target table with record counts of BigQuery table
+ Then Validate the values of records transferred to target Oracle table is equal to the values from source BigQuery table
+
+ @BQ_SOURCE_TEST @ORACLE_TEST_TABLE
+ Scenario: To verify data is getting transferred from BigQuery source to Oracle sink successfully when connection arguments are set
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "BigQuery" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Connect plugins: "BigQuery" and "Oracle" to establish connection
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Replace input plugin property: "project" with value: "projectId"
+ Then Enter input plugin property: "datasetProject" with value: "projectId"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Enter input plugin property: "dataset" with value: "dataset"
+ Then Enter input plugin property: "table" with value: "bqSourceTable"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "bqOutputDatatypesSchema"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Replace input plugin property: "tableName" with value: "targetTable"
+ Then Replace input plugin property: "dbSchemaName" with value: "schema"
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Enter key value pairs for plugin property: "connectionArguments" with values from json: "connectionArgumentsList"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Verify the preview of pipeline is "success"
+ Then Click on preview data for Oracle sink
+ Then Close the preview data
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Validate records transferred to target table with record counts of BigQuery table
+ Then Validate the values of records transferred to target Oracle table is equal to the values from source BigQuery table
+
+ @BQ_SOURCE_TEST_SMALL_CASE @ORACLE_TEST_TABLE
+ Scenario: To verify data is getting transferred from BigQuery source to Oracle sink successfully when the schema field names are in lower case
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "BigQuery" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Connect plugins: "BigQuery" and "Oracle" to establish connection
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Replace input plugin property: "project" with value: "projectId"
+ Then Enter input plugin property: "datasetProject" with value: "projectId"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Enter input plugin property: "dataset" with value: "dataset"
+ Then Enter input plugin property: "table" with value: "bqSourceTable"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "bqOutputDatatypesSchemaSmallCase"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Replace input plugin property: "tableName" with value: "targetTable"
+ Then Replace input plugin property: "dbSchemaName" with value: "schema"
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Verify the preview of pipeline is "success"
+ Then Click on preview data for Oracle sink
+ Then Close the preview data
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Validate records transferred to target table with record counts of BigQuery table
+ Then Validate the values of records transferred to target Oracle table is equal to the values from source BigQuery table with case
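> Note: the runtime scenarios above end by validating that the record count in the Oracle target table matches the BigQuery source. A hedged sketch of the Oracle side of such a check over JDBC; the expected count is passed in (in the real tests it would come from the BigQuery client), and the URL, credentials and table name are placeholders:

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class RowCountCheckSketch {
    /** Compares the Oracle target row count with a count obtained elsewhere. */
    static boolean countsMatch(String jdbcUrl, String user, String password,
                               String table, long expectedCount) throws Exception {
        // Table name is concatenated only because this is a trusted test helper sketch.
        try (Connection conn = DriverManager.getConnection(jdbcUrl, user, password);
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery("SELECT COUNT(*) FROM " + table)) {
            rs.next();
            return rs.getLong(1) == expectedCount;
        }
    }
}
```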
diff --git a/oracle-plugin/src/e2e-test/features/sink/OracleRunTimeMacro.feature b/oracle-plugin/src/e2e-test/features/sink/OracleRunTimeMacro.feature
new file mode 100644
index 000000000..78130655f
--- /dev/null
+++ b/oracle-plugin/src/e2e-test/features/sink/OracleRunTimeMacro.feature
@@ -0,0 +1,90 @@
+#
+# Copyright © 2023 Cask Data, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+
+@Oracle @Oracle_Required
+Feature: Oracle - Verify data transfer to Oracle sink with macro arguments
+
+ @BQ_SOURCE_TEST @ORACLE_TEST_TABLE
+ Scenario: To verify data is getting transferred from BigQuery source to Oracle sink using macro arguments
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "BigQuery" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Connect plugins: "BigQuery" and "Oracle" to establish connection
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Click on the Macro button of Property: "projectId" and set the value to: "bqProjectId"
+ Then Click on the Macro button of Property: "datasetProjectId" and set the value to: "bqDatasetProjectId"
+ Then Click on the Macro button of Property: "dataset" and set the value to: "bqDataset"
+ Then Click on the Macro button of Property: "table" and set the value to: "bqTable"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Click on the Macro button of Property: "jdbcPluginName" and set the value to: "oracleDriverName"
+ Then Click on the Macro button of Property: "host" and set the value to: "oracleHost"
+ Then Click on the Macro button of Property: "port" and set the value to: "oraclePort"
+ Then Click on the Macro button of Property: "user" and set the value to: "oracleUsername"
+ Then Click on the Macro button of Property: "password" and set the value to: "oraclePassword"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Click on the Macro button of Property: "database" and set the value to: "oracleDatabaseName"
+ Then Click on the Macro button of Property: "tableName" and set the value to: "oracleTableName"
+ Then Click on the Macro button of Property: "dbSchemaName" and set the value to: "oracleSchemaName"
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Enter runtime argument value "projectId" for key "bqProjectId"
+ Then Enter runtime argument value "projectId" for key "bqDatasetProjectId"
+ Then Enter runtime argument value "dataset" for key "bqDataset"
+ Then Enter runtime argument value "bqSourceTable" for key "bqTable"
+ Then Enter runtime argument value "driverName" for key "oracleDriverName"
+ Then Enter runtime argument value from environment variable "host" for key "oracleHost"
+ Then Enter runtime argument value from environment variable "port" for key "oraclePort"
+ Then Enter runtime argument value from environment variable "username" for key "oracleUsername"
+ Then Enter runtime argument value from environment variable "password" for key "oraclePassword"
+ Then Enter runtime argument value "databaseName" for key "oracleDatabaseName"
+ Then Enter runtime argument value "targetTable" for key "oracleTableName"
+ Then Enter runtime argument value "schema" for key "oracleSchemaName"
+ Then Run the preview of pipeline with runtime arguments
+ Then Wait till pipeline preview is in running state
+ Then Open and capture pipeline preview logs
+ Then Verify the preview run status of pipeline in the logs is "succeeded"
+ Then Close the pipeline logs
+ Then Close the preview
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Enter runtime argument value "projectId" for key "bqProjectId"
+ Then Enter runtime argument value "projectId" for key "bqDatasetProjectId"
+ Then Enter runtime argument value "dataset" for key "bqDataset"
+ Then Enter runtime argument value "bqSourceTable" for key "bqTable"
+ Then Enter runtime argument value "driverName" for key "oracleDriverName"
+ Then Enter runtime argument value from environment variable "host" for key "oracleHost"
+ Then Enter runtime argument value from environment variable "port" for key "oraclePort"
+ Then Enter runtime argument value from environment variable "username" for key "oracleUsername"
+ Then Enter runtime argument value from environment variable "password" for key "oraclePassword"
+ Then Enter runtime argument value "databaseName" for key "oracleDatabaseName"
+ Then Enter runtime argument value "targetTable" for key "oracleTableName"
+ Then Enter runtime argument value "schema" for key "oracleSchemaName"
+ Then Run the Pipeline in Runtime with runtime arguments
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Close the pipeline logs
+ Then Validate records transferred to target table with record counts of BigQuery table
+ Then Validate the values of records transferred to target Oracle table is equal to the values from source BigQuery table
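> Note: the macro scenario above enters runtime argument values "from environment variable" for host, port, username and password. A minimal sketch of collecting those values from the environment into a runtime-arguments map; the map keys mirror the macro names in the scenario, while the environment variable names are whatever the CI environment exports:

```java
import java.util.HashMap;
import java.util.Map;

public class RuntimeArgsSketch {
    /** Reads a value from the environment, failing fast if it is missing. */
    static String requireEnv(String name) {
        String value = System.getenv(name);
        if (value == null || value.isEmpty()) {
            throw new IllegalStateException("Environment variable not set: " + name);
        }
        return value;
    }

    public static void main(String[] args) {
        Map<String, String> runtimeArgs = new HashMap<>();
        runtimeArgs.put("oracleHost", requireEnv("host"));
        runtimeArgs.put("oraclePort", requireEnv("port"));
        runtimeArgs.put("oracleUsername", requireEnv("username"));
        runtimeArgs.put("oraclePassword", requireEnv("password"));
        System.out.println("Collected " + runtimeArgs.size() + " runtime arguments");
    }
}
```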
diff --git a/oracle-plugin/src/e2e-test/features/source/OracleDesignTime.feature b/oracle-plugin/src/e2e-test/features/source/OracleDesignTime.feature
new file mode 100644
index 000000000..5bd52b7a5
--- /dev/null
+++ b/oracle-plugin/src/e2e-test/features/source/OracleDesignTime.feature
@@ -0,0 +1,84 @@
+#
+# Copyright © 2023 Cask Data, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+
+@Oracle @Oracle_Required
+Feature: Oracle source - Verify Oracle source plugin design time scenarios
+
+ @ORACLE_SOURCE_TEST
+ Scenario: To verify Oracle source plugin validation with connection and basic details for connectivity
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "outputSchema"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+
+ @ORACLE_SOURCE_TEST
+ Scenario: To verify Oracle source plugin validation with connection arguments
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter key value pairs for plugin property: "connectionArguments" with values from json: "connectionArgumentsList"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "outputSchema"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+
+ @ORACLE_SOURCE_TEST
+ Scenario: To verify Oracle source plugin validation with advanced details
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Replace input plugin property: "defaultBatchValue" with value: "batchValue"
+ Then Replace input plugin property: "defaultRowPrefetch" with value: "rowPrefetch"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "outputSchema"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
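> Note: the feature files above are selected by tags such as @Oracle, @Oracle_Required and @ORACLE_SOURCE_TEST. A hypothetical Cucumber JUnit runner showing how tag-based selection of these scenarios works; the actual runner class, glue package and report plugins in the repository may differ:

```java
package io.cdap.plugin.oracle.runners; // hypothetical package name

import io.cucumber.junit.Cucumber;
import io.cucumber.junit.CucumberOptions;
import org.junit.runner.RunWith;

/** Illustrative runner; feature path, glue and tag expression are assumptions. */
@RunWith(Cucumber.class)
@CucumberOptions(
    features = {"src/e2e-test/features"},
    glue = {"stepsdesign"},
    tags = "@Oracle_Required",
    monochrome = true,
    plugin = {"pretty", "html:target/cucumber-html-report/oracle"}
)
public class TestRunner {
}
```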
diff --git a/oracle-plugin/src/e2e-test/features/source/OracleDesignTimeValidation.feature b/oracle-plugin/src/e2e-test/features/source/OracleDesignTimeValidation.feature
new file mode 100644
index 000000000..9efb734db
--- /dev/null
+++ b/oracle-plugin/src/e2e-test/features/source/OracleDesignTimeValidation.feature
@@ -0,0 +1,201 @@
+#
+# Copyright © 2023 Cask Data, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+
+@Oracle @Oracle_Required
+Feature: Oracle source - Verify Oracle source plugin design time validation scenarios
+
+ Scenario: To verify Oracle source plugin validation errors for mandatory fields
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Click on the Validate button
+ Then Verify mandatory property error for below listed properties:
+ | jdbcPluginName |
+ | database |
+ | referenceName |
+ | importQuery |
+
+ Scenario: To verify Oracle source plugin validation error message with invalid reference test data
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "invalidRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Click on the Validate button
+ Then Verify that the Plugin Property: "referenceName" is displaying an in-line error message: "errorMessageOracleInvalidReferenceName"
+
+ Scenario: To verify Oracle source plugin validation error message with blank bounding query
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "invalidDatabaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "invalidImportQuery"
+ Then Replace input plugin property: "splitBy" with value: "splitBy"
+ Then Replace input plugin property: "numSplits" with value: "numberOfSplits"
+ Then Click on the Validate button
+ Then Verify that the Plugin Property: "boundingQuery" is displaying an in-line error message: "errorMessageBoundingQuery"
+ Then Verify that the Plugin Property: "numSplits" is displaying an in-line error message: "errorMessageBoundingQuery"
+ Then Verify that the Plugin Property: "importQuery" is displaying an in-line error message: "errorMessageInvalidImportQuery"
+
+ Scenario: To verify Oracle source plugin validation error message with number of splits without split by field name
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "DatabaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Replace input plugin property: "numSplits" with value: "numberOfSplits"
+ Then Click on the Validate button
+ Then Verify that the Plugin Property: "numSplits" is displaying an in-line error message: "errorMessageBlankSplitBy"
+ Then Verify that the Plugin Property: "splitBy" is displaying an in-line error message: "errorMessageBlankSplitBy"
+
+ Scenario: To verify Oracle source plugin validation error message when the Number of Splits value is not a number
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "DatabaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Replace input plugin property: "numSplits" with value: "zeroSplits"
+ Then Click on the Validate button
+ Then Verify that the Plugin Property: "numSplits" is displaying an in-line error message: "errorMessageNumberOfSplitNotNumber"
+
+ Scenario: To verify Oracle source plugin validation error message when the Number of Splits value is changed to zero
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "DatabaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Replace input plugin property: "numSplits" with value: "zeroValue"
+ Then Click on the Validate button
+ Then Verify that the Plugin Property: "numSplits" is displaying an in-line error message: "errorMessageInvalidNumberOfSplits"
+
+ Scenario: To verify Oracle source plugin validation error message when fetch size is changed to zero
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "DatabaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Replace input plugin property: "fetchSize" with value: "zeroValue"
+ Then Click on the Validate button
+ Then Verify that the Plugin Property: "fetchSize" is displaying an in-line error message: "errorMessageInvalidFetchSize"
+
+ Scenario: To verify Oracle source plugin validation error message with invalid database
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "invalidDatabase"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Click on the Validate button
+ Then Verify that the Plugin is displaying an error message: "errorMessageInvalidSourceDatabase" on the header
+
+ Scenario: To verify Oracle source plugin validation error message with invalid import query
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "invalidImportQuery"
+ Then Replace input plugin property: "numSplits" with value: "numberOfSplits"
+ Then Click on the Validate button
+ Then Verify that the Plugin Property: "importQuery" is displaying an in-line error message: "errorMessageInvalidImportQuery"
+
+ Scenario: To verify Oracle source plugin validation error message with blank username
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "invalidImportQuery"
+ Then Click on the Validate button
+ Then Verify that the Plugin Property: "user" is displaying an in-line error message: "errorMessageBlankUsername"
diff --git a/oracle-plugin/src/e2e-test/features/source/OracleDesignTimeWithMacro.feature b/oracle-plugin/src/e2e-test/features/source/OracleDesignTimeWithMacro.feature
new file mode 100644
index 000000000..849cb2fbe
--- /dev/null
+++ b/oracle-plugin/src/e2e-test/features/source/OracleDesignTimeWithMacro.feature
@@ -0,0 +1,57 @@
+#
+# Copyright © 2023 Cask Data, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+
+@Oracle @Oracle_Required
+Feature: Oracle source - Verify Oracle source plugin design time macro scenarios
+
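+ # Note: the scenarios below exercise design-time validation only; macro placeholders are set
+ # via the Macro button and the plugin properties are validated without running a pipeline.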
+ Scenario: To verify Oracle source plugin validation with macro enabled fields for connection section
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Click on the Macro button of Property: "jdbcPluginName" and set the value to: "oracleDriverName"
+ Then Click on the Macro button of Property: "host" and set the value to: "oracleHost"
+ Then Click on the Macro button of Property: "port" and set the value to: "oraclePort"
+ Then Click on the Macro button of Property: "user" and set the value to: "oracleUser"
+ Then Click on the Macro button of Property: "password" and set the value to: "oraclePassword"
+ Then Click on the Macro button of Property: "transactionIsolationLevel" and set the value to: "oracleTransactionLevel"
+ Then Click on the Macro button of Property: "database" and set the value to: "oracleDatabase"
+ Then Click on the Macro button of Property: "connectionArguments" and set the value to: "oracleConnectionArguments"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+
+ Scenario: To verify Oracle source plugin validation with macro enabled fields for basic section
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Click on the Macro button of Property: "splitBy" and set the value to: "oracleSplitBy"
+ Then Click on the Macro button of Property: "fetchSize" and set the value to: "oracleFetchSize"
+ Then Click on the Macro button of Property: "boundingQuery" and set the value in textarea: "oracleBoundingQuery"
+ Then Click on the Macro button of Property: "importQuery" and set the value in textarea: "oracleImportQuery"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
diff --git a/oracle-plugin/src/e2e-test/features/source/OracleRunTime.feature b/oracle-plugin/src/e2e-test/features/source/OracleRunTime.feature
new file mode 100644
index 000000000..2d1ca9ad1
--- /dev/null
+++ b/oracle-plugin/src/e2e-test/features/source/OracleRunTime.feature
@@ -0,0 +1,440 @@
+#
+# Copyright © 2023 Cask Data, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+
+@Oracle @Oracle_Required
+Feature: Oracle - Verify data transfer from Oracle source to BigQuery sink
+
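+ # The scenarios below configure an Oracle source and a BigQuery sink; successful runs are
+ # validated by comparing the records in the target BigQuery table with the source table.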
+ @ORACLE_SOURCE_TEST @BQ_SINK_TEST
+ Scenario: To verify data is getting transferred from Oracle source to BigQuery sink successfully
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "BigQuery" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "BigQuery" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "outputSchema"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Replace input plugin property: "project" with value: "projectId"
+ Then Enter input plugin property: "datasetProject" with value: "projectId"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Enter input plugin property: "dataset" with value: "dataset"
+ Then Enter input plugin property: "table" with value: "bqTargetTable"
+ Then Click plugin property: "truncateTable"
+ Then Click plugin property: "updateTableSchema"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Wait till pipeline preview is in running state
+ Then Open and capture pipeline preview logs
+ Then Verify the preview run status of pipeline in the logs is "succeeded"
+ Then Close the pipeline logs
+ Then Close the preview
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Close the pipeline logs
+ Then Validate the values of records transferred to target Big Query table is equal to the values from source table
+
+ @ORACLE_SOURCE_DATATYPES_TEST2 @BQ_SINK_TEST
+ Scenario: To verify data is getting transferred from Oracle source to BigQuery sink successfully using long datatypes
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "BigQuery" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "BigQuery" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "outputDatatypesSchema2"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Replace input plugin property: "project" with value: "projectId"
+ Then Enter input plugin property: "datasetProject" with value: "projectId"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Enter input plugin property: "dataset" with value: "dataset"
+ Then Enter input plugin property: "table" with value: "bqTargetTable"
+ Then Click plugin property: "truncateTable"
+ Then Click plugin property: "updateTableSchema"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Wait till pipeline preview is in running state
+ Then Open and capture pipeline preview logs
+ Then Verify the preview run status of pipeline in the logs is "succeeded"
+ Then Close the pipeline logs
+ Then Close the preview
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Close the pipeline logs
+ Then Validate the values of records transferred to target Big Query table is equal to the values from source table
+
+ @ORACLE_SOURCE_LONGRAW_TEST @BQ_SINK_TEST
+ Scenario: To verify data is getting transferred from Oracle source to BigQuery sink successfully using long raw datatypes
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "BigQuery" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "BigQuery" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "outputDatatypesSchema3"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Replace input plugin property: "project" with value: "projectId"
+ Then Enter input plugin property: "datasetProject" with value: "projectId"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Enter input plugin property: "dataset" with value: "dataset"
+ Then Enter input plugin property: "table" with value: "bqTargetTable"
+ Then Click plugin property: "truncateTable"
+ Then Click plugin property: "updateTableSchema"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Wait till pipeline preview is in running state
+ Then Open and capture pipeline preview logs
+ Then Verify the preview run status of pipeline in the logs is "succeeded"
+ Then Close the pipeline logs
+ Then Close the preview
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Close the pipeline logs
+ Then Validate the values of records transferred to target Big Query table is equal to the values from source table
+
+ @ORACLE_SOURCE_DATATYPES_TEST4 @BQ_SINK_TEST
+ Scenario: To verify data is getting transferred from Oracle source to BigQuery sink successfully using long varchar datatypes
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "BigQuery" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "BigQuery" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "outputDatatypesSchema4"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Replace input plugin property: "project" with value: "projectId"
+ Then Enter input plugin property: "datasetProject" with value: "projectId"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Enter input plugin property: "dataset" with value: "dataset"
+ Then Enter input plugin property: "table" with value: "bqTargetTable"
+ Then Click plugin property: "truncateTable"
+ Then Click plugin property: "updateTableSchema"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Wait till pipeline preview is in running state
+ Then Open and capture pipeline preview logs
+ Then Verify the preview run status of pipeline in the logs is "succeeded"
+ Then Close the pipeline logs
+ Then Close the preview
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Close the pipeline logs
+ Then Validate the values of records transferred to target Big Query table is equal to the values from source table
+
+ @ORACLE_SOURCE_DATATYPES_TEST4 @BQ_SINK_TEST
+ Scenario: To verify data is getting transferred from Oracle source to BigQuery sink successfully when connection arguments are set
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "BigQuery" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "BigQuery" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter key value pairs for plugin property: "connectionArguments" with values from json: "connectionArgumentsList"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "outputDatatypesSchema4"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Replace input plugin property: "project" with value: "projectId"
+ Then Enter input plugin property: "datasetProject" with value: "projectId"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Enter input plugin property: "dataset" with value: "dataset"
+ Then Enter input plugin property: "table" with value: "bqTargetTable"
+ Then Click plugin property: "truncateTable"
+ Then Click plugin property: "updateTableSchema"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Wait till pipeline preview is in running state
+ Then Open and capture pipeline preview logs
+ Then Verify the preview run status of pipeline in the logs is "succeeded"
+ Then Close the pipeline logs
+ Then Close the preview
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Close the pipeline logs
+ Then Validate the values of records transferred to target Big Query table is equal to the values from source table
+
+ @ORACLE_SOURCE_TEST @BQ_SINK_TEST
+ Scenario: To verify the pipeline preview fails with an invalid bounding query when the split-by field is set
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "BigQuery" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "BigQuery" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Replace input plugin property: "splitBy" with value: "splitBy"
+ Then Enter textarea plugin property: "importQuery" with value: "importQuery"
+ Then Click on the Get Schema button
+ Then Replace input plugin property: "numSplits" with value: "numberOfSplits"
+ Then Enter textarea plugin property: "boundingQuery" with value: "invalidBoundingQuery"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Replace input plugin property: "project" with value: "projectId"
+ Then Enter input plugin property: "datasetProject" with value: "projectId"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Enter input plugin property: "dataset" with value: "dataset"
+ Then Enter input plugin property: "table" with value: "bqTargetTable"
+ Then Click plugin property: "truncateTable"
+ Then Click plugin property: "updateTableSchema"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Wait till pipeline preview is in running state
+ Then Verify the preview run status of pipeline in the logs is "failed"
+
+ @ORACLE_SOURCE_TEST @BQ_SINK_TEST
+ Scenario: To verify pipeline failure message in logs when an invalid bounding query is provided
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "BigQuery" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "BigQuery" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Replace input plugin property: "splitBy" with value: "splitBy"
+ Then Enter textarea plugin property: "importQuery" with value: "importQuery"
+ Then Click on the Get Schema button
+ Then Replace input plugin property: "numSplits" with value: "numberOfSplits"
+ Then Enter textarea plugin property: "boundingQuery" with value: "invalidBoundingQueryValue"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Replace input plugin property: "project" with value: "projectId"
+ Then Enter input plugin property: "datasetProject" with value: "projectId"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Enter input plugin property: "dataset" with value: "dataset"
+ Then Enter input plugin property: "table" with value: "bqTargetTable"
+ Then Click plugin property: "truncateTable"
+ Then Click plugin property: "updateTableSchema"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ And Save and Deploy Pipeline
+ And Run the Pipeline in Runtime
+ And Wait till pipeline is in running state
+ And Verify the pipeline status is "Failed"
+ Then Open Pipeline logs and verify Log entries having below listed Level and Message:
+ | Level | Message |
+ | ERROR | errorLogsMessageInvalidBoundingQuery |
+
+ @ORACLE_SOURCE_DATATYPES_TEST1 @BQ_SINK_TEST
+ Scenario: To verify data is getting transferred from Oracle source to BigQuery sink successfully using different datatypes
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "BigQuery" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "BigQuery" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "outputDatatypesSchema1"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Replace input plugin property: "project" with value: "projectId"
+ Then Enter input plugin property: "datasetProject" with value: "projectId"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Enter input plugin property: "dataset" with value: "dataset"
+ Then Enter input plugin property: "table" with value: "bqTargetTable"
+ Then Click plugin property: "truncateTable"
+ Then Click plugin property: "updateTableSchema"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Wait till pipeline preview is in running state
+ Then Open and capture pipeline preview logs
+ Then Verify the preview run status of pipeline in the logs is "succeeded"
+ Then Close the pipeline logs
+ Then Close the preview
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Close the pipeline logs
+ Then Validate the values of records transferred to target Big Query table is equal to the values from source table
+
+ @ORACLE_SOURCE_DATATYPES_TEST @BQ_SINK_TEST
+ Scenario: To verify data is getting transferred from Oracle source to BigQuery sink successfully using multiple datatypes
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "BigQuery" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "BigQuery" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Click on the Get Schema button
+ Then Verify the Output Schema matches the Expected Schema: "outputDatatypesSchema"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Replace input plugin property: "project" with value: "projectId"
+ Then Enter input plugin property: "datasetProject" with value: "projectId"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Enter input plugin property: "dataset" with value: "dataset"
+ Then Enter input plugin property: "table" with value: "bqTargetTable"
+ Then Click plugin property: "truncateTable"
+ Then Click plugin property: "updateTableSchema"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Wait till pipeline preview is in running state
+ Then Open and capture pipeline preview logs
+ Then Verify the preview run status of pipeline in the logs is "succeeded"
+ Then Close the pipeline logs
+ Then Close the preview
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Close the pipeline logs
+ Then Validate the values of records transferred to target Big Query table is equal to the values from source table
diff --git a/oracle-plugin/src/e2e-test/features/source/OracleRunTimeMacro.feature b/oracle-plugin/src/e2e-test/features/source/OracleRunTimeMacro.feature
new file mode 100644
index 000000000..106d4ec36
--- /dev/null
+++ b/oracle-plugin/src/e2e-test/features/source/OracleRunTimeMacro.feature
@@ -0,0 +1,307 @@
+#
+# Copyright © 2023 Cask Data, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+
+@Oracle @Oracle_Required
+Feature: Oracle - Verify Oracle plugin data transfer with macro arguments
+
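+ # The macro keys set below (e.g. oracleHost, oraclePort, oracleUsername) are resolved from the
+ # runtime argument values entered before each preview run and deployed run.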
+ @ORACLE_SOURCE_TEST @ORACLE_TARGET_TEST
+ Scenario: To verify data is getting transferred from Oracle to Oracle successfully using macro arguments in connection section
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "Oracle2" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Click on the Macro button of Property: "jdbcPluginName" and set the value to: "oracleDriverName"
+ Then Click on the Macro button of Property: "host" and set the value to: "oracleHost"
+ Then Click on the Macro button of Property: "port" and set the value to: "oraclePort"
+ Then Click on the Macro button of Property: "user" and set the value to: "oracleUsername"
+ Then Click on the Macro button of Property: "password" and set the value to: "oraclePassword"
+ Then Click on the Macro button of Property: "database" and set the value to: "oracleDatabase"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "Oracle2"
+ Then Click on the Macro button of Property: "jdbcPluginName" and set the value to: "oracleDriverName"
+ Then Click on the Macro button of Property: "host" and set the value to: "oracleHost"
+ Then Click on the Macro button of Property: "port" and set the value to: "oraclePort"
+ Then Click on the Macro button of Property: "user" and set the value to: "oracleUsername"
+ Then Click on the Macro button of Property: "password" and set the value to: "oraclePassword"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Replace input plugin property: "tableName" with value: "targetTable"
+ Then Replace input plugin property: "dbSchemaName" with value: "schema"
+ Then Validate "Oracle2" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Enter runtime argument value "driverName" for key "oracleDriverName"
+ Then Enter runtime argument value from environment variable "host" for key "oracleHost"
+ Then Enter runtime argument value from environment variable "port" for key "oraclePort"
+ Then Enter runtime argument value from environment variable "username" for key "oracleUsername"
+ Then Enter runtime argument value from environment variable "password" for key "oraclePassword"
+ Then Enter runtime argument value "databaseName" for key "oracleDatabase"
+ Then Run the preview of pipeline with runtime arguments
+ Then Wait till pipeline preview is in running state
+ Then Open and capture pipeline preview logs
+ Then Verify the preview run status of pipeline in the logs is "succeeded"
+ Then Close the pipeline logs
+ Then Close the preview
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Enter runtime argument value "driverName" for key "oracleDriverName"
+ Then Enter runtime argument value from environment variable "host" for key "oracleHost"
+ Then Enter runtime argument value from environment variable "port" for key "oraclePort"
+ Then Enter runtime argument value from environment variable "username" for key "oracleUsername"
+ Then Enter runtime argument value from environment variable "password" for key "oraclePassword"
+ Then Enter runtime argument value "databaseName" for key "oracleDatabase"
+ Then Run the Pipeline in Runtime with runtime arguments
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Close the pipeline logs
+ Then Validate the values of records transferred to target table is equal to the values from source table
+
+ @ORACLE_SOURCE_TEST @ORACLE_TARGET_TEST
+ Scenario: To verify data is getting transferred from Oracle to Oracle successfully using macro arguments in basic section
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "Oracle2" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Click on the Macro button of Property: "splitBy" and set the value to: "oracleSplitByColumn"
+ Then Click on the Macro button of Property: "importQuery" and set the value in textarea: "oracleImportQuery"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "Oracle2"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Click on the Macro button of Property: "database" and set the value to: "oracleDatabaseName"
+ Then Click on the Macro button of Property: "tableName" and set the value to: "oracleTableName"
+ Then Click on the Macro button of Property: "dbSchemaName" and set the value to: "oracleSchemaName"
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Validate "Oracle2" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Enter runtime argument value "splitByColumn" for key "oracleSplitByColumn"
+ Then Enter runtime argument value "selectQuery" for key "oracleImportQuery"
+ Then Enter runtime argument value "databaseName" for key "oracleDatabaseName"
+ Then Enter runtime argument value "targetTable" for key "oracleTableName"
+ Then Enter runtime argument value "schema" for key "oracleSchemaName"
+ Then Run the preview of pipeline with runtime arguments
+ Then Wait till pipeline preview is in running state
+ Then Open and capture pipeline preview logs
+ Then Verify the preview run status of pipeline in the logs is "succeeded"
+ Then Close the pipeline logs
+ Then Close the preview
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
+ Then Enter runtime argument value "splitByColumn" for key "oracleSplitByColumn"
+ Then Enter runtime argument value "selectQuery" for key "oracleImportQuery"
+ Then Enter runtime argument value "databaseName" for key "oracleDatabaseName"
+ Then Enter runtime argument value "targetTable" for key "oracleTableName"
+ Then Enter runtime argument value "schema" for key "oracleSchemaName"
+ Then Run the Pipeline in Runtime with runtime arguments
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Close the pipeline logs
+ Then Validate the values of records transferred to target table is equal to the values from source table
+
+ @ORACLE_SOURCE_TEST @ORACLE_TARGET_TEST
+ Scenario: To verify pipeline preview fails when invalid connection details are provided using macro arguments
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "Oracle2" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Click on the Macro button of Property: "jdbcPluginName" and set the value to: "oracleDriverName"
+ Then Click on the Macro button of Property: "host" and set the value to: "oracleHost"
+ Then Click on the Macro button of Property: "port" and set the value to: "oraclePort"
+ Then Click on the Macro button of Property: "user" and set the value to: "oracleUsername"
+ Then Click on the Macro button of Property: "password" and set the value to: "oraclePassword"
+ Then Click on the Macro button of Property: "database" and set the value to: "oracleDatabase"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "Oracle2"
+ Then Click on the Macro button of Property: "jdbcPluginName" and set the value to: "oracleDriverName"
+ Then Click on the Macro button of Property: "host" and set the value to: "oracleHost"
+ Then Click on the Macro button of Property: "port" and set the value to: "oraclePort"
+ Then Click on the Macro button of Property: "user" and set the value to: "oracleUsername"
+ Then Click on the Macro button of Property: "password" and set the value to: "oraclePassword"
+ Then Click on the Macro button of Property: "database" and set the value to: "oracleDatabase"
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Replace input plugin property: "tableName" with value: "targetTable"
+ Then Replace input plugin property: "dbSchemaName" with value: "schema"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Validate "Oracle2" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Enter runtime argument value "invalidDriverName" for key "oracleDriverName"
+ Then Enter runtime argument value "invalidHost" for key "oracleHost"
+ Then Enter runtime argument value "invalidPort" for key "oraclePort"
+ Then Enter runtime argument value "invalidUserName" for key "oracleUsername"
+ Then Enter runtime argument value "invalidPassword" for key "oraclePassword"
+ Then Enter runtime argument value "invalidDatabaseName" for key "oracleDatabase"
+ Then Run the preview of pipeline with runtime arguments
+ Then Verify the preview of pipeline is "Failed"
+
+ @ORACLE_SOURCE_TEST @ORACLE_TARGET_TEST
+ Scenario: To verify pipeline preview fails when invalid basic details are provided using macro arguments
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "Oracle" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "Oracle2" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Replace input plugin property: "database" with value: "databaseName"
+ Then Click on the Macro button of Property: "importQuery" and set the value in textarea: "oracleInvalidImportQuery"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "Oracle2"
+ Then Select dropdown plugin property: "select-jdbcPluginName" with option value: "driverName"
+ Then Replace input plugin property: "host" with value: "host" for Credentials and Authorization related fields
+ Then Replace input plugin property: "port" with value: "port" for Credentials and Authorization related fields
+ Then Click on the Macro button of Property: "database" and set the value to: "oracleDatabaseName"
+ Then Click on the Macro button of Property: "tableName" and set the value to: "oracleTableName"
+ Then Click on the Macro button of Property: "dbSchemaName" and set the value to: "oracleSchemaName"
+ Then Replace input plugin property: "user" with value: "username" for Credentials and Authorization related fields
+ Then Replace input plugin property: "password" with value: "password" for Credentials and Authorization related fields
+ Then Enter input plugin property: "referenceName" with value: "targetRef"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Validate "Oracle2" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Enter runtime argument value "invalidTableNameImportQuery" for key "oracleInvalidImportQuery"
+ Then Enter runtime argument value "invalidDatabaseName" for key "oracleDatabaseName"
+ Then Enter runtime argument value "oracleInvalidTable" for key "oracleTableName"
+ Then Enter runtime argument value "schema" for key "oracleSchemaName"
+ Then Run the preview of pipeline with runtime arguments
+ Then Verify the preview of pipeline is "Failed"
+
+ @ORACLE_SOURCE_TEST @BQ_SINK_TEST
+ Scenario: To verify data is getting transferred from Oracle source to BigQuery sink using macro arguments
+ Given Open Datafusion Project to configure pipeline
+ When Expand Plugin group in the LHS plugins list: "Source"
+ When Select plugin: "Oracle" from the plugins list as: "Source"
+ When Expand Plugin group in the LHS plugins list: "Sink"
+ When Select plugin: "BigQuery" from the plugins list as: "Sink"
+ Then Connect plugins: "Oracle" and "BigQuery" to establish connection
+ Then Navigate to the properties page of plugin: "Oracle"
+ Then Click on the Macro button of Property: "jdbcPluginName" and set the value to: "oracleDriverName"
+ Then Click on the Macro button of Property: "host" and set the value to: "oracleHost"
+ Then Click on the Macro button of Property: "port" and set the value to: "oraclePort"
+ Then Click on the Macro button of Property: "user" and set the value to: "oracleUsername"
+ Then Click on the Macro button of Property: "password" and set the value to: "oraclePassword"
+ Then Select radio button plugin property: "connectionType" with value: "service"
+ Then Select radio button plugin property: "role" with value: "normal"
+ Then Click on the Macro button of Property: "database" and set the value to: "oracleDatabase"
+ Then Enter input plugin property: "referenceName" with value: "sourceRef"
+ Then Enter textarea plugin property: "importQuery" with value: "selectQuery"
+ Then Validate "Oracle" plugin properties
+ Then Close the Plugin Properties page
+ Then Navigate to the properties page of plugin: "BigQuery"
+ Then Enter input plugin property: "referenceName" with value: "BQReferenceName"
+ Then Click on the Macro button of Property: "projectId" and set the value to: "bqProjectId"
+ Then Click on the Macro button of Property: "datasetProjectId" and set the value to: "bqDatasetProjectId"
+ Then Click on the Macro button of Property: "dataset" and set the value to: "bqDataset"
+ Then Click on the Macro button of Property: "table" and set the value to: "bqTable"
+ Then Click on the Macro button of Property: "truncateTableMacroInput" and set the value to: "bqTruncateTable"
+ Then Click on the Macro button of Property: "updateTableSchemaMacroInput" and set the value to: "bqUpdateTableSchema"
+ Then Validate "BigQuery" plugin properties
+ Then Close the Plugin Properties page
+ Then Save the pipeline
+ Then Preview and run the pipeline
+ Then Enter runtime argument value "driverName" for key "oracleDriverName"
+ Then Enter runtime argument value from environment variable "host" for key "oracleHost"
+ Then Enter runtime argument value from environment variable "port" for key "oraclePort"
+ Then Enter runtime argument value from environment variable "username" for key "oracleUsername"
+ Then Enter runtime argument value from environment variable "password" for key "oraclePassword"
+ Then Enter runtime argument value "databaseName" for key "oracleDatabase"
+ Then Enter runtime argument value "projectId" for key "bqProjectId"
+ Then Enter runtime argument value "projectId" for key "bqDatasetProjectId"
+ Then Enter runtime argument value "dataset" for key "bqDataset"
+ Then Enter runtime argument value "bqTargetTable" for key "bqTable"
+ Then Enter runtime argument value "bqTruncateTable" for key "bqTruncateTable"
+ Then Enter runtime argument value "bqUpdateTableSchema" for key "bqUpdateTableSchema"
+ Then Run the preview of pipeline with runtime arguments
+ Then Wait till pipeline preview is in running state
+ Then Open and capture pipeline preview logs
+ Then Verify the preview run status of pipeline in the logs is "succeeded"
+ Then Close the pipeline logs
+ Then Close the preview
+ Then Deploy the pipeline
+ Then Run the Pipeline in Runtime
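+ # Re-enter the same macro values for the deployed run; the preview runtime arguments are not reused.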
+ Then Enter runtime argument value "driverName" for key "oracleDriverName"
+ Then Enter runtime argument value from environment variable "host" for key "oracleHost"
+ Then Enter runtime argument value from environment variable "port" for key "oraclePort"
+ Then Enter runtime argument value from environment variable "username" for key "oracleUsername"
+ Then Enter runtime argument value from environment variable "password" for key "oraclePassword"
+ Then Enter runtime argument value "databaseName" for key "oracleDatabase"
+ Then Enter runtime argument value "projectId" for key "bqProjectId"
+ Then Enter runtime argument value "projectId" for key "bqDatasetProjectId"
+ Then Enter runtime argument value "dataset" for key "bqDataset"
+ Then Enter runtime argument value "bqTargetTable" for key "bqTable"
+ Then Enter runtime argument value "bqTruncateTable" for key "bqTruncateTable"
+ Then Enter runtime argument value "bqUpdateTableSchema" for key "bqUpdateTableSchema"
+ Then Run the Pipeline in Runtime with runtime arguments
+ Then Wait till pipeline is in running state
+ Then Open and capture logs
+ Then Verify the pipeline status is "Succeeded"
+ Then Close the pipeline logs
+ Then Validate the values of records transferred to target Big Query table is equal to the values from source table
diff --git a/oracle-plugin/src/e2e-test/java/io.cdap.plugin/BQValidation.java b/oracle-plugin/src/e2e-test/java/io.cdap.plugin/BQValidation.java
new file mode 100644
index 000000000..b7d93c80a
--- /dev/null
+++ b/oracle-plugin/src/e2e-test/java/io.cdap.plugin/BQValidation.java
@@ -0,0 +1,221 @@
+/*
+ * Copyright © 2023 Cask Data, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License"); you may not
+ * use this file except in compliance with the License. You may obtain a copy of
+ * the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package io.cdap.plugin;
+
+import com.google.cloud.bigquery.TableResult;
+import com.google.gson.Gson;
+import com.google.gson.JsonObject;
+import io.cdap.e2e.utils.BigQueryClient;
+import io.cdap.e2e.utils.PluginPropertyUtils;
+import io.cdap.plugin.oracle.OracleSourceSchemaReader;
+import org.junit.Assert;
+
+import java.io.IOException;
+import java.sql.Connection;
+import java.sql.ResultSet;
+import java.sql.ResultSetMetaData;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.sql.Timestamp;
+import java.sql.Types;
+import java.text.ParseException;
+import java.text.SimpleDateFormat;
+import java.util.ArrayList;
+import java.util.Base64;
+import java.util.Date;
+import java.util.List;
+
+/**
+ * BQValidation provides helpers for validating that records transferred from an Oracle source
+ * table match the records loaded into the corresponding BigQuery target table.
+ */
+public class BQValidation {
+
+ /**
+ * Compares the records of the Oracle source table with the records transferred to the BigQuery target table.
+ *
+ * @param schema      schema of the source table
+ * @param sourceTable table at the source side
+ * @param targetTable table at the sink side
+ * @return true if the values in source and target side are equal
+ */
+ public static boolean validateDBToBQRecordValues(String schema, String sourceTable, String targetTable)
+ throws SQLException, ClassNotFoundException, ParseException, IOException, InterruptedException {
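+ // Collects the BigQuery target table rows as JSON objects so they can be compared field by field.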
+ List<JsonObject> jsonResponse = new ArrayList<>();
+ List