diff --git a/.java-version b/.java-version new file mode 100644 index 0000000..03b6389 --- /dev/null +++ b/.java-version @@ -0,0 +1 @@ +17.0 diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md old mode 100755 new mode 100644 diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md old mode 100755 new mode 100644 diff --git a/README.md b/README.md index 4230456..717d633 100644 --- a/README.md +++ b/README.md @@ -26,11 +26,14 @@ There are two main use cases: [![Watch the video](docs/user/commerce-db-sync-demo.png)](https://video.sap.com/embed/secure/iframe/entryId/1_7bhihtlz/uiConfId/30317401/st/0) +## Release v1.2 + +Features and changes [presentation video](https://sapvideoa35699dc5.hana.ondemand.com/?entry_id=1_sipgb1l8). # Features Overview - Database Connectivity - - Multipe supported databases: Oracle, MySQL, HANA, MSSQL + - Multipe supported databases: Oracle, MySQL, HANA, MSSQL, PostgreSQL - UI based connection validation - Schema Differences - UI based schema differences detector @@ -49,6 +52,7 @@ There are two main use cases: - Table exclusions/inclusions - Incremental mode (delta) - Custom tables + - Resume failed migration - Staged approach using table prefix - View usage instead of table - Reporting / Audit @@ -56,6 +60,7 @@ There are two main use cases: - Automated reporting for copy processes - Stored on blob storage - Logging of all actions triggered from the UI + - JDBC logging # Requirements @@ -63,20 +68,22 @@ There are two main use cases: - Tested with source databases: - Azure SQL - MySQL (5.7) - - Oracle (XE 11g) + - Oracle (XE 11g, XE 18c) - HANA (express 2.0) and HANA Cloud + - PostgreSQL 15.x - Tested with target databases: - Azure SQL - - Oracle (XE 11g) + - Oracle (XE 11g, XE 18c) - HANA (express 2.0) and HANA Cloud + - PostgreSQL 15.x # Performance Commerce DB Sync has been built to offer reasonable performance with large amount of data using the following design: - Table to table replication using JDBC (low level) -- Selection of tables so we do not need a full synchronization in particular for large technical table (task logs, audit logs...)​ -- Multi-threaded and can manage multiple tables at the same time ​ +- Selection of tables so we do not need a full synchronization in particular for large technical table (task logs, audit logs...) 
+- Multi-threaded and can manage multiple tables at the same time - Using UPSERT (INSERT/UPDATE) - Use read replica Commerce database as a source database diff --git a/commercedbsync/.project b/commercedbsync/.project deleted file mode 100644 index 3efecdf..0000000 --- a/commercedbsync/.project +++ /dev/null @@ -1,28 +0,0 @@ - - - commercedbsync - - - - - - org.eclipse.jdt.core.javabuilder - - - - - org.eclipse.ui.externaltools.ExternalToolBuilder - full,incremental, - - - LaunchConfigHandle - <project>/.externalToolBuilders/com.hybris.hyeclipse.tsv.builder.launch - - - - - - com.hybris.hyeclipse.tsv.hybris - org.eclipse.jdt.core.javanature - - diff --git a/commercedbsync/.springBeans b/commercedbsync/.springBeans deleted file mode 100644 index e476d04..0000000 --- a/commercedbsync/.springBeans +++ /dev/null @@ -1,15 +0,0 @@ - - - 1 - - - - - - - resources/commercedbsync-spring.xml - web/webroot/WEB-INF/commercedbsync-web-spring.xml - - - - diff --git a/commercedbsync/external-dependencies.xml b/commercedbsync/external-dependencies.xml index 4abd5ae..7f1c4e8 100644 --- a/commercedbsync/external-dependencies.xml +++ b/commercedbsync/external-dependencies.xml @@ -36,7 +36,7 @@ com.zaxxer HikariCP - 3.4.5 + 5.0.1 com.github.freva diff --git a/commercedbsync/project.properties b/commercedbsync/project.properties index 948700f..d5f563b 100644 --- a/commercedbsync/project.properties +++ b/commercedbsync/project.properties @@ -4,106 +4,538 @@ # # commercedbsync.application-context=commercedbsync-spring.xml - -################################ -# Migration specific properties -################################ +## +# Specifies the profile name of data source that serves as migration input +# +# @values name of the data source profile +# @optional true +## +migration.input.profiles=source +## +# Specifies the profile name of data sources that serves as migration output +# +# @values name of the data source profile +# @optional true +## +migration.output.profiles=target +## +# Specifies the driver class for the source jdbc connection +# +# @values any valid jdbc driver class +# @optional false +## migration.ds.source.db.driver= +## +# Specifies the url for the source jdbc connection +# +# @values any valid jdbc url +# @optional false +## migration.ds.source.db.url= +## +# Specifies the user name for the source jdbc connection +# +# @values any valid user name for the jdbc connection +# @optional false +## migration.ds.source.db.username= +## +# Specifies the password for the source jdbc connection +# +# @values any valid password for the jdbc connection +# @optional false +## migration.ds.source.db.password= +## +# Specifies the table prefix used on the source commerce database. +# This may be relevant if a commerce installation was initialized using 'db.tableprefix'. +# +# @values any valid commerce database table prefix. +# @optional true +## migration.ds.source.db.tableprefix= +## +# Specifies the schema the respective commerce installation is deployed to. +# +# @values any valid schema name for the commerce installation +# @optional false +## migration.ds.source.db.schema= +## +# Specifies the name of the type system that should be taken into account +# +# @values any valid type system name +# @optional true +## migration.ds.source.db.typesystemname=DEFAULT +## +# Specifies the suffix which is used for the source typesystem +# +# @values the suffix used for typesystem. 
I.e, 'attributedescriptors1' means the suffix is '1' +# @optional true +# @dependency migration.ds.source.db.typesystemname +## migration.ds.source.db.typesystemsuffix= -migration.ds.source.db.connection.removeabandoned=true +## +# Specifies minimum amount of idle connections available in the source db pool +# +# @values integer value +# @optional false +## migration.ds.source.db.connection.pool.size.idle.min=${db.pool.minIdle} +## +# Specifies maximum amount of connections in the source db pool +# +# @values integer value +# @optional false +## migration.ds.source.db.connection.pool.size.idle.max=${db.pool.maxIdle} +## +# Specifies maximum amount of active connections in the source db pool +# +# @values integer value +# @optional false +## migration.ds.source.db.connection.pool.size.active.max=${db.pool.maxActive} +## +# Specifies the driver class for the target jdbc connection +# +# @values any valid jdbc driver class +# @optional false +## migration.ds.target.db.driver=${db.driver} +## +# Specifies the url for the target jdbc connection +# +# @values any valid jdbc url +# @optional false +## migration.ds.target.db.url=${db.url} +## +# Specifies the user name for the target jdbc connection +# +# @values any valid user name for the jdbc connection +# @optional false +## migration.ds.target.db.username=${db.username} +## +# Specifies the password for the target jdbc connection +# +# @values any valid password for the jdbc connection +# @optional false +## migration.ds.target.db.password=${db.password} +## +# Specifies the table prefix used on the target commerce database. +# This may be relevant if a commerce installation was initialized using `${db.tableprefix}` / staged approach. +# +# @values any valid commerce database table prefix. +# @optional true +## migration.ds.target.db.tableprefix=${db.tableprefix} migration.ds.target.db.catalog= +## +# Specifies the schema the target commerce installation is deployed to. +# +# @values any valid schema name for the commerce installation +# @optional false +## migration.ds.target.db.schema=dbo +## +# Specifies the name of the type system that should be taken into account +# +# @values any valid type system name +# @optional true +## migration.ds.target.db.typesystemname=DEFAULT +## +# Specifies the suffix which is used for the target typesystem +# +# @values the suffix used for typesystem. I.e, 'attributedescriptors1' means the suffix is '1' +# @optional true +# @dependency migration.ds.source.db.typesystemname +## migration.ds.target.db.typesystemsuffix= -migration.ds.target.db.connection.removeabandoned=true +## +# Specifies minimum amount of idle connections available in the target db pool +# +# @values integer value +# @optional false +## migration.ds.target.db.connection.pool.size.idle.min=${db.pool.minIdle} +## +# Specifies maximum amount of idle connections available in the target db pool +# +# @values integer value +# @optional false +## migration.ds.target.db.connection.pool.size.idle.max=${db.pool.maxIdle} +## +# Specifies maximum amount of connections in the target db pool +# +# @values integer value +# @optional false +## migration.ds.target.db.connection.pool.size.active.max=${db.pool.maxActive} +## +# When using the staged approach, multiple sets of commerce tables may exists (each having its own tableprefix). +# To prevent cluttering the db, this property specifies the maximum number of table sets that can exist, +# if exceeded the schema migrator will complain and suggest a cleanup. 
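For orientation, the source connection block documented above might be filled in as follows. Every value is a placeholder chosen for illustration (a HANA source with a non-default type system suffix), not a default shipped with the extension:

    migration.ds.source.db.driver=com.sap.db.jdbc.Driver
    migration.ds.source.db.url=jdbc:sap://source-host:30015
    migration.ds.source.db.username=migration_reader
    migration.ds.source.db.password=changeme
    migration.ds.source.db.schema=SAPHANADB
    migration.ds.source.db.typesystemname=DEFAULT
    migration.ds.source.db.typesystemsuffix=1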
+# +# @values integer value +# @optional true +## migration.ds.target.db.max.stage.migrations=5 -#triggered by updatesystem process or manually by hac +## +# Specifies whether the data migration shall be triggered by the 'update running system' operation. +# +# @values true or false +# @optional true +## migration.trigger.updatesystem=false -# Schema migration section - parameters for copying schema from source to target +## +# Globally enables / disables schema migration. If set to false, no schema changes will be applied. +# +# @values true or false +# @optional true +## migration.schema.enabled=true +## +# Specifies if tables which are missing in the target should be added by schema migration. +# +# @values true or false +# @optional true +# @dependency migration.schema.enabled +## migration.schema.target.tables.add.enabled=true +## +# Specifies if extra tables in target (compared to source schema) should be removed by schema migration. +# +# @values true or false +# @optional true +# @dependency migration.schema.enabled +## migration.schema.target.tables.remove.enabled=false +## +# Specifies if columns which are missing in the target tables should be added by schema migration. +# +# @values true or false +# @optional true +# @dependency migration.schema.enabled +## migration.schema.target.columns.add.enabled=true +## +# Specifies if extra columns in target tables (compared to source schema) should be removed by schema migration. +# +# @values true or false +# @optional true +# @dependency migration.schema.enabled +## migration.schema.target.columns.remove.enabled=true -# automatically trigger schema migrator before data copy process is started +## +# Specifies if the schema migrator should be automatically triggered before data copy process is started +# +# @values true or false +# @optional true +# @dependency migration.schema.enabled +## migration.schema.autotrigger.enabled=false -# the number of rows read per iteration +## +# Activate data export to external DB via cron jobs +# +# @values true or false +# @optional true +## +migration.data.export.enabled=false +# Specifies the number of rows to read per batch. This only affects tables which can be batched. +# +# @values integer value +# @optional true +## migration.data.reader.batchsize=1000 -# delete rows in target table before inserting new records +## +# Specifies if the target tables should be truncated before data is copied over. +# +# @values true or false +# @optional true +## migration.data.truncate.enabled=true -# These tables will not be emptied before records are inserted +## +# If truncation of target tables is enabled, this property specifies tables that should be excluded from truncation. +# +# @values comma separated list of table names +# @optional true +# @dependency migration.data.truncate.enabled +## migration.data.truncate.excluded= -# maximum number of writer workers per table that can be executed in parallel within a single node in the cluster +## +# Specifies the number of threads used per table to write data to target. +# Note that this value applies per table, so in total the number of threads will depend on +# 'migration.data.maxparalleltablecopy'. 
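As a concrete illustration of how the schema migration flags documented above combine, a deliberately conservative setup might look like this (example values only, not recommendations; some differ from the defaults listed below):

    migration.schema.enabled=true
    migration.schema.autotrigger.enabled=false
    migration.schema.target.tables.add.enabled=true
    migration.schema.target.tables.remove.enabled=false
    migration.schema.target.columns.add.enabled=true
    migration.schema.target.columns.remove.enabled=false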
+# [total number of writer threads] = [migration.data.workers.writer.maxtasks] * [migration.data.maxparalleltablecopy] +# +# @values integer value +# @optional true +# @dependency migration.data.maxparalleltablecopy +## migration.data.workers.writer.maxtasks=10 -# maximum number of reader workers per table that can be executed in parallel within a single node in the cluster +## +# Specifies the number of threads used per table to read data from source. +# Note that this value applies per table, so in total the number of threads will depend on +# 'migration.data.maxparalleltablecopy'. +# [total number of reader threads] = [migration.data.workers.reader.maxtasks] * [migration.data.maxparalleltablecopy] + +# @values integer value +# @optional true +# @dependency migration.data.maxparalleltablecopy +## migration.data.workers.reader.maxtasks=3 -# max retry attempts of a worker in case there is a problem +## +# Specifies the number of retries in case a worker task fails. +# +# @values integer value +# @optional true +## migration.data.workers.retryattempts=0 -# maximum number of table that can be copied in parallel within a single node in the cluster +## +# Specifies the number of tables that are copied over in parallel. +# +# @values integer value +# @optional true +## migration.data.maxparalleltablecopy=2 -# ignores data insertion errors and continues to the next records +## +# If set to true, the migration will abort as soon as an error occured. +# If set to false, the migration will try to continue if the state of the runtime allows. +# +# @values true or false +# @optional true +## migration.data.failonerror.enabled=true -# columns to be excluded. format: migration.data.columns.excluded.= +## +# Specifies the columns to be excluded +# +# @values migration.data.columns.excluded.[tablename]=[comma separated list of column names] +# @optional true +## migration.data.columns.excluded.attributedescriptors= +## +# Specifies the columns to be nullified. Whatever value there was will be replaced with NULL in the target column. +# +# @values migration.data.columns.nullify.[tablename]=[comma separated list of column names] +# @optional true +## migration.data.columns.nullify.attributedescriptors= -#remove all indices +## +# If set to true, all indices in the target table will be removed before copying over the data. +# +# @values true of false +# @optional true +## migration.data.indices.drop.enabled=false -#do not recreate following indices after the migration. Comma separated values +## +# do not recreate following indices after the migration. Comma separated values +# +# @values comma separated values +# @optional true +## migration.data.indices.drop.recreate.exclude= -#disable indices during migration +## +# If set to true, all indices in the target table will be disabled (NOT removed) before copying over the data. +# After the data copy the indices will be enabled and rebuilt again. +# +# @values true of false +# @optional true +## migration.data.indices.disable.enabled=false -#if empty, disable indices on all tables. If table specified, only disable for this one. +## +# If disabling of indices is enabled, this property specifies the tables that should be included. +# If no tables specified, indices for all tables will be disabled. +# +# @values comma separated list of tables +# @optional true +# @dependency migration.data.indices.disable.enabled +## migration.data.indices.disable.included= -#flag to enable the migration of audit tables +## +# Flag to enable the migration of audit tables. 
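Putting the parallelism knobs documented above together, the default values imply the following per-node sizing (hypothetical workload, defaults as listed below):

    migration.data.maxparalleltablecopy=2
    migration.data.workers.reader.maxtasks=3
    migration.data.workers.writer.maxtasks=10
    migration.data.reader.batchsize=1000

With these values a node copies at most 2 tables concurrently, using up to 3 * 2 = 6 reader threads and 10 * 2 = 20 writer threads in total, and each reader fetches 1000 rows per batch.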
+# +# @values true or false +# @optional true +## migration.data.tables.audit.enabled=true -#custom tables to migrate (use comma-separated list) +## +# Specifies a list of custom tables to migrate. Custom tables are tables that are not part of the commerce type system. +# +# @values comma separated list of table names. +# @optional true +## migration.data.tables.custom= -#tables to exclude (use table names name without prefix) -migration.data.tables.excluded=SYSTEMINIT,StoredHttpSessions -#tables to include (use table names name without prefix) +## +# Tables to exclude from migration (use table names name without prefix) +# +# @values comma separated list of table names. +# @optional true +## +migration.data.tables.excluded=SYSTEMINIT,StoredHttpSessions,itemdeletionmarkers +## +# Tables to include (use table names name without prefix) +# +# @values comma separated list of table names. +# @optional true +## migration.data.tables.included= +## +# Run migration in the cluster (based on commerce cluster config). The 'HAC' node will be the primary one. +# A scheduling algorithm decides which table will run on which node. Nodes are notified using cluster events. +# +# @values true or false +# @optional true +## migration.cluster.enabled=false -#enable the incremental database migration. +## +# If set to true, the migration will resume from where it stopped (either due to errors or cancellation). +# +# @values true or false +# @optional true +## +migration.scheduler.resume.enabled=false +## +# If set to true, the migration will run in incremental mode. Only rows that were modified after a given timestamp +# will be taken into account. +# +# @values true or false +# @optional true +## migration.data.incremental.enabled=false -#Only these tables will be taken into account for incremental migration. +## +# Only these tables will be taken into account for incremental migration. +# +# @values comma separated list of tables. +# @optional true +# @dependency migration.data.incremental.enabled +## migration.data.incremental.tables= -#The timestamp in ISO-8601 ISO_ZONED_DATE_TIME format. Records created or modified after this timestamp will be copied only. +## +# Records created or modified after this timestamp will be copied only. +# +# @values The timestamp in ISO-8601 ISO_ZONED_DATE_TIME format +# @optional true +# @dependency migration.data.incremental.enabled +## migration.data.incremental.timestamp= -#EXPERIMENTAL: Enable bulk copy for better performance -migration.data.bulkcopy.enabled=false +## +# Specifies the timeout of the data pipe. +# +# @values integer value +# @optional true +## migration.data.pipe.timeout=7200 +## +# Specifies the capacity of the data pipe. +# +# @values integer value +# @optional true +## migration.data.pipe.capacity=100 -# No activity? -> migration aborted and marked as stalled +## +# Specifies the timeout of the migration monitor. +# If there was no activity for too long the migration will be marked as 'stalled' and aborted. +# +# @values integer value +# @optional true +## migration.stalled.timeout=7200 -migration.data.timeout=60 +## +# Specifies blob storage connection string for storing reporting files. +# +# @values any azure blob storage connection string +# @optional true +## migration.data.report.connectionstring=${media.globalSettings.cloudAzureBlobStorageStrategy.connection} -# Properties that will be masked in the report +## +# Specifies the properties that should be masked in HAC. 
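A minimal incremental run built from the properties documented above could look like this; the table list and timestamp are purely illustrative, and the timestamp must be given in ISO-8601 ISO_ZONED_DATE_TIME format:

    migration.data.incremental.enabled=true
    migration.data.incremental.tables=orders,addresses
    migration.data.incremental.timestamp=2023-05-01T00:00:00+01:00[Europe/Paris]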
+# +# @values any property key +# @optional true +## migration.properties.masked=migration.data.report.connectionstring,migration.ds.source.db.password,migration.ds.target.db.password +## +# Specifies the default locale used. +# +# @values any locale +# @optional true +## migration.locale.default=en-US -# support views during data migration -## string pattern for view naming convention with '%s' as table name. e.g. v_%s -migration.data.view.name.pattern=v_%s -# DDL View Generation -# more information on https://github.tools.sap/cx-boosters/sap-commerce-db-sync/wiki/Dynamic-View-Generation -migration.data.view.t.{table}.enabled=false -migration.data.view.t.{table}.joinWhereClause={table} -migration.data.view.t.{table}.columnTransformation.{column}=GETDATE() +## +# Support views during data migration. String pattern for view naming convention with `'%s'` as table name. e.g. `v_%s` +# +# @values any string +# @optional true +## +migration.data.view.name.pattern=v_%s +## +# Activate DDL view generation for specific +# +# @values any string +# @optional true +## +migration.data.view.t.TABLE.enabled=false +## +# Activate DDL view generation for specific _TABLE_, with additional `JOIN` clausule +# +# @values any string +# @optional true +# @dependency migration.data.view.t.TABLE.enabled +## +migration.data.view.t.TABLE.joinWhereClause={table} +## +# Possibility to use custom functions to obfuscate values for specific columns +# +# @values any valid SQL function call +# @optional true +# @dependency migration.data.view.t.TABLE.enabled +## +migration.data.view.t.TABLE.columnTransformation.COLUMN=GETDATE() + +## +# If set to true, the JDBC queries ran against the source and target data sources will be logged in the storage pointed by the property {migration.data.report.connectionstring} +# +# @values true or false +# @optional false +## +migration.log.sql=false +## +# Specifies the number of log entries to add to the in-memory collection of JDBC log entries of a JDBC queries store before flushing the collection contents into the blob file storage associated with the JDBC store's data souce and clearing the in-memory collection to free memory +# +# @values an integer number +# @optional 10,000,000 +## +migration.log.sql.memory.flush.threshold.nbentries=10000000 +## +# If set to true, the values of the parameters of the JDBC queries ran against the source data source will be logged in the JDBC queries logs (migration.log.sql has to be true to enable this type of logging). For security reasons, the tool will never log parameter values for the queries ran against the target datasource. 
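Taken together, the JDBC logging switches documented above could be enabled for a troubleshooting run as follows (illustrative values; per the descriptions, the resulting logs are written to the blob storage configured via migration.data.report.connectionstring):

    migration.log.sql=true
    migration.log.sql.source.showparameters=true
    migration.log.sql.memory.flush.threshold.nbentries=10000000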
+# +# @values true or false +# @optional true +## +migration.log.sql.source.showparameters=true +## +# Specifies the name of the container where the tool will store the files related to migration in the blob storage pointed by the property {migration.data.report.connectionstring} +# +# @values any string +# @optional false +## +migration.data.filestorage.container.name=migration +migration.data.fulldatabase.enabled=true + +# Enhanced Logging +log4j2.appender.migrationAppender.type=Console +log4j2.appender.migrationAppender.name=MigrationAppender +log4j2.appender.migrationAppender.layout.type=PatternLayout +log4j2.appender.migrationAppender.layout.pattern=%-5p [%t] [%c{1}] %X{migrationID,pipeline,clusterID} %m%n +log4j2.logger.migrationToolkit.name=com.sap.cx.boosters.commercedbsync +log4j2.logger.migrationToolkit.level=INFO +log4j2.logger.migrationToolkit.appenderRef.migration.ref=MigrationAppender +log4j2.logger.migrationToolkit.additivity=false diff --git a/commercedbsync/resources/commercedbsync-beans.xml b/commercedbsync/resources/commercedbsync-beans.xml index 72741cf..226f07c 100644 --- a/commercedbsync/resources/commercedbsync-beans.xml +++ b/commercedbsync/resources/commercedbsync-beans.xml @@ -29,6 +29,7 @@ + @@ -36,6 +37,16 @@ + + + + + + + + + + @@ -48,11 +59,13 @@ + + @@ -108,6 +121,7 @@ + @@ -116,16 +130,20 @@ + + - + + + @@ -146,4 +164,8 @@ + + + + diff --git a/commercedbsync/resources/commercedbsync-items.xml b/commercedbsync/resources/commercedbsync-items.xml index 88f37fc..6d1629e 100644 --- a/commercedbsync/resources/commercedbsync-items.xml +++ b/commercedbsync/resources/commercedbsync-items.xml @@ -50,6 +50,30 @@ false + + maximum number of table that can be copied in parallel within a single node in the cluster + + + 2 + + + maximum number of reader workers per table that can be executed in parallel within a single node in the cluster + + + 3 + + + maximum number of writer workers per table that can be executed in parallel within a single node in the cluster + + + 10 + + + the number of rows read per iteration + + + 1000 + - - - - - - - + + + + + + + + - + + + + class="com.sap.cx.boosters.commercedbsync.concurrent.impl.DefaultDataThreadPoolFactory"> + + - - - - + + - - - - - - - - + + @@ -81,7 +76,6 @@ - @@ -100,6 +94,12 @@ + + + + + @@ -113,6 +113,8 @@ + + @@ -127,8 +129,15 @@ - + + + + + + + @@ -136,7 +145,7 @@ class="com.sap.cx.boosters.commercedbsync.service.impl.PipeDatabaseMigrationCopyService"> - + @@ -150,7 +159,6 @@ - @@ -172,6 +180,7 @@ parent="abstractEventListener"> + @@ -200,12 +209,13 @@ + + - - - + @@ -228,6 +238,7 @@ + @@ -289,20 +300,10 @@ - - - - - - + @@ -333,10 +334,4 @@ - - - - - - diff --git a/commercedbsync/resources/impex/essentialdata-commercemigration-jobs.impex b/commercedbsync/resources/impex/projectdata-commercemigration-jobs.impex similarity index 88% rename from commercedbsync/resources/impex/essentialdata-commercemigration-jobs.impex rename to commercedbsync/resources/impex/projectdata-commercemigration-jobs.impex index a30c758..9299434 100644 --- a/commercedbsync/resources/impex/essentialdata-commercemigration-jobs.impex +++ b/commercedbsync/resources/impex/projectdata-commercemigration-jobs.impex @@ -4,12 +4,12 @@ INSERT_UPDATE ServicelayerJob;code[unique=true];springId[unique=true] ;fullMigrationJob;fullMigrationJob # Update details for incremental migration -INSERT_UPDATE IncrementalMigrationCronJob;code[unique=true];active;job(code)[default=incrementalMigrationJob];sessionLanguage(isoCode)[default=en] +INSERT_UPDATE 
IncrementalMigrationCronJob;code[unique=true];active;job(code)[default=incrementalMigrationJob];sessionLanguage(isoCode)[default=en];maxParallelTableCopy[default=2];maxReaderWorkers[default=3];maxWriterWorkers[default=10];batchSize[default=1000] ;incrementalMigrationJob;true; -INSERT_UPDATE IncrementalMigrationCronJob;code[unique=true];migrationItems +INSERT_UPDATE IncrementalMigrationCronJob;code[unique=true];migrationItems;maxParallelTableCopy[default=2];maxReaderWorkers[default=3];maxWriterWorkers[default=10];batchSize[default=1000] ;incrementalMigrationJob;paymentmodes,addresses,users,cat2prodrel,consignments,orders -INSERT_UPDATE FullMigrationCronJob;code[unique=true];job(code)[default=fullMigrationJob];active;truncateEnabled;fullDatabaseMigration;schemaAutotrigger;sessionLanguage(isoCode)[default=en];migrationItems; +INSERT_UPDATE FullMigrationCronJob;code[unique=true];job(code)[default=fullMigrationJob];active;truncateEnabled;fullDatabaseMigration;schemaAutotrigger;sessionLanguage(isoCode)[default=en];migrationItems;maxParallelTableCopy[default=2];maxReaderWorkers[default=3];maxWriterWorkers[default=10];batchSize[default=1000] ;fullDatabaseMigrationJob;;true;true;true;true;;mediaformatmapping,cat2attrrellp,categories,compositeentries,mediafolders,mediacontextlp,validationconstraintslp,validationconstraints,catalogslp,units,genericitems,pcp2wrtblecvrel,renderertemplate,dynamiccontent,userrightslp,backofficesearchcond,metainformations,unitslp,workflowactions,productprops,scripts,systemsetupaudit,gentestitems,cat2princrel,jalovelocityrenderer,paymentmodeslp,usergroupprops,orderprops,userrights,workflowactionitemsrel,parserproperty,productfeatures,productreferences,commentcompreadrels,languageslp,syncjob2pcplrel,commentitemrelations,jobs,themes,discounts,catalogversionsyncjob,cat2catrel,categorieslp,syncjob2langrel,currencieslp,impexdocumentids,userprofiles,stdpaymmodevals,links,workflowitematts,products,backofficesavedquery,productslp,workflowtemplatelinkrel,previewtickets,backofficecollections,props,retentionrule,syncjob2typerel,commentcompremoverels,genericitemslp,addresses,catalogs,languages,taxeslp,discountslp,distributedbatches,backofficesavedquerylp,searchrestrictions,aclentries,format2medforrel,keywords,paymentmodes,whereparts,commentassignrelations,commentattachments,discountrows,mediacontainerlp,commentdomains,synattcfg,mediacontext,impbatchcontent,classificationattrslp,commenttypes,globaldiscountrows,mediacontainer,searchrestrictionslp,mediaformatlp,catverdiffs,cmptype2covgrprels,workflowtemplprincrel,clattruntlp,jobslp,titles,pendingstepsrelation,themeslp,countries,commentcompwriterels,processedstepsrelation,slactions,productreferenceslp,usergroups,regionslp,userprops,exportslp,numberseries,distributedprocesses,catalogversions,externalimportkey,usergroupslp,cat2attrrel,medias,jobsearchrestriction,triggerscj,addressprops,openidexternalscopes,attr2valuerel,constraintgroup,renderertemplatelp,titleslp,indextestitem,workflowactionlinkrel,workflowactionslp,catalogversionslp,commentwatchrelations,configitems,pcpl2rdblecvrel,abstrcfgproductinfo,users,workflowitemattslp,commentcompcreaterels,derivedmedias,cat2medrel,scriptslp,regions,currencies,steps,deliverymodeslp,classattrvalueslp,mediaformat,zonedeliverymodevalues,configuratorsettings,prod2keywordrel,cat2prodrel,taxes,cat2keywordrel,classattrvalues,ydeployments,cstrgr2abscstrrel,mediaprops,pgrels,zone2country,classificationattrs,taxrows,renderersproperty,cronjobs,commentcomponents,exports,deliverymodes,comments,workflowactioncomme
nts,countrieslp,commentusersettings,format2comtyprel,corsconfigproperty,backofficecollitemrefs,pricerows,agreements,workflowactionsrel,clattrunt,format,changedescriptors,formatlp,zones -;fullTableMigrationJob;;true;true;false;false;;products;paymentmodes +;fullTableMigrationJob;;true;true;false;false;;products,paymentmodes diff --git a/commercedbsync/resources/localization/commercedbsync-locales_en.properties b/commercedbsync/resources/localization/commercedbsync-locales_en.properties index e9a5825..010258d 100644 --- a/commercedbsync/resources/localization/commercedbsync-locales_en.properties +++ b/commercedbsync/resources/localization/commercedbsync-locales_en.properties @@ -17,4 +17,16 @@ type.MigrationCronJob.migrationItems.name=Migration Tables type.MigrationCronJob.migrationItems.description= type.FullMigrationCronJob.fullDatabaseMigration.name=Full Database Migration -type.FullMigrationCronJob.fullDatabaseMigration.description= \ No newline at end of file +type.FullMigrationCronJob.fullDatabaseMigration.description= + +type.MigrationCronJob.maxParallelTableCopy.name=Parallel Tables +type.MigrationCronJob.maxParallelTableCopy.description=Number of tables to be copied in parallel + +type.MigrationCronJob.maxReaderWorkers.name=Reader Workers +type.MigrationCronJob.maxReaderWorkers.description=Number of reader workers to be used for each table + +type.MigrationCronJob.maxWriterWorkers.name=Writer Workers +type.MigrationCronJob.maxWriterWorkers.description=Number of writer workers to be used for each table + +type.MigrationCronJob.batchSize.name=Batch Size +type.MigrationCronJob.batchSize.description=Batch size used to query data diff --git a/commercedbsync/resources/sql/createSchedulerTablesHana.sql b/commercedbsync/resources/sql/createSchedulerTablesHANA.sql similarity index 92% rename from commercedbsync/resources/sql/createSchedulerTablesHana.sql rename to commercedbsync/resources/sql/createSchedulerTablesHANA.sql index bf15d86..d2a9c74 100644 --- a/commercedbsync/resources/sql/createSchedulerTablesHana.sql +++ b/commercedbsync/resources/sql/createSchedulerTablesHANA.sql @@ -14,6 +14,11 @@ IF tablename = 'MIGRATIONTOOLKIT_TABLECOPYSTATUS' AND :found > 0 THEN DROP TABLE MIGRATIONTOOLKIT_TABLECOPYSTATUS; END IF; + +IF tablename = 'MIGRATIONTOOLKIT_TABLECOPYBATCHES' AND :found > 0 + THEN +DROP TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES; +END IF; END; # CALL MIGRATION_PROCEDURE('MIGRATIONTOOLKIT_TABLECOPYTASKS'); @@ -32,15 +37,32 @@ CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYTASKS ( failure char(1) NOT NULL DEFAULT '0', error NVARCHAR(5000) NULL, published char(1) NOT NULL DEFAULT '0', + truncated char(1) NOT NULL DEFAULT '0', lastupdate Timestamp NOT NULL DEFAULT '0001-01-01 00:00:00', avgwriterrowthroughput numeric(10,2) NULL DEFAULT 0, avgreaderrowthroughput numeric(10,2) NULL DEFAULT 0, + copymethod NVARCHAR(255) NULL, + keycolumns NVARCHAR(255) NULL, durationinseconds numeric(10,2) NULL DEFAULT 0, PRIMARY KEY (migrationid, targetnodeid, pipelinename) ); # +CALL MIGRATION_PROCEDURE('MIGRATIONTOOLKIT_TABLECOPYBATCHES'); +# + +CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES ( + migrationId NVARCHAR(255) NOT NULL, + batchId int NOT NULL DEFAULT 0, + pipelinename NVARCHAR(255) NOT NULL, + lowerBoundary NVARCHAR(255) NOT NULL, + upperBoundary NVARCHAR(255) NULL, + PRIMARY KEY (migrationid, batchId, pipelinename) +); + +# + CALL MIGRATION_PROCEDURE('MIGRATIONTOOLKIT_TABLECOPYSTATUS'); # diff --git a/commercedbsync/resources/sql/createSchedulerTables.sql 
b/commercedbsync/resources/sql/createSchedulerTablesMSSQL.sql similarity index 87% rename from commercedbsync/resources/sql/createSchedulerTables.sql rename to commercedbsync/resources/sql/createSchedulerTablesMSSQL.sql index bad4a15..a34bc37 100644 --- a/commercedbsync/resources/sql/createSchedulerTables.sql +++ b/commercedbsync/resources/sql/createSchedulerTablesMSSQL.sql @@ -14,13 +14,27 @@ CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYTASKS ( failure char(1) NOT NULL DEFAULT '0', error NVARCHAR(MAX) NULL, published char(1) NOT NULL DEFAULT '0', + truncated char(1) NOT NULL DEFAULT '0', lastupdate DATETIME2 NOT NULL DEFAULT '0001-01-01 00:00:00', avgwriterrowthroughput numeric(10,2) NULL DEFAULT 0, avgreaderrowthroughput numeric(10,2) NULL DEFAULT 0, + copymethod NVARCHAR(255) NULL, + keycolumns NVARCHAR(255) NULL, durationinseconds numeric(10,2) NULL DEFAULT 0, PRIMARY KEY (migrationid, targetnodeid, pipelinename) ); +DROP TABLE IF EXISTS MIGRATIONTOOLKIT_TABLECOPYBATCHES; + +CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES ( + migrationId NVARCHAR(255) NOT NULL, + batchId int NOT NULL DEFAULT 0, + pipelinename NVARCHAR(255) NOT NULL, + lowerBoundary NVARCHAR(255) NOT NULL, + upperBoundary NVARCHAR(255) NULL, + PRIMARY KEY (migrationid, batchId, pipelinename) +); + DROP TABLE IF EXISTS MIGRATIONTOOLKIT_TABLECOPYSTATUS; CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYSTATUS ( @@ -45,12 +59,6 @@ AS BEGIN DECLARE @relevant_count integer = 0 SET NOCOUNT ON - /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. - * License: Apache-2.0 - * - */ - -- latest update overall = latest update timestamp of updated tasks UPDATE s SET s.lastUpdate = t.latestUpdate diff --git a/commercedbsync/resources/sql/createSchedulerTablesMYSQL.sql b/commercedbsync/resources/sql/createSchedulerTablesMYSQL.sql new file mode 100644 index 0000000..1838fe4 --- /dev/null +++ b/commercedbsync/resources/sql/createSchedulerTablesMYSQL.sql @@ -0,0 +1,112 @@ +DROP TABLE IF EXISTS MIGRATIONTOOLKIT_TABLECOPYTASKS; +# +CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYTASKS +( + targetnodeId int NOT NULL, + migrationId VARCHAR(255) NOT NULL, + pipelinename VARCHAR(255) NOT NULL, + sourcetablename VARCHAR(255) NOT NULL, + targettablename VARCHAR(255) NOT NULL, + columnmap TEXT NULL, + duration VARCHAR(255) NULL, + sourcerowcount int NOT NULL DEFAULT 0, + targetrowcount int NOT NULL DEFAULT 0, + failure char(1) NOT NULL DEFAULT '0', + error TEXT NULL, + published char(1) NOT NULL DEFAULT '0', + truncated char(1) NOT NULL DEFAULT '0', + lastupdate DATETIME NOT NULL DEFAULT '0001-01-01 00:00:00', + avgwriterrowthroughput numeric(10, 2) NULL DEFAULT 0, + avgreaderrowthroughput numeric(10, 2) NULL DEFAULT 0, + copymethod VARCHAR(255) NULL, + keycolumns VARCHAR(255) NULL, + durationinseconds numeric(10, 2) NULL DEFAULT 0, + PRIMARY KEY (migrationid, targetnodeid, pipelinename) +); +# +DROP TABLE IF EXISTS MIGRATIONTOOLKIT_TABLECOPYBATCHES; +# +CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES +( + migrationId VARCHAR(255) NOT NULL, + batchId int NOT NULL DEFAULT 0, + pipelinename VARCHAR(255) NOT NULL, + lowerBoundary VARCHAR(255) NOT NULL, + upperBoundary VARCHAR(255) NULL, + PRIMARY KEY (migrationid, batchId, pipelinename) +); +# +DROP TABLE IF EXISTS MIGRATIONTOOLKIT_TABLECOPYSTATUS; +# +CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYSTATUS +( + migrationId VARCHAR(255) NOT NULL, + startAt datetime NOT NULL DEFAULT CURRENT_TIMESTAMP, + endAt datetime, + lastUpdate datetime, + total int NOT NULL DEFAULT 0, + completed int NOT NULL 
DEFAULT 0, + failed int NOT NULL DEFAULT 0, + status VARCHAR(255) NOT NULL DEFAULT 'RUNNING', + PRIMARY KEY (migrationid) +); +# +DROP TRIGGER IF EXISTS MIGRATIONTOOLKIT_TABLECOPYSTATUS_Insert; +DROP TRIGGER IF EXISTS MIGRATIONTOOLKIT_TABLECOPYSTATUS_Update; +# +CREATE TRIGGER MIGRATIONTOOLKIT_TABLECOPYSTATUS_Insert + AFTER INSERT + ON MIGRATIONTOOLKIT_TABLECOPYTASKS + FOR EACH ROW +BEGIN + -- latest update overall = latest update timestamp of updated tasks + UPDATE MIGRATIONTOOLKIT_TABLECOPYSTATUS s + SET s.lastUpdate = NEW.lastUpdate + WHERE s.migrationId = NEW.migrationId; +END; +# +CREATE TRIGGER MIGRATIONTOOLKIT_TABLECOPYSTATUS_Update + AFTER UPDATE + ON MIGRATIONTOOLKIT_TABLECOPYTASKS + FOR EACH ROW +BEGIN + -- latest update overall = latest update timestamp of updated tasks + UPDATE MIGRATIONTOOLKIT_TABLECOPYSTATUS s + SET s.lastUpdate = NEW.lastUpdate + WHERE s.migrationId = OLD.migrationId; + + IF NEW.failure = '1' OR NEW.duration IS NOT NULL THEN + UPDATE MIGRATIONTOOLKIT_TABLECOPYSTATUS s + INNER JOIN ( + SELECT migrationId, COUNT(pipelinename) AS completed + FROM MIGRATIONTOOLKIT_TABLECOPYTASKS + WHERE duration IS NOT NULL + GROUP BY migrationId + ) AS t + ON s.migrationId = t.migrationId + SET s.completed = t.completed; + + -- update failed count when tasks failed + UPDATE MIGRATIONTOOLKIT_TABLECOPYSTATUS s + INNER JOIN ( + SELECT migrationId, COUNT(pipelinename) AS failed + FROM MIGRATIONTOOLKIT_TABLECOPYTASKS + WHERE failure = '1' + GROUP BY migrationId + ) AS t + ON s.migrationId = t.migrationId + SET s.failed = t.failed; + + UPDATE MIGRATIONTOOLKIT_TABLECOPYSTATUS + SET endAt = UTC_TIMESTAMP() + WHERE migrationId = OLD.migrationId + AND total = completed + AND endAt IS NULL; + + UPDATE MIGRATIONTOOLKIT_TABLECOPYSTATUS + SET status = 'PROCESSED' + WHERE migrationId = OLD.migrationId + AND status = 'RUNNING' + AND total = completed; + END IF; +END; diff --git a/commercedbsync/resources/sql/createSchedulerTablesOracle.sql b/commercedbsync/resources/sql/createSchedulerTablesORACLE.sql similarity index 88% rename from commercedbsync/resources/sql/createSchedulerTablesOracle.sql rename to commercedbsync/resources/sql/createSchedulerTablesORACLE.sql index 7286422..1b13a17 100644 --- a/commercedbsync/resources/sql/createSchedulerTablesOracle.sql +++ b/commercedbsync/resources/sql/createSchedulerTablesORACLE.sql @@ -1,22 +1,3 @@ -/* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. - * License: Apache-2.0 - * - */ - -/* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. - * License: Apache-2.0 - * - */ - -/* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
- * License: Apache-2.0 - * - */ - - BEGIN EXECUTE IMMEDIATE 'DROP TABLE MIGRATIONTOOLKIT_TABLECOPYTASKS'; EXCEPTION @@ -39,9 +20,12 @@ CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYTASKS ( failure char(1) DEFAULT '0' NOT NULL, error CLOB NULL, published char(1) DEFAULT '0' NOT NULL, + truncated char(1) DEFAULT '0' NOT NULL, lastupdate Timestamp NOT NULL, avgwriterrowthroughput number(10,2) DEFAULT 0 NULL, avgreaderrowthroughput number(10,2) DEFAULT 0 NULL, + copymethod NVARCHAR2(255) NULL, + keycolumns NVARCHAR2(255) NULL, durationinseconds number(10,2) DEFAULT 0 NULL, PRIMARY KEY (migrationid, targetnodeid, pipelinename) ) @@ -49,6 +33,26 @@ CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYTASKS ( +BEGIN + EXECUTE IMMEDIATE 'DROP TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES'; +EXCEPTION + WHEN OTHERS THEN NULL; +END; +/ + + +CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES ( + migrationId NVARCHAR2(255) NOT NULL, + batchId number(10) DEFAULT 0 NOT NULL, + pipelinename NVARCHAR2(255) NOT NULL, + lowerBoundary NVARCHAR2(255) NOT NULL, + upperBoundary NVARCHAR2(255) NULL, + PRIMARY KEY (migrationid, batchId, pipelinename) +) +/ + + + BEGIN EXECUTE IMMEDIATE 'DROP TABLE MIGRATIONTOOLKIT_TABLECOPYSTATUS'; diff --git a/commercedbsync/resources/sql/createSchedulerTablesPostGres.sql b/commercedbsync/resources/sql/createSchedulerTablesPOSTGRESQL.sql similarity index 87% rename from commercedbsync/resources/sql/createSchedulerTablesPostGres.sql rename to commercedbsync/resources/sql/createSchedulerTablesPOSTGRESQL.sql index 451451b..d5440aa 100644 --- a/commercedbsync/resources/sql/createSchedulerTablesPostGres.sql +++ b/commercedbsync/resources/sql/createSchedulerTablesPOSTGRESQL.sql @@ -1,23 +1,3 @@ -/* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. - * License: Apache-2.0 - * - */ - -/* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. - * License: Apache-2.0 - * - */ - -/* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
- * License: Apache-2.0 - * - */ - - - DROP TABLE IF EXISTS MIGRATIONTOOLKIT_TABLECOPYTASKS; # @@ -35,15 +15,33 @@ CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYTASKS ( failure char(1) NOT NULL DEFAULT '0', error text NULL, published char(1) NOT NULL DEFAULT '0', + truncated char(1) NOT NULL DEFAULT '0', lastupdate timestamp NOT NULL DEFAULT '0001-01-01 00:00:00', avgwriterrowthroughput numeric(10,2) NULL DEFAULT 0, avgreaderrowthroughput numeric(10,2) NULL DEFAULT 0, + copymethod VARCHAR(255) NULL, + keycolumns VARCHAR(255) NULL, durationinseconds numeric(10,2) NULL DEFAULT 0, PRIMARY KEY (migrationid, targetnodeid, pipelinename) ); # +DROP TABLE IF EXISTS MIGRATIONTOOLKIT_TABLECOPYBATCHES; + +# + +CREATE TABLE MIGRATIONTOOLKIT_TABLECOPYBATCHES ( + migrationId VARCHAR(255) NOT NULL, + batchId int NOT NULL DEFAULT 0, + pipelinename VARCHAR(255) NOT NULL, + lowerBoundary VARCHAR(255) NOT NULL, + upperBoundary VARCHAR(255) NULL, + PRIMARY KEY (migrationid, batchId, pipelinename) +); + +# + DROP TABLE IF EXISTS MIGRATIONTOOLKIT_TABLECOPYSTATUS; # diff --git a/commercedbsync/resources/sql/transformationFunctions/hsqldb-general.sql b/commercedbsync/resources/sql/transformationFunctions/hsqldb-general.sql deleted file mode 100644 index e69de29..0000000 diff --git a/commercedbsync/resources/sql/transformationFunctions/hsqldb-typeinfotable.sql b/commercedbsync/resources/sql/transformationFunctions/hsqldb-typeinfotable.sql deleted file mode 100644 index e69de29..0000000 diff --git a/commercedbsync/resources/sql/transformationFunctions/mssql-general.sql b/commercedbsync/resources/sql/transformationFunctions/mssql-general.sql index 174a62d..ce4b833 100644 --- a/commercedbsync/resources/sql/transformationFunctions/mssql-general.sql +++ b/commercedbsync/resources/sql/transformationFunctions/mssql-general.sql @@ -34,25 +34,4 @@ IF @uid = 'admin' RETURN 'plain' RETURN '*' -END; - -CREATE OR ALTER FUNCTION mask_custom(@Prefix int, @Mask varchar(max), @Suffix int, @Original varchar(MAX)) -RETURNS VARCHAR(max) -AS -BEGIN - - RETURN SUBSTRING(@Original,1,@Prefix) + - @Mask + - SUBSTRING(@Original,LEN(@Original) - @Suffix + 1, LEN(@Original)) -END; - -CREATE OR ALTER FUNCTION mask_email(@String varchar(MAX)) -RETURNS VARCHAR(max) -AS -BEGIN - - RETURN LEFT(@String, 3) + '*****@' - + REVERSE(LEFT(RIGHT(REVERSE(@String) , CHARINDEX('@', @String) +2), 2)) - + '******' - + RIGHT(@String, 4) END; \ No newline at end of file diff --git a/commercedbsync/resources/sql/transformationFunctions/mysql-general.sql b/commercedbsync/resources/sql/transformationFunctions/mysql-general.sql deleted file mode 100644 index e69de29..0000000 diff --git a/commercedbsync/resources/sql/transformationFunctions/mysql-typeinfotable.sql b/commercedbsync/resources/sql/transformationFunctions/mysql-typeinfotable.sql deleted file mode 100644 index e69de29..0000000 diff --git a/commercedbsync/resources/sql/transformationFunctions/oracle-general.sql b/commercedbsync/resources/sql/transformationFunctions/oracle-general.sql deleted file mode 100644 index e69de29..0000000 diff --git a/commercedbsync/resources/sql/transformationFunctions/oracle-typeinfotable.sql b/commercedbsync/resources/sql/transformationFunctions/oracle-typeinfotable.sql deleted file mode 100644 index e69de29..0000000 diff --git a/commercedbsync/resources/sql/transformationFunctions/postgresql-general.sql b/commercedbsync/resources/sql/transformationFunctions/postgresql-general.sql deleted file mode 100644 index e69de29..0000000 diff --git 
a/commercedbsync/resources/sql/transformationFunctions/postgresql-typeinfotable.sql b/commercedbsync/resources/sql/transformationFunctions/postgresql-typeinfotable.sql deleted file mode 100644 index e69de29..0000000 diff --git a/commercedbsync/resources/sql/transformationFunctions/sap-general.sql b/commercedbsync/resources/sql/transformationFunctions/sap-general.sql deleted file mode 100644 index e69de29..0000000 diff --git a/commercedbsync/resources/sql/transformationFunctions/sap-typeinfotable.sql b/commercedbsync/resources/sql/transformationFunctions/sap-typeinfotable.sql deleted file mode 100644 index e69de29..0000000 diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/CommercedbsyncStandalone.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/CommercedbsyncStandalone.java index 6e3e587..f9a74b9 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/CommercedbsyncStandalone.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/CommercedbsyncStandalone.java @@ -1,8 +1,9 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync; import de.hybris.platform.core.Registry; @@ -10,21 +11,22 @@ import de.hybris.platform.util.RedeployUtilities; import de.hybris.platform.util.Utilities; - /** - * Demonstration of how to write a standalone application that can be run directly from within eclipse or from the - * commandline.
+ * Demonstration of how to write a standalone application that can be run + * directly from within eclipse or from the commandline.
* To run this from commandline, just use the following command:
* * java -jar bootstrap/bin/ybootstrap.jar "new CommercedbsyncStandalone().run();" - * From eclipse, just run as Java Application. Note that you maybe need to add all other projects like - * ext-commerce, ext-pim to the Launch configuration classpath. + * From eclipse, just run as Java Application. Note that you maybe need + * to add all other projects like ext-commerce, ext-pim to the Launch + * configuration classpath. */ public class CommercedbsyncStandalone { /** * Main class to be able to run it directly as a java program. * - * @param args the arguments from commandline + * @param args + * the arguments from commandline */ public static void main(final String[] args) { new CommercedbsyncStandalone().run(); @@ -35,8 +37,8 @@ public void run() { Registry.activateMasterTenant(); final JaloSession jaloSession = JaloSession.getCurrentSession(); - System.out.println("Session ID: " + jaloSession.getSessionID()); //NOPMD - System.out.println("User: " + jaloSession.getUser()); //NOPMD + System.out.println("Session ID: " + jaloSession.getSessionID()); // NOPMD + System.out.println("User: " + jaloSession.getUser()); // NOPMD Utilities.printAppInfo(); RedeployUtilities.shutdown(); diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/DataRepositoryAdapter.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/DataRepositoryAdapter.java index e235c34..d0fb03b 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/DataRepositoryAdapter.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/DataRepositoryAdapter.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -21,6 +21,7 @@ public interface DataRepositoryAdapter { DataSet getBatchOrderedByColumn(MigrationContext context, SeekQueryDefinition queryDefinition) throws Exception; - DataSet getBatchMarkersOrderedByColumn(MigrationContext context, MarkersQueryDefinition queryDefinition) throws Exception; + DataSet getBatchMarkersOrderedByColumn(MigrationContext context, MarkersQueryDefinition queryDefinition) + throws Exception; } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/impl/ContextualDataRepositoryAdapter.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/impl/ContextualDataRepositoryAdapter.java index b0aae83..0049ae8 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/impl/ContextualDataRepositoryAdapter.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/adapter/impl/ContextualDataRepositoryAdapter.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -18,12 +18,12 @@ import java.time.Instant; /** - * Controls the way the repository is accessed by adapting the most common reading - * operations based on the configured context + * Controls the way the repository is accessed by adapting the most common + * reading operations based on the configured context */ public class ContextualDataRepositoryAdapter implements DataRepositoryAdapter { - private DataRepository repository; + private final DataRepository repository; public ContextualDataRepositoryAdapter(DataRepository repository) { this.repository = repository; @@ -31,15 +31,15 @@ public ContextualDataRepositoryAdapter(DataRepository repository) { @Override public long getRowCount(MigrationContext context, String table) throws Exception { - if(context.isDeletionEnabled() || context.isLpTableMigrationEnabled()){ - return repository.getRowCountModifiedAfter(table, getIncrementalTimestamp(context),context.isDeletionEnabled(), context.isLpTableMigrationEnabled()); - } - else{ - if (context.isIncrementalModeEnabled()) { - return repository.getRowCountModifiedAfter(table, getIncrementalTimestamp(context)); + if (context.isDeletionEnabled() || context.isLpTableMigrationEnabled()) { + return repository.getRowCountModifiedAfter(table, getIncrementalTimestamp(context), + context.isDeletionEnabled(), context.isLpTableMigrationEnabled()); } else { - return repository.getRowCount(table); - } + if (context.isIncrementalModeEnabled()) { + return repository.getRowCountModifiedAfter(table, getIncrementalTimestamp(context)); + } else { + return repository.getRowCount(table); + } } } @@ -53,7 +53,8 @@ public DataSet getAll(MigrationContext context, String table) throws Exception { } @Override - public DataSet getBatchWithoutIdentifier(MigrationContext context, OffsetQueryDefinition queryDefinition) throws Exception { + public DataSet getBatchWithoutIdentifier(MigrationContext context, OffsetQueryDefinition queryDefinition) + throws Exception { if (context.isIncrementalModeEnabled()) { return repository.getBatchWithoutIdentifier(queryDefinition, getIncrementalTimestamp(context)); } else { @@ -62,7 +63,8 @@ public DataSet getBatchWithoutIdentifier(MigrationContext context, OffsetQueryDe } @Override - public DataSet getBatchOrderedByColumn(MigrationContext context, SeekQueryDefinition queryDefinition) throws Exception { + public DataSet getBatchOrderedByColumn(MigrationContext context, SeekQueryDefinition queryDefinition) + throws Exception { if (context.isIncrementalModeEnabled()) { return repository.getBatchOrderedByColumn(queryDefinition, getIncrementalTimestamp(context)); } else { @@ -71,7 +73,8 @@ public DataSet getBatchOrderedByColumn(MigrationContext context, SeekQueryDefini } @Override - public DataSet getBatchMarkersOrderedByColumn(MigrationContext context, MarkersQueryDefinition queryDefinition) throws Exception { + public DataSet getBatchMarkersOrderedByColumn(MigrationContext context, MarkersQueryDefinition queryDefinition) + throws Exception { if (context.isIncrementalModeEnabled()) { return repository.getBatchMarkersOrderedByColumn(queryDefinition, getIncrementalTimestamp(context)); } else { @@ -82,7 +85,9 @@ public DataSet getBatchMarkersOrderedByColumn(MigrationContext context, MarkersQ private Instant getIncrementalTimestamp(MigrationContext context) { Instant incrementalTimestamp = context.getIncrementalTimestamp(); if (incrementalTimestamp == null) { - throw new IllegalStateException("Timestamp cannot be null in incremental mode. 
Set a timestamp using the property " + CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_TIMESTAMP); + throw new IllegalStateException( + "Timestamp cannot be null in incremental mode. Set a timestamp using the property " + + CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_TIMESTAMP); } return incrementalTimestamp; } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataCopyMethod.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataCopyMethod.java new file mode 100644 index 0000000..f409f2d --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataCopyMethod.java @@ -0,0 +1,11 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.concurrent; + +public enum DataCopyMethod { + SEEK, OFFSET, DEFAULT +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataPipe.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataPipe.java index fd00539..d47de79 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataPipe.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataPipe.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -9,8 +9,9 @@ import javax.annotation.concurrent.ThreadSafe; /** - * Used to separate database reading and writing operations, after reading data from the DB, the result - * is put to the pipe and can be used by the database writer later on -> asynchronously + * Used to separate database reading and writing operations, after reading data + * from the DB, the result is put to the pipe and can be used by the database + * writer later on -> asynchronously * * @param */ @@ -21,4 +22,8 @@ public interface DataPipe { void put(MaybeFinished value) throws Exception; MaybeFinished get() throws Exception; + + int size(); + + int getWaitersCount(); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataPipeFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataPipeFactory.java index 77bdcca..381964c 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataPipeFactory.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataPipeFactory.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -7,11 +7,10 @@ package com.sap.cx.boosters.commercedbsync.concurrent; import com.sap.cx.boosters.commercedbsync.context.CopyContext; -import com.sap.cx.boosters.commercedbsync.dataset.DataSet; import javax.annotation.concurrent.ThreadSafe; @ThreadSafe public interface DataPipeFactory { - DataPipe create(CopyContext context, CopyContext.DataCopyItem item) throws Exception; + DataPipe create(CopyContext context, CopyContext.DataCopyItem item) throws Exception; } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataThreadPoolConfigBuilder.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataThreadPoolConfigBuilder.java new file mode 100644 index 0000000..5c56806 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataThreadPoolConfigBuilder.java @@ -0,0 +1,28 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.concurrent; + +import com.sap.cx.boosters.commercedbsync.DataThreadPoolConfig; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; + +public class DataThreadPoolConfigBuilder { + + private final DataThreadPoolConfig config; + + public DataThreadPoolConfigBuilder(MigrationContext context) { + config = new DataThreadPoolConfig(); + } + + public DataThreadPoolConfigBuilder withPoolSize(int poolSize) { + config.setPoolSize(poolSize); + return this; + } + + public DataThreadPoolConfig build() { + return config; + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataThreadPoolFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataThreadPoolFactory.java new file mode 100644 index 0000000..aea73f7 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataThreadPoolFactory.java @@ -0,0 +1,19 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.concurrent; + +import com.sap.cx.boosters.commercedbsync.context.CopyContext; +import com.sap.cx.boosters.commercedbsync.DataThreadPoolConfig; +import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; + +public interface DataThreadPoolFactory { + ThreadPoolTaskExecutor create(CopyContext context, DataThreadPoolConfig config); + + void destroy(ThreadPoolTaskExecutor executor); + + DataThreadPoolMonitor getMonitor(); +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataThreadPoolMonitor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataThreadPoolMonitor.java new file mode 100644 index 0000000..d4c262f --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataThreadPoolMonitor.java @@ -0,0 +1,19 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.concurrent; + +import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; + +public interface DataThreadPoolMonitor { + void subscribe(ThreadPoolTaskExecutor executor); + + void unsubscribe(ThreadPoolTaskExecutor executor); + + int getActiveCount(); + + int getMaxPoolSize(); +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataWorkerExecutor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataWorkerExecutor.java index 019ee9a..d9e1a8c 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataWorkerExecutor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataWorkerExecutor.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataWorkerPoolFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataWorkerPoolFactory.java index 64e7f0c..8a34d7a 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataWorkerPoolFactory.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/DataWorkerPoolFactory.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/MDCTaskDecorator.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/MDCTaskDecorator.java index 8d37302..2c06238 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/MDCTaskDecorator.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/MDCTaskDecorator.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/MaybeFinished.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/MaybeFinished.java index e6458da..d1354cc 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/MaybeFinished.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/MaybeFinished.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -7,12 +7,13 @@ package com.sap.cx.boosters.commercedbsync.concurrent; /** - * MaybeFinished keeps track status of the data set that is currently being processed -> if all is ok, - * then status will be done, if theres an exception, it will be poison + * MaybeFinished keeps track of the status of the data set that is currently being + * processed -> if all is ok, then the status will be done; if there's an exception, + * it will be poison * * @param */ -public class MaybeFinished { +public final class MaybeFinished { private final T value; private final boolean done; private final boolean poison; diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/PipeAbortedException.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/PipeAbortedException.java index 8fe142b..3ad3605 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/PipeAbortedException.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/PipeAbortedException.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataPipe.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataPipe.java index 6d07f3f..bff9002 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataPipe.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataPipe.java @@ -1,24 +1,25 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors.
* License: Apache-2.0 * */ package com.sap.cx.boosters.commercedbsync.concurrent.impl; +import com.sap.cx.boosters.commercedbsync.concurrent.DataPipe; import com.sap.cx.boosters.commercedbsync.concurrent.MaybeFinished; +import com.sap.cx.boosters.commercedbsync.concurrent.PipeAbortedException; import com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants; +import com.sap.cx.boosters.commercedbsync.context.CopyContext; import com.sap.cx.boosters.commercedbsync.scheduler.DatabaseCopyScheduler; import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTaskRepository; -import com.sap.cx.boosters.commercedbsync.concurrent.DataPipe; -import com.sap.cx.boosters.commercedbsync.concurrent.PipeAbortedException; -import com.sap.cx.boosters.commercedbsync.context.CopyContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.util.concurrent.ArrayBlockingQueue; import java.util.concurrent.BlockingQueue; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; public class DefaultDataPipe implements DataPipe { @@ -27,12 +28,14 @@ public class DefaultDataPipe implements DataPipe { private final BlockingQueue> queue; private final int defaultTimeout; private final AtomicReference abortException = new AtomicReference<>(); + private final AtomicInteger size = new AtomicInteger(); private final CopyContext context; private final CopyContext.DataCopyItem copyItem; private final DatabaseCopyTaskRepository taskRepository; private final DatabaseCopyScheduler scheduler; - public DefaultDataPipe(DatabaseCopyScheduler scheduler, DatabaseCopyTaskRepository taskRepository, CopyContext context, CopyContext.DataCopyItem copyItem, int timeoutInSeconds, int capacity) { + public DefaultDataPipe(DatabaseCopyScheduler scheduler, DatabaseCopyTaskRepository taskRepository, + CopyContext context, CopyContext.DataCopyItem copyItem, int timeoutInSeconds, int capacity) { this.taskRepository = taskRepository; this.scheduler = scheduler; this.context = context; @@ -57,25 +60,39 @@ public void requestAbort(Exception cause) { LOG.warn("could not update error status!", e); } try { - this.queue.offer(MaybeFinished.poison(), defaultTimeout, TimeUnit.SECONDS); - } catch (InterruptedException e) { - LOG.warn("Could not flush pipe with poison", e); + flushPipe(); + } catch (Exception e) { + LOG.warn("Could not flush pipe", e); } } } + private void flushPipe() throws Exception { + // make sure waiting queue offers can be flushed + while (getWaitersCount() > 0) { + queue.poll(defaultTimeout, TimeUnit.SECONDS); + size.decrementAndGet(); + } + queue.clear(); + } + private boolean isAborted() throws Exception { if (this.abortException.get() == null && scheduler.isAborted(this.context)) { - this.requestAbort(new PipeAbortedException("Migration aborted")); + requestAbort(new PipeAbortedException("Migration aborted")); } return this.abortException.get() != null; } - @Override - public void put(MaybeFinished value) throws Exception { + private void assertPipeNotAborted() throws Exception { if (isAborted()) { - throw new PipeAbortedException("pipe aborted", this.abortException.get()); + throw new PipeAbortedException("Pipe aborted", this.abortException.get()); } + } + + @Override + public void put(MaybeFinished value) throws Exception { + assertPipeNotAborted(); + size.incrementAndGet(); if (!queue.offer(value, defaultTimeout, TimeUnit.SECONDS)) { throw new RuntimeException("cannot put new item in time"); } @@ -83,16 +100,25 @@ 
public void put(MaybeFinished value) throws Exception { @Override public MaybeFinished get() throws Exception { - if (isAborted()) { - throw new PipeAbortedException("pipe aborted", this.abortException.get()); - } + assertPipeNotAborted(); MaybeFinished element = queue.poll(defaultTimeout, TimeUnit.SECONDS); - if (isAborted()) { - throw new PipeAbortedException("pipe aborted", this.abortException.get()); - } + size.decrementAndGet(); if (element == null) { - throw new RuntimeException(String.format("cannot get new item in time. Consider increasing the value of the property '%s' or '%s'", CommercedbsyncConstants.MIGRATION_DATA_PIPE_TIMEOUT, CommercedbsyncConstants.MIGRATION_DATA_PIPE_CAPACITY)); + throw new RuntimeException(String.format( + "cannot get new item in time. Consider increasing the value of the property '%s' or '%s'", + CommercedbsyncConstants.MIGRATION_DATA_PIPE_TIMEOUT, + CommercedbsyncConstants.MIGRATION_DATA_PIPE_CAPACITY)); } return element; } + + @Override + public int size() { + return size.get(); + } + + @Override + public int getWaitersCount() { + return size.get() - queue.size(); + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataPipeFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataPipeFactory.java index 2a37aa0..fd59dcf 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataPipeFactory.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataPipeFactory.java @@ -1,38 +1,45 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ package com.sap.cx.boosters.commercedbsync.concurrent.impl; +import com.google.common.collect.Lists; +import com.sap.cx.boosters.commercedbsync.adapter.DataRepositoryAdapter; +import com.sap.cx.boosters.commercedbsync.adapter.impl.ContextualDataRepositoryAdapter; +import com.sap.cx.boosters.commercedbsync.concurrent.DataCopyMethod; +import com.sap.cx.boosters.commercedbsync.concurrent.DataPipe; +import com.sap.cx.boosters.commercedbsync.concurrent.DataPipeFactory; +import com.sap.cx.boosters.commercedbsync.concurrent.DataThreadPoolConfigBuilder; +import com.sap.cx.boosters.commercedbsync.concurrent.DataThreadPoolFactory; import com.sap.cx.boosters.commercedbsync.concurrent.DataWorkerExecutor; import com.sap.cx.boosters.commercedbsync.concurrent.MaybeFinished; +import com.sap.cx.boosters.commercedbsync.concurrent.impl.task.BatchMarkerDataReaderTask; +import com.sap.cx.boosters.commercedbsync.concurrent.impl.task.BatchOffsetDataReaderTask; +import com.sap.cx.boosters.commercedbsync.concurrent.impl.task.DataReaderTask; +import com.sap.cx.boosters.commercedbsync.concurrent.impl.task.DefaultDataReaderTask; +import com.sap.cx.boosters.commercedbsync.concurrent.impl.task.PipeTaskContext; +import com.sap.cx.boosters.commercedbsync.context.CopyContext; import com.sap.cx.boosters.commercedbsync.dataset.DataSet; import com.sap.cx.boosters.commercedbsync.performance.PerformanceCategory; import com.sap.cx.boosters.commercedbsync.performance.PerformanceRecorder; -import com.sap.cx.boosters.commercedbsync.performance.PerformanceUnit; import com.sap.cx.boosters.commercedbsync.scheduler.DatabaseCopyScheduler; import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTaskRepository; import 
com.sap.cx.boosters.commercedbsync.views.TableViewGenerator; import org.apache.commons.lang3.tuple.Pair; +import org.fest.util.Collections; +import com.sap.cx.boosters.commercedbsync.DataThreadPoolConfig; import com.sap.cx.boosters.commercedbsync.MarkersQueryDefinition; -import com.sap.cx.boosters.commercedbsync.OffsetQueryDefinition; -import com.sap.cx.boosters.commercedbsync.SeekQueryDefinition; -import com.sap.cx.boosters.commercedbsync.adapter.DataRepositoryAdapter; -import com.sap.cx.boosters.commercedbsync.adapter.impl.ContextualDataRepositoryAdapter; -import com.sap.cx.boosters.commercedbsync.concurrent.DataPipe; -import com.sap.cx.boosters.commercedbsync.concurrent.DataPipeFactory; -import com.sap.cx.boosters.commercedbsync.concurrent.DataWorkerPoolFactory; -import com.sap.cx.boosters.commercedbsync.concurrent.RetriableTask; -import com.sap.cx.boosters.commercedbsync.context.CopyContext; -import com.sap.cx.boosters.commercedbsync.context.MigrationContext; +import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyBatch; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.core.task.AsyncTaskExecutor; import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; +import java.util.ArrayList; import java.util.LinkedHashSet; import java.util.List; import java.util.Optional; @@ -46,9 +53,10 @@ public class DefaultDataPipeFactory implements DataPipeFactory { private final DatabaseCopyTaskRepository taskRepository; private final DatabaseCopyScheduler scheduler; private final AsyncTaskExecutor executor; - private final DataWorkerPoolFactory dataReadWorkerPoolFactory; + private final DataThreadPoolFactory dataReadWorkerPoolFactory; - public DefaultDataPipeFactory(DatabaseCopyScheduler scheduler, DatabaseCopyTaskRepository taskRepository, AsyncTaskExecutor executor, DataWorkerPoolFactory dataReadWorkerPoolFactory) { + public DefaultDataPipeFactory(DatabaseCopyScheduler scheduler, DatabaseCopyTaskRepository taskRepository, + AsyncTaskExecutor executor, DataThreadPoolFactory dataReadWorkerPoolFactory) { this.scheduler = scheduler; this.taskRepository = taskRepository; this.executor = executor; @@ -59,8 +67,11 @@ public DefaultDataPipeFactory(DatabaseCopyScheduler scheduler, DatabaseCopyTaskR public DataPipe create(CopyContext context, CopyContext.DataCopyItem item) throws Exception { int dataPipeTimeout = context.getMigrationContext().getDataPipeTimeout(); int dataPipeCapacity = context.getMigrationContext().getDataPipeCapacity(); - DataPipe pipe = new DefaultDataPipe<>(scheduler, taskRepository, context, item, dataPipeTimeout, dataPipeCapacity); - ThreadPoolTaskExecutor taskExecutor = dataReadWorkerPoolFactory.create(context); + DataPipe pipe = new DefaultDataPipe<>(scheduler, taskRepository, context, item, dataPipeTimeout, + dataPipeCapacity); + DataThreadPoolConfig threadPoolConfig = new DataThreadPoolConfigBuilder(context.getMigrationContext()) + .withPoolSize(context.getMigrationContext().getMaxParallelReaderWorkers()).build(); + final ThreadPoolTaskExecutor taskExecutor = dataReadWorkerPoolFactory.create(context, threadPoolConfig); DataWorkerExecutor workerExecutor = new DefaultDataWorkerExecutor<>(taskExecutor); try { executor.submit(() -> { @@ -75,9 +86,12 @@ public DataPipe create(CopyContext context, CopyContext.DataCopyItem it } catch (Exception p) { LOG.error("Cannot contaminate pipe ", p); } + if (e instanceof InterruptedException) { + Thread.currentThread().interrupt(); + } } finally { if (taskExecutor != null) { - 
taskExecutor.shutdown(); + dataReadWorkerPoolFactory.destroy(taskExecutor); } } }); @@ -87,59 +101,110 @@ public DataPipe create(CopyContext context, CopyContext.DataCopyItem it return pipe; } - private void scheduleWorkers(CopyContext context, DataWorkerExecutor workerExecutor, DataPipe pipe, CopyContext.DataCopyItem copyItem) throws Exception { - DataRepositoryAdapter dataRepositoryAdapter = new ContextualDataRepositoryAdapter(context.getMigrationContext().getDataSourceRepository()); + private void scheduleWorkers(CopyContext context, DataWorkerExecutor workerExecutor, + DataPipe pipe, CopyContext.DataCopyItem copyItem) throws Exception { + DataRepositoryAdapter dataRepositoryAdapter = new ContextualDataRepositoryAdapter( + context.getMigrationContext().getDataSourceRepository()); String table = copyItem.getSourceItem(); long totalRows = copyItem.getRowCount(); long pageSize = context.getMigrationContext().getReaderBatchSize(); try { - PerformanceRecorder recorder = context.getPerformanceProfiler().createRecorder(PerformanceCategory.DB_READ, table); + PerformanceRecorder recorder = context.getPerformanceProfiler().createRecorder(PerformanceCategory.DB_READ, + table); recorder.start(); - PipeTaskContext pipeTaskContext = new PipeTaskContext(context, pipe, table, dataRepositoryAdapter, pageSize, recorder); + PipeTaskContext pipeTaskContext = new PipeTaskContext(context, pipe, table, dataRepositoryAdapter, pageSize, + recorder, taskRepository); String batchColumn = ""; // help.sap.com/viewer/d0224eca81e249cb821f2cdf45a82ace/LATEST/en-US/08a27931a21441b59094c8a6aa2a880e.html - if (context.getMigrationContext().getDataSourceRepository().isAuditTable(table) && - context.getMigrationContext().getDataSourceRepository().getAllColumnNames(table).contains("ID")) { - batchColumn = "ID"; - } else if (context.getMigrationContext().getDataSourceRepository().getAllColumnNames(table).contains("PK")) { + final Set allColumnNames = context.getMigrationContext().getDataSourceRepository() + .getAllColumnNames(table); + if (allColumnNames.contains("PK")) { batchColumn = "PK"; + } else if (allColumnNames.contains("ID") + && context.getMigrationContext().getDataSourceRepository().isAuditTable(table)) { + batchColumn = "ID"; } LOG.debug("Using batchColumn: {}", batchColumn.isEmpty() ? "NONE" : batchColumn); if (batchColumn.isEmpty()) { // trying offset queries with unique index columns Set batchColumns; - DataSet uniqueColumns = context.getMigrationContext().getDataSourceRepository().getUniqueColumns(TableViewGenerator.getTableNameForView(table, context.getMigrationContext())); + DataSet uniqueColumns = context.getMigrationContext().getDataSourceRepository() + .getUniqueColumns(TableViewGenerator.getTableNameForView(table, context.getMigrationContext())); if (uniqueColumns.isNotEmpty()) { if (uniqueColumns.getColumnCount() == 0) { - throw new IllegalStateException("Corrupt dataset retrieved. Dataset should have information about unique columns"); + throw new IllegalStateException( + "Corrupt dataset retrieved. 
Dataset should have information about unique columns"); } - batchColumns = uniqueColumns.getAllResults().stream().map(row -> String.valueOf(row.get(0))).collect(Collectors.toCollection(LinkedHashSet::new)); - for (int offset = 0; offset < totalRows; offset += pageSize) { - DataReaderTask dataReaderTask = new BatchOffsetDataReaderTask(pipeTaskContext, offset, batchColumns); + batchColumns = uniqueColumns.getAllResults().stream().map(row -> String.valueOf(row.get(0))) + .collect(Collectors.toCollection(LinkedHashSet::new)); + taskRepository.updateTaskCopyMethod(context, copyItem, DataCopyMethod.OFFSET.toString()); + taskRepository.updateTaskKeyColumns(context, copyItem, batchColumns); + + List batches = null; + if (context.getMigrationContext().isSchedulerResumeEnabled()) { + Set pendingBatchesForPipeline = taskRepository + .findPendingBatchesForPipeline(context, copyItem); + batches = pendingBatchesForPipeline.stream() + .map(b -> Long.valueOf(b.getLowerBoundary().toString())).collect(Collectors.toList()); + taskRepository.resetPipelineBatches(context, copyItem); + } else { + batches = new ArrayList<>(); + for (long offset = 0; offset < totalRows; offset += pageSize) { + batches.add(offset); + } + } + + for (int batchId = 0; batchId < batches.size(); batchId++) { + long offset = batches.get(batchId); + DataReaderTask dataReaderTask = new BatchOffsetDataReaderTask(pipeTaskContext, batchId, offset, + batchColumns); + taskRepository.scheduleBatch(context, copyItem, batchId, offset, offset + pageSize); workerExecutor.safelyExecute(dataReaderTask); } } else { - //If no unique columns available to do batch sorting, fallback to read all - LOG.warn("Reading all rows at once without batching for table {}. Memory consumption might be negatively affected", table); + // If no unique columns available to do batch sorting, fallback to read all + LOG.warn( + "Reading all rows at once without batching for table {}. 
Memory consumption might be negatively affected", + table); + taskRepository.updateTaskCopyMethod(context, copyItem, DataCopyMethod.DEFAULT.toString()); + if (context.getMigrationContext().isSchedulerResumeEnabled()) { + taskRepository.resetPipelineBatches(context, copyItem); + } + taskRepository.scheduleBatch(context, copyItem, 0, 0, totalRows); DataReaderTask dataReaderTask = new DefaultDataReaderTask(pipeTaskContext); workerExecutor.safelyExecute(dataReaderTask); } } else { // do the pagination by value comparison - MarkersQueryDefinition queryDefinition = new MarkersQueryDefinition(); - queryDefinition.setTable(table); - queryDefinition.setColumn(batchColumn); - queryDefinition.setBatchSize(pageSize); - queryDefinition.setDeletionEnabled(context.getMigrationContext().isDeletionEnabled()); - queryDefinition.setLpTableEnabled(context.getMigrationContext().isLpTableMigrationEnabled()); - DataSet batchMarkers = dataRepositoryAdapter.getBatchMarkersOrderedByColumn(context.getMigrationContext(), queryDefinition); - List> batchMarkersList = batchMarkers.getAllResults(); - if (batchMarkersList.isEmpty()) { - throw new RuntimeException("Could not retrieve batch values for table " + table); + taskRepository.updateTaskCopyMethod(context, copyItem, DataCopyMethod.SEEK.toString()); + taskRepository.updateTaskKeyColumns(context, copyItem, Lists.newArrayList(batchColumn)); + + List> batchMarkersList = null; + if (context.getMigrationContext().isSchedulerResumeEnabled()) { + batchMarkersList = new ArrayList<>(); + Set pendingBatchesForPipeline = taskRepository + .findPendingBatchesForPipeline(context, copyItem); + batchMarkersList.addAll(pendingBatchesForPipeline.stream() + .map(b -> Collections.list(b.getLowerBoundary())).collect(Collectors.toList())); + taskRepository.resetPipelineBatches(context, copyItem); + } else { + MarkersQueryDefinition queryDefinition = new MarkersQueryDefinition(); + queryDefinition.setTable(table); + queryDefinition.setColumn(batchColumn); + queryDefinition.setBatchSize(pageSize); + queryDefinition.setDeletionEnabled(context.getMigrationContext().isDeletionEnabled()); + queryDefinition.setLpTableEnabled(context.getMigrationContext().isLpTableMigrationEnabled()); + DataSet batchMarkers = dataRepositoryAdapter + .getBatchMarkersOrderedByColumn(context.getMigrationContext(), queryDefinition); + batchMarkersList = batchMarkers.getAllResults(); + if (batchMarkersList.isEmpty()) { + throw new RuntimeException("Could not retrieve batch values for table " + table); + } } + for (int i = 0; i < batchMarkersList.size(); i++) { List lastBatchMarkerRow = batchMarkersList.get(i); Optional> nextBatchMarkerRow = Optional.empty(); @@ -147,8 +212,20 @@ private void scheduleWorkers(CopyContext context, DataWorkerExecutor wo if (nextIndex < batchMarkersList.size()) { nextBatchMarkerRow = Optional.of(batchMarkersList.get(nextIndex)); } - DataReaderTask dataReaderTask = new BatchMarkerDataReaderTask(pipeTaskContext, batchColumn, Pair.of(lastBatchMarkerRow, nextBatchMarkerRow)); - workerExecutor.safelyExecute(dataReaderTask); + if (!Collections.isEmpty(lastBatchMarkerRow)) { + Object lastBatchValue = lastBatchMarkerRow.get(0); + Pair batchMarkersPair = Pair.of(lastBatchValue, + nextBatchMarkerRow.map(v -> v.get(0)).orElseGet(() -> null)); + DataReaderTask dataReaderTask = new BatchMarkerDataReaderTask(pipeTaskContext, i, batchColumn, + batchMarkersPair); + // After creating the task, we register the batch in the db for later use if + // necessary + taskRepository.scheduleBatch(context, copyItem, 
i, batchMarkersPair.getLeft(), + batchMarkersPair.getRight()); + workerExecutor.safelyExecute(dataReaderTask); + } else { + throw new IllegalArgumentException("Invalid batch marker passed to task"); + } } } } catch (Exception ex) { @@ -160,167 +237,4 @@ private void scheduleWorkers(CopyContext context, DataWorkerExecutor wo throw new RuntimeException("Exception while preparing reader tasks", ex); } } - - private static abstract class DataReaderTask extends RetriableTask { - private static final Logger LOG = LoggerFactory.getLogger(DataReaderTask.class); - - private PipeTaskContext pipeTaskContext; - - public DataReaderTask(PipeTaskContext pipeTaskContext) { - super(pipeTaskContext.getContext(), pipeTaskContext.getTable()); - this.pipeTaskContext = pipeTaskContext; - } - - public PipeTaskContext getPipeTaskContext() { - return pipeTaskContext; - } - } - - private static class DefaultDataReaderTask extends DataReaderTask { - - public DefaultDataReaderTask(PipeTaskContext pipeTaskContext) { - super(pipeTaskContext); - } - - @Override - protected Boolean internalRun() throws Exception { - process(); - return Boolean.TRUE; - } - - private void process() throws Exception { - MigrationContext migrationContext = getPipeTaskContext().getContext().getMigrationContext(); - DataSet all = getPipeTaskContext().getDataRepositoryAdapter().getAll(migrationContext, getPipeTaskContext().getTable()); - getPipeTaskContext().getRecorder().record(PerformanceUnit.ROWS, all.getAllResults().size()); - getPipeTaskContext().getPipe().put(MaybeFinished.of(all)); - } - } - - private static class BatchOffsetDataReaderTask extends DataReaderTask { - - private long offset = 0; - private Set batchColumns; - - public BatchOffsetDataReaderTask(PipeTaskContext pipeTaskContext, long offset, Set batchColumns) { - super(pipeTaskContext); - this.offset = offset; - this.batchColumns = batchColumns; - } - - @Override - protected Boolean internalRun() throws Exception { - process(); - return Boolean.TRUE; - } - - private void process() throws Exception { - DataRepositoryAdapter adapter = getPipeTaskContext().getDataRepositoryAdapter(); - CopyContext context = getPipeTaskContext().getContext(); - String table = getPipeTaskContext().getTable(); - long pageSize = getPipeTaskContext().getPageSize(); - OffsetQueryDefinition queryDefinition = new OffsetQueryDefinition(); - queryDefinition.setTable(table); - queryDefinition.setOrderByColumns(batchColumns.stream().collect(Collectors.joining(","))); - queryDefinition.setBatchSize(pageSize); - queryDefinition.setOffset(offset); - queryDefinition.setDeletionEnabled(context.getMigrationContext().isDeletionEnabled()); - queryDefinition.setLpTableEnabled(context.getMigrationContext().isLpTableMigrationEnabled()); - DataSet result = adapter.getBatchWithoutIdentifier(context.getMigrationContext(), queryDefinition); - getPipeTaskContext().getRecorder().record(PerformanceUnit.ROWS, result.getAllResults().size()); - getPipeTaskContext().getPipe().put(MaybeFinished.of(result)); - } - } - - private static class BatchMarkerDataReaderTask extends DataReaderTask { - - private final String batchColumn; - private final Pair, Optional>> batchMarkersPair; - - public BatchMarkerDataReaderTask(PipeTaskContext pipeTaskContext, String batchColumn, Pair, Optional>> batchMarkersPair) { - super(pipeTaskContext); - this.batchColumn = batchColumn; - this.batchMarkersPair = batchMarkersPair; - } - - @Override - protected Boolean internalRun() throws Exception { - List lastBatchMarker = batchMarkersPair.getLeft(); - 
Optional> nextBatchMarker = batchMarkersPair.getRight(); - if (lastBatchMarker != null && lastBatchMarker.size() == 2) { - Object lastBatchValue = lastBatchMarker.get(0); - process(lastBatchValue, nextBatchMarker.map(v -> v.get(0))); - return Boolean.TRUE; - } else { - throw new IllegalArgumentException("Invalid batch marker passed to task"); - } - } - - private void process(Object lastValue, Optional nextValue) throws Exception { - CopyContext ctx = getPipeTaskContext().getContext(); - DataRepositoryAdapter adapter = getPipeTaskContext().getDataRepositoryAdapter(); - String table = getPipeTaskContext().getTable(); - long pageSize = getPipeTaskContext().getPageSize(); - SeekQueryDefinition queryDefinition = new SeekQueryDefinition(); - queryDefinition.setTable(table); - queryDefinition.setColumn(batchColumn); - queryDefinition.setLastColumnValue(lastValue); - queryDefinition.setNextColumnValue(nextValue.orElseGet(() -> null)); - queryDefinition.setBatchSize(pageSize); - queryDefinition.setDeletionEnabled(ctx.getMigrationContext().isDeletionEnabled()); - queryDefinition.setLpTableEnabled(ctx.getMigrationContext().isLpTableMigrationEnabled()); - if(LOG.isDebugEnabled()) - { - LOG.debug("Executing markers query for {} with lastvalue: {}, nextvalue: {}, batchsize: {}", table, lastValue, nextValue.orElseGet(() -> null), pageSize); - } - DataSet page = adapter.getBatchOrderedByColumn(ctx.getMigrationContext(), queryDefinition); - getPipeTaskContext().getRecorder().record(PerformanceUnit.ROWS, pageSize); - getPipeTaskContext().getPipe().put(MaybeFinished.of(page)); - } - } - - private static class PipeTaskContext { - private CopyContext context; - private DataPipe pipe; - private String table; - private DataRepositoryAdapter dataRepositoryAdapter; - private long pageSize; - private PerformanceRecorder recorder; - - public PipeTaskContext(CopyContext context, DataPipe pipe, String table, DataRepositoryAdapter dataRepositoryAdapter, long pageSize, PerformanceRecorder recorder) { - this.context = context; - this.pipe = pipe; - this.table = table; - this.dataRepositoryAdapter = dataRepositoryAdapter; - this.pageSize = pageSize; - this.recorder = recorder; - } - - public CopyContext getContext() { - return context; - } - - public DataPipe getPipe() { - return pipe; - } - - public String getTable() { - return table; - } - - public DataRepositoryAdapter getDataRepositoryAdapter() { - return dataRepositoryAdapter; - } - - public long getPageSize() { - return pageSize; - } - - public PerformanceRecorder getRecorder() { - return recorder; - } - - } - } - - diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataThreadPoolFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataThreadPoolFactory.java new file mode 100644 index 0000000..a67785f --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataThreadPoolFactory.java @@ -0,0 +1,91 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.concurrent.impl; + +import com.sap.cx.boosters.commercedbsync.context.CopyContext; +import com.sap.cx.boosters.commercedbsync.DataThreadPoolConfig; +import com.sap.cx.boosters.commercedbsync.concurrent.DataThreadPoolFactory; +import com.sap.cx.boosters.commercedbsync.concurrent.DataThreadPoolMonitor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; +import org.springframework.core.task.TaskDecorator; +import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; + +public class DefaultDataThreadPoolFactory implements DataThreadPoolFactory { + + private static final Logger LOG = LoggerFactory.getLogger(DefaultDataThreadPoolFactory.class); + private static final int MAX_CAPACITY = 2147483647; + + private final DataThreadPoolMonitor monitor; + + private final TaskDecorator taskDecorator; + private final String threadNamePrefix; + private final int corePoolSize; + private final int maxPoolSize; + private final int keepAliveSeconds; + private final boolean allowCoreThreadTimeOut; + private final boolean waitForTasksToCompleteOnShutdown; + private int queueCapacity = MAX_CAPACITY; + + public DefaultDataThreadPoolFactory(final TaskDecorator taskDecorator, final String threadNamePrefix, + final int maxPoolSize, final int keepAliveSeconds, final boolean allowCoreThreadTimeOut, + final boolean waitForTasksToCompleteOnShutdown, final boolean queueable) { + this.monitor = new DefaultDataThreadPoolMonitor(); + this.taskDecorator = taskDecorator; + this.threadNamePrefix = threadNamePrefix; + this.maxPoolSize = maxPoolSize; + this.keepAliveSeconds = keepAliveSeconds; + this.allowCoreThreadTimeOut = allowCoreThreadTimeOut; + this.waitForTasksToCompleteOnShutdown = waitForTasksToCompleteOnShutdown; + this.queueCapacity = getQueueCapacity(queueable); + this.corePoolSize = maxPoolSize; + } + + @Override + public ThreadPoolTaskExecutor create(final CopyContext context, final DataThreadPoolConfig config) { + final ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); + executor.setTaskDecorator(taskDecorator); + executor.setThreadNamePrefix(threadNamePrefix); + + executor.setCorePoolSize(defaultIfNull(config.getPoolSize(), corePoolSize)); + executor.setMaxPoolSize(defaultIfNull(config.getPoolSize(), corePoolSize)); + executor.setQueueCapacity(queueCapacity); + executor.setKeepAliveSeconds(keepAliveSeconds); + executor.setAllowCoreThreadTimeOut(allowCoreThreadTimeOut); + executor.setWaitForTasksToCompleteOnShutdown(waitForTasksToCompleteOnShutdown); + if (waitForTasksToCompleteOnShutdown) { + executor.setAwaitTerminationSeconds(Integer.MAX_VALUE); + } + executor.initialize(); + monitor.subscribe(executor); + LOG.debug("Creating executor with parameters: Pool Size:{}", corePoolSize); + return executor; + } + + @Override + public void destroy(ThreadPoolTaskExecutor executor) { + executor.shutdown(); + monitor.unsubscribe(executor); + } + + @Override + public DataThreadPoolMonitor getMonitor() { + return monitor; + } + + private T defaultIfNull(T value, T def) { + if (value == null) { + return def; + } else { + return value; + } + } + + private int getQueueCapacity(boolean queueable) { + return queueable ? 
this.queueCapacity : 0; + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataThreadPoolMonitor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataThreadPoolMonitor.java new file mode 100644 index 0000000..2777eb4 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataThreadPoolMonitor.java @@ -0,0 +1,47 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.concurrent.impl; + +import com.sap.cx.boosters.commercedbsync.concurrent.DataThreadPoolMonitor; +import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +/** + * + */ +public class DefaultDataThreadPoolMonitor implements DataThreadPoolMonitor { + + private final List executors; + + public DefaultDataThreadPoolMonitor() { + this.executors = Collections.synchronizedList(new ArrayList<>()); + } + + @Override + public void subscribe(ThreadPoolTaskExecutor executor) { + executors.add(executor); + } + + @Override + public void unsubscribe(ThreadPoolTaskExecutor executor) { + executors.remove(executor); + } + + @Override + public int getActiveCount() { + return executors.stream().mapToInt(ThreadPoolTaskExecutor::getActiveCount).sum(); + } + + @Override + public int getMaxPoolSize() { + return executors.stream().mapToInt(ThreadPoolTaskExecutor::getMaxPoolSize).sum(); + } + +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataWorkerExecutor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataWorkerExecutor.java index 62d53ce..71f75c6 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataWorkerExecutor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataWorkerExecutor.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -24,9 +24,8 @@ public class DefaultDataWorkerExecutor implements DataWorkerExecutor { private static final Logger LOG = LoggerFactory.getLogger(DefaultDataWorkerExecutor.class); - private AsyncTaskExecutor executor; - private Queue> futures = new ArrayDeque<>(); - + private final AsyncTaskExecutor executor; + private final Queue> futures = new ArrayDeque<>(); public DefaultDataWorkerExecutor(AsyncTaskExecutor executor) { this.executor = executor; diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataWorkerPoolFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataWorkerPoolFactory.java deleted file mode 100644 index 8de0f1e..0000000 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/DefaultDataWorkerPoolFactory.java +++ /dev/null @@ -1,48 +0,0 @@ -/* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
- * License: Apache-2.0 - * - */ - -package com.sap.cx.boosters.commercedbsync.concurrent.impl; - -import com.sap.cx.boosters.commercedbsync.concurrent.DataWorkerPoolFactory; -import com.sap.cx.boosters.commercedbsync.context.CopyContext; -import org.springframework.core.task.TaskDecorator; -import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; - -public class DefaultDataWorkerPoolFactory implements DataWorkerPoolFactory { - - private TaskDecorator taskDecorator; - private String threadNamePrefix; - private int corePoolSize; - private int maxPoolSize; - private int keepAliveSeconds; - private int queueCapacity = 2147483647; - - public DefaultDataWorkerPoolFactory(TaskDecorator taskDecorator, String threadNamePrefix, int maxPoolSize, int keepAliveSeconds, boolean queueable) { - this.taskDecorator = taskDecorator; - this.threadNamePrefix = threadNamePrefix; - this.maxPoolSize = maxPoolSize; - this.keepAliveSeconds = keepAliveSeconds; - this.queueCapacity = queueable ? this.queueCapacity : 0; - this.corePoolSize = maxPoolSize; - } - - @Override - public ThreadPoolTaskExecutor create(CopyContext context) { - ThreadPoolTaskExecutor executor = new ThreadPoolTaskExecutor(); - executor.setTaskDecorator(taskDecorator); - executor.setThreadNamePrefix(threadNamePrefix); - executor.setCorePoolSize(corePoolSize); - executor.setMaxPoolSize(maxPoolSize); - executor.setQueueCapacity(queueCapacity); - executor.setKeepAliveSeconds(keepAliveSeconds); - executor.setAllowCoreThreadTimeOut(true); - executor.setWaitForTasksToCompleteOnShutdown(true); - executor.setAwaitTerminationSeconds(Integer.MAX_VALUE); - executor.initialize(); - return executor; - } - -} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/BatchMarkerDataReaderTask.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/BatchMarkerDataReaderTask.java new file mode 100644 index 0000000..b4087e2 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/BatchMarkerDataReaderTask.java @@ -0,0 +1,62 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.concurrent.impl.task; + +import com.sap.cx.boosters.commercedbsync.SeekQueryDefinition; +import com.sap.cx.boosters.commercedbsync.adapter.DataRepositoryAdapter; +import com.sap.cx.boosters.commercedbsync.concurrent.MaybeFinished; +import com.sap.cx.boosters.commercedbsync.context.CopyContext; +import com.sap.cx.boosters.commercedbsync.dataset.DataSet; +import com.sap.cx.boosters.commercedbsync.performance.PerformanceUnit; +import org.apache.commons.lang3.tuple.Pair; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class BatchMarkerDataReaderTask extends DataReaderTask { + private static final Logger LOG = LoggerFactory.getLogger(BatchMarkerDataReaderTask.class); + + private final String batchColumn; + private final Pair batchMarkersPair; + private final int batchId; + + public BatchMarkerDataReaderTask(PipeTaskContext pipeTaskContext, int batchId, String batchColumn, + Pair batchMarkersPair) { + super(pipeTaskContext); + this.batchId = batchId; + this.batchColumn = batchColumn; + this.batchMarkersPair = batchMarkersPair; + } + + @Override + protected Boolean internalRun() throws Exception { + process(batchMarkersPair.getLeft(), batchMarkersPair.getRight()); + return Boolean.TRUE; + } + + private void process(Object lastValue, Object nextValue) throws Exception { + CopyContext ctx = getPipeTaskContext().getContext(); + DataRepositoryAdapter adapter = getPipeTaskContext().getDataRepositoryAdapter(); + String table = getPipeTaskContext().getTable(); + long pageSize = getPipeTaskContext().getPageSize(); + SeekQueryDefinition queryDefinition = new SeekQueryDefinition(); + queryDefinition.setBatchId(batchId); + queryDefinition.setTable(table); + queryDefinition.setColumn(batchColumn); + queryDefinition.setLastColumnValue(lastValue); + queryDefinition.setNextColumnValue(nextValue); + queryDefinition.setBatchSize(pageSize); + queryDefinition.setDeletionEnabled(ctx.getMigrationContext().isDeletionEnabled()); + queryDefinition.setLpTableEnabled(ctx.getMigrationContext().isLpTableMigrationEnabled()); + if (LOG.isDebugEnabled()) { + LOG.debug("Executing markers query for {} with lastvalue: {}, nextvalue: {}, batchsize: {}", table, + lastValue, nextValue, pageSize); + } + DataSet page = adapter.getBatchOrderedByColumn(ctx.getMigrationContext(), queryDefinition); + getPipeTaskContext().getRecorder().record(PerformanceUnit.ROWS, pageSize); + getPipeTaskContext().getPipe().put(MaybeFinished.of(page)); + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/BatchOffsetDataReaderTask.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/BatchOffsetDataReaderTask.java new file mode 100644 index 0000000..67b9467 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/BatchOffsetDataReaderTask.java @@ -0,0 +1,56 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.concurrent.impl.task; + +import com.sap.cx.boosters.commercedbsync.OffsetQueryDefinition; +import com.sap.cx.boosters.commercedbsync.adapter.DataRepositoryAdapter; +import com.sap.cx.boosters.commercedbsync.concurrent.MaybeFinished; +import com.sap.cx.boosters.commercedbsync.context.CopyContext; +import com.sap.cx.boosters.commercedbsync.dataset.DataSet; +import com.sap.cx.boosters.commercedbsync.performance.PerformanceUnit; + +import java.util.Set; +import java.util.stream.Collectors; + +public class BatchOffsetDataReaderTask extends DataReaderTask { + + private final long offset; + private final Set batchColumns; + private final int batchId; + + public BatchOffsetDataReaderTask(PipeTaskContext pipeTaskContext, int batchId, long offset, + Set batchColumns) { + super(pipeTaskContext); + this.batchId = batchId; + this.offset = offset; + this.batchColumns = batchColumns; + } + + @Override + protected Boolean internalRun() throws Exception { + process(); + return Boolean.TRUE; + } + + private void process() throws Exception { + DataRepositoryAdapter adapter = getPipeTaskContext().getDataRepositoryAdapter(); + CopyContext context = getPipeTaskContext().getContext(); + String table = getPipeTaskContext().getTable(); + long pageSize = getPipeTaskContext().getPageSize(); + OffsetQueryDefinition queryDefinition = new OffsetQueryDefinition(); + queryDefinition.setBatchId(batchId); + queryDefinition.setTable(table); + queryDefinition.setOrderByColumns(batchColumns.stream().collect(Collectors.joining(","))); + queryDefinition.setBatchSize(pageSize); + queryDefinition.setOffset(offset); + queryDefinition.setDeletionEnabled(context.getMigrationContext().isDeletionEnabled()); + queryDefinition.setLpTableEnabled(context.getMigrationContext().isLpTableMigrationEnabled()); + DataSet result = adapter.getBatchWithoutIdentifier(context.getMigrationContext(), queryDefinition); + getPipeTaskContext().getRecorder().record(PerformanceUnit.ROWS, result.getAllResults().size()); + getPipeTaskContext().getPipe().put(MaybeFinished.of(result)); + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/DataReaderTask.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/DataReaderTask.java new file mode 100644 index 0000000..b57d5fa --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/DataReaderTask.java @@ -0,0 +1,21 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.concurrent.impl.task; + +public abstract class DataReaderTask extends RetriableTask { + + private final PipeTaskContext pipeTaskContext; + + protected DataReaderTask(PipeTaskContext pipeTaskContext) { + super(pipeTaskContext.getContext(), pipeTaskContext.getTable()); + this.pipeTaskContext = pipeTaskContext; + } + + public PipeTaskContext getPipeTaskContext() { + return pipeTaskContext; + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/DefaultDataReaderTask.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/DefaultDataReaderTask.java new file mode 100644 index 0000000..f111b8d --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/DefaultDataReaderTask.java @@ -0,0 +1,33 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.concurrent.impl.task; + +import com.sap.cx.boosters.commercedbsync.concurrent.MaybeFinished; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; +import com.sap.cx.boosters.commercedbsync.dataset.DataSet; +import com.sap.cx.boosters.commercedbsync.performance.PerformanceUnit; + +public class DefaultDataReaderTask extends DataReaderTask { + + public DefaultDataReaderTask(PipeTaskContext pipeTaskContext) { + super(pipeTaskContext); + } + + @Override + protected Boolean internalRun() throws Exception { + process(); + return Boolean.TRUE; + } + + private void process() throws Exception { + MigrationContext migrationContext = getPipeTaskContext().getContext().getMigrationContext(); + DataSet all = getPipeTaskContext().getDataRepositoryAdapter().getAll(migrationContext, + getPipeTaskContext().getTable()); + getPipeTaskContext().getRecorder().record(PerformanceUnit.ROWS, all.getAllResults().size()); + getPipeTaskContext().getPipe().put(MaybeFinished.of(all)); + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/PipeTaskContext.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/PipeTaskContext.java new file mode 100644 index 0000000..501c9e6 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/PipeTaskContext.java @@ -0,0 +1,64 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.concurrent.impl.task; + +import com.sap.cx.boosters.commercedbsync.adapter.DataRepositoryAdapter; +import com.sap.cx.boosters.commercedbsync.concurrent.DataPipe; +import com.sap.cx.boosters.commercedbsync.context.CopyContext; +import com.sap.cx.boosters.commercedbsync.dataset.DataSet; +import com.sap.cx.boosters.commercedbsync.performance.PerformanceRecorder; +import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTaskRepository; + +public class PipeTaskContext { + private final CopyContext context; + private final DataPipe pipe; + private final String table; + private final DataRepositoryAdapter dataRepositoryAdapter; + private final long pageSize; + private final PerformanceRecorder recorder; + private final DatabaseCopyTaskRepository taskRepository; + + public PipeTaskContext(CopyContext context, DataPipe pipe, String table, + DataRepositoryAdapter dataRepositoryAdapter, long pageSize, PerformanceRecorder recorder, + DatabaseCopyTaskRepository taskRepository) { + this.context = context; + this.pipe = pipe; + this.table = table; + this.dataRepositoryAdapter = dataRepositoryAdapter; + this.pageSize = pageSize; + this.recorder = recorder; + this.taskRepository = taskRepository; + } + + public CopyContext getContext() { + return context; + } + + public DataPipe getPipe() { + return pipe; + } + + public String getTable() { + return table; + } + + public DataRepositoryAdapter getDataRepositoryAdapter() { + return dataRepositoryAdapter; + } + + public long getPageSize() { + return pageSize; + } + + public PerformanceRecorder getRecorder() { + return recorder; + } + + public DatabaseCopyTaskRepository getTaskRepository() { + return taskRepository; + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/RetriableTask.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/RetriableTask.java similarity index 71% rename from commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/RetriableTask.java rename to commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/RetriableTask.java index 353c218..c8615b3 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/RetriableTask.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/concurrent/impl/task/RetriableTask.java @@ -1,11 +1,12 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ -package com.sap.cx.boosters.commercedbsync.concurrent; +package com.sap.cx.boosters.commercedbsync.concurrent.impl.task; +import com.sap.cx.boosters.commercedbsync.concurrent.PipeAbortedException; import com.sap.cx.boosters.commercedbsync.context.CopyContext; import org.apache.commons.lang3.exception.ExceptionUtils; import org.slf4j.Logger; @@ -17,11 +18,11 @@ public abstract class RetriableTask implements Callable { private static final Logger LOG = LoggerFactory.getLogger(RetriableTask.class); - private CopyContext context; - private String table; + private final CopyContext context; + private final String table; private int retryCount = 0; - public RetriableTask(CopyContext context, String table) { + protected RetriableTask(CopyContext context, String table) { this.context = context; this.table = table; } @@ -34,8 +35,8 @@ public Boolean call() { throw new RuntimeException("Ignore retries", e); } catch (Exception e) { if (retryCount < context.getMigrationContext().getMaxWorkerRetryAttempts()) { - LOG.warn("Retrying failed task {} for table {}. Retry count: {}. Cause: {}", getClass().getName(), table, retryCount, e); - e.printStackTrace(); + LOG.error("Retrying failed task {} for table {}. Retry count: {}. Cause: {}", getClass().getName(), + table, retryCount, e); retryCount++; return call(); } else { diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/constants/CommercedbsyncConstants.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/constants/CommercedbsyncConstants.java index 780f72b..4edc6fe 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/constants/CommercedbsyncConstants.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/constants/CommercedbsyncConstants.java @@ -1,14 +1,14 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ -package com.sap.cx.boosters.commercedbsync.constants; -import com.sap.cx.boosters.commercedbsync.constants.GeneratedCommercedbsyncConstants; +package com.sap.cx.boosters.commercedbsync.constants; /** - * Global class for all Commercedbsync constants. You can add global constants for your extension into this class. + * Global class for all Commercedbsync constants. You can add global constants + * for your extension into this class. 
*/ public final class CommercedbsyncConstants extends GeneratedCommercedbsyncConstants { public static final String EXTENSIONNAME = "commercedbsync"; @@ -20,6 +20,7 @@ public final class CommercedbsyncConstants extends GeneratedCommercedbsyncConsta public static final String MIGRATION_SCHEMA_TARGET_COLUMNS_ADD_ENABLED = "migration.schema.target.columns.add.enabled"; public static final String MIGRATION_SCHEMA_TARGET_COLUMNS_REMOVE_ENABLED = "migration.schema.target.columns.remove.enabled"; public static final String MIGRATION_TARGET_MAX_STAGE_MIGRATIONS = "migration.ds.target.db.max.stage.migrations"; + public static final String MIGRATION_DATA_EXPORT_ENABLED = "migration.data.export.enabled"; public static final String MIGRATION_SCHEMA_AUTOTRIGGER_ENABLED = "migration.schema.autotrigger.enabled"; public static final String MIGRATION_DATA_FULLDATABASE = "migration.data.fulldatabase.enabled"; public static final String MIGRATION_DATA_READER_BATCHSIZE = "migration.data.reader.batchsize"; @@ -43,13 +44,28 @@ public final class CommercedbsyncConstants extends GeneratedCommercedbsyncConsta public static final String MIGRATION_DATA_INCREMENTAL_ENABLED = "migration.data.incremental.enabled"; public static final String MIGRATION_DATA_INCREMENTAL_TABLES = "migration.data.incremental.tables"; public static final String MIGRATION_DATA_INCREMENTAL_TIMESTAMP = "migration.data.incremental.timestamp"; - public static final String MIGRATION_DATA_BULKCOPY_ENABLED = "migration.data.bulkcopy.enabled"; public static final String MIGRATION_DATA_PIPE_TIMEOUT = "migration.data.pipe.timeout"; public static final String MIGRATION_DATA_PIPE_CAPACITY = "migration.data.pipe.capacity"; public static final String MIGRATION_STALLED_TIMEOUT = "migration.stalled.timeout"; - public static final String MIGRATION_DATA_REPORT_CONNECTIONSTRING = "migration.data.report.connectionstring"; + public static final String MIGRATION_FILE_STORAGE_CONNECTIONSTRING = "migration.data.report.connectionstring"; public static final String MIGRATION_DATATYPE_CHECK = "migration.datatype.check"; public static final String MIGRATION_TABLESPREFIX = "MIGRATIONTOOLKIT_"; + public static final String MIGRATION_SCHEDULER_RESUME_ENABLED = "migration.scheduler.resume.enabled"; + public static final String MIGRATION_LOG_SQL = "migration.log.sql"; + public static final String MIGRATION_LOG_SQL_PARAMS_SOURCE = "migration.log.sql.source.showparameters"; + public static final String MIGRATION_SQL_STORE_FLUSH_THRESHOLD = "migration.log.sql.memory.flush.threshold.nbentries"; + public static final String MIGRATION_FILE_STORAGE_CONTAINER_NAME = "migration.data.filestorage.container.name"; + public static final String MIGRATION_INPUT_PROFILES = "migration.input.profiles"; + public static final String MIGRATION_OUTPUT_PROFILES = "migration.output.profiles"; + + public static final String MIGRATION_DATA_READTASK_KEEPALIVE_SECONDS = "migration.data.readtask.keepaliveseconds"; + public static final String MIGRATION_DATA_READTASK_QUEUE_CAPACITY = "migration.data.readtask.queuecapacity"; + public static final String MIGRATION_DATA_READTASK_ALLOWTIMEOUT = "migration.data.readtask.allowtimeout"; + public static final String MIGRATION_DATA_WRITETASK_KEEPALIVE_SECONDS = "migration.data.writetask.keepaliveseconds"; + public static final String MIGRATION_DATA_WRITETASK_ALLOWTIMEOUT = "migration.data.writetask.allowtimeout"; + + public static final String MIGRATION_DATA_WORKERS_READER_KEEPALIVESECONDS = "migration.data.workers.reader.keepaliveseconds"; + public static final 
String MIGRATION_DATA_WORKERS_WRITER_KEEPALIVESECONDS = "migration.data.workers.writer.keepaliveseconds"; public static final String MDC_MIGRATIONID = "migrationID"; public static final String MDC_PIPELINE = "pipeline"; @@ -57,7 +73,6 @@ public final class CommercedbsyncConstants extends GeneratedCommercedbsyncConsta public static final String DEPLOYMENTS_TABLE = "ydeployments"; - // Masking public static final String MIGRATION_REPORT_MASKED_PROPERTIES = "migration.properties.masked"; public static final String MASKED_VALUE = "***"; @@ -70,30 +85,23 @@ public final class CommercedbsyncConstants extends GeneratedCommercedbsyncConsta public static final String MIGRATION_DATA_INCREMENTAL_DELETIONS_TYPECODES = "migration.data.incremental.deletions.typecodes"; public static final String MIGRATION_DATA_INCREMENTAL_DELETIONS_ITEMTYPES_ENABLED = "migration.data.incremental.deletions.itemtypes.enabled"; public static final String MIGRATION_DATA_INCREMENTAL_DELETIONS_TYPECODES_ENABLED = "migration.data.incremental.deletions.typecodes.enabled"; - public static final String MIGRATION_DATA_DELETION_ENABLED = "migration.data.incremental.deletions.enabled"; - public static final String MIGRATION_DATA_DELETION_TABLE = "migration.data.incremental.deletions.table"; - // ORACLE_TARGET -- START - public static final String MIGRATION_ORACLE_MAX = "VARCHAR2\\(2147483647\\)"; - public static final String MIGRATION_ORACLE_CLOB = "CLOB"; - public static final String MIGRATION_ORACLE_VARCHAR24k = "VARCHAR2(4000)"; + public static final String MIGRATION_ORACLE_MAX = "VARCHAR2\\(2147483647\\)"; + public static final String MIGRATION_ORACLE_CLOB = "CLOB"; + public static final String MIGRATION_ORACLE_VARCHAR24k = "VARCHAR2(4000)"; - // ORACLE_TARGET -- END - - // DB View support - public static final String MIGRATION_DB_VIEW_NAME_PATTERN = "migration.data.view.name.pattern"; + // DB View support + public static final String MIGRATION_DB_VIEW_NAME_PATTERN = "migration.data.view.name.pattern"; - // DDL View Generation - // property - - public static final String MIGRATION_DATA_VIEW_TBL_GENERATION = "migration.data.view.t.{table}.enabled"; - public static final String MIGRATION_DATA_VIEW_TBL_JOIN_WHERE = "migration.data.view.t.{table}.joinWhereClause"; - public static final String MIGRATION_DATA_VIEW_COL_REPLACEMENT = "migration.data.view.t.{table}.columnTransformation.{column}"; - + // DDL View Generation + // property - private CommercedbsyncConstants() { - // empty to avoid instantiating this constant class - } + public static final String MIGRATION_DATA_VIEW_TBL_GENERATION = "migration.data.view.t.{table}.enabled"; + public static final String MIGRATION_DATA_VIEW_TBL_JOIN_WHERE = "migration.data.view.t.{table}.joinWhereClause"; + public static final String MIGRATION_DATA_VIEW_COL_REPLACEMENT = "migration.data.view.t.{table}.columnTransformation.{column}"; + private CommercedbsyncConstants() { + // empty to avoid instantiating this constant class + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/CopyContext.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/CopyContext.java index 789d61a..e99c0c9 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/CopyContext.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/CopyContext.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -8,6 +8,8 @@ import com.sap.cx.boosters.commercedbsync.performance.PerformanceProfiler; +import java.io.Serializable; +import java.util.HashMap; import java.util.Map; import java.util.Objects; import java.util.Set; @@ -19,16 +21,19 @@ */ public class CopyContext { - private String migrationId; - private MigrationContext migrationContext; - private Set copyItems; - private PerformanceProfiler performanceProfiler; + private final String migrationId; + private final MigrationContext migrationContext; + private final Set copyItems; + private final PerformanceProfiler performanceProfiler; + private final Map propertyOverrideMap; - public CopyContext(String migrationId, MigrationContext migrationContext, Set copyItems, PerformanceProfiler performanceProfiler) { + public CopyContext(String migrationId, MigrationContext migrationContext, Set copyItems, + PerformanceProfiler performanceProfiler) { this.migrationId = migrationId; this.migrationContext = migrationContext; this.copyItems = copyItems; this.performanceProfiler = performanceProfiler; + this.propertyOverrideMap = new HashMap<>(); } public IdCopyContext toIdCopyContext() { @@ -56,6 +61,10 @@ public PerformanceProfiler getPerformanceProfiler() { return performanceProfiler; } + public Map getPropertyOverrideMap() { + return propertyOverrideMap; + } + public static class DataCopyItem { private String sourceItem; private final String targetItem; @@ -79,12 +88,12 @@ public DataCopyItem(String sourceItem, String targetItem, Map co public String getSourceItem() { return sourceItem; } - + public void setSourceItem(String sourceItem) { - this.sourceItem = sourceItem; - } + this.sourceItem = sourceItem; + } - public String getTargetItem() { + public String getTargetItem() { return targetItem; } @@ -103,18 +112,17 @@ public Long getRowCount() { @Override public String toString() { return new StringJoiner(", ", DataCopyItem.class.getSimpleName() + "[", "]") - .add("sourceItem='" + sourceItem + "'") - .add("targetItem='" + targetItem + "'") - .toString(); + .add("sourceItem='" + sourceItem + "'").add("targetItem='" + targetItem + "'").toString(); } @Override public boolean equals(Object o) { - if (this == o) return true; - if (o == null || getClass() != o.getClass()) return false; + if (this == o) + return true; + if (o == null || getClass() != o.getClass()) + return false; DataCopyItem that = (DataCopyItem) o; - return getSourceItem().equals(that.getSourceItem()) && - getTargetItem().equals(that.getTargetItem()); + return getSourceItem().equals(that.getSourceItem()) && getTargetItem().equals(that.getTargetItem()); } @Override @@ -125,7 +133,8 @@ public int hashCode() { public static class IdCopyContext extends CopyContext { - public IdCopyContext(String migrationId, MigrationContext migrationContext, PerformanceProfiler performanceProfiler) { + public IdCopyContext(String migrationId, MigrationContext migrationContext, + PerformanceProfiler performanceProfiler) { super(migrationId, migrationContext, null, performanceProfiler); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/IncrementalMigrationContext.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/IncrementalMigrationContext.java index 54808cc..82a71eb 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/IncrementalMigrationContext.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/IncrementalMigrationContext.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate 
company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -10,15 +10,16 @@ import java.util.Set; /** - * The MigrationContext contains all information needed to perform a Source -> Target Migration + * The MigrationContext contains all information needed to perform a Source -> + * Target Migration */ public interface IncrementalMigrationContext extends MigrationContext { Instant getIncrementalMigrationTimestamp(); - public void setSchemaMigrationAutoTriggerEnabled(final boolean autoTriggerEnabled); + void setSchemaMigrationAutoTriggerEnabled(final boolean autoTriggerEnabled); - public void setTruncateEnabled(final boolean truncateEnabled); + void setTruncateEnabled(final boolean truncateEnabled); void setIncrementalMigrationTimestamp(final Instant timeStampInstant); @@ -28,7 +29,7 @@ public interface IncrementalMigrationContext extends MigrationContext { void setIncludedTables(final Set includedTables); - public void setDeletionEnabled(boolean deletionEnabled); + void setDeletionEnabled(boolean deletionEnabled); - public void setLpTableMigrationEnabled(boolean lpTableMigrationEnabled); + void setLpTableMigrationEnabled(boolean lpTableMigrationEnabled); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/LaunchOptions.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/LaunchOptions.java new file mode 100644 index 0000000..62438c8 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/LaunchOptions.java @@ -0,0 +1,26 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.context; + +import java.io.Serializable; +import java.util.HashMap; +import java.util.Map; + +public class LaunchOptions { + + public static final LaunchOptions NONE = new LaunchOptions(); + + private final Map propertyOverrideMap; + + public LaunchOptions() { + this.propertyOverrideMap = new HashMap<>(); + } + + public Map getPropertyOverrideMap() { + return propertyOverrideMap; + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/MigrationContext.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/MigrationContext.java index 0763d48..058ac0b 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/MigrationContext.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/MigrationContext.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -14,7 +14,8 @@ import java.util.Set; /** - * The MigrationContext contains all information needed to perform a Source -> Target Migration + * The MigrationContext contains all information needed to perform a Source -> + * Target Migration */ public interface MigrationContext { DataRepository getDataSourceRepository(); @@ -77,84 +78,100 @@ public interface MigrationContext { Instant getIncrementalTimestamp(); - boolean isBulkCopyEnabled(); - int getDataPipeTimeout(); int getDataPipeCapacity(); int getStalledTimeout(); - String getMigrationReportConnectionString(); + String getFileStorageConnectionString(); int getMaxTargetStagedMigrations(); + boolean isDataExportEnabled(); + boolean isDeletionEnabled(); boolean isLpTableMigrationEnabled(); - + + boolean isSchedulerResumeEnabled(); + boolean isFullDatabaseMigration(); - + void setFullDatabaseMigrationEnabled(boolean enabled); - + void refreshSelf(); - /** - * String value which defines name of the view which should be looked up in - * database and check if matches by name. String pattern should be compatible - * with {@code String.format()} pattern. - * E.g.
- *
-	 * String pattern    : "%s_view"
-	 * item type table   :  "products"
-	 * Searched view name: "products_view"
-	 *
- * - * @return by default {@code null}. If setting is set in properties, it will return value defined. - */ - String getItemTypeViewNamePattern(); - - /** - * Returns string value which is table/view name for particular table name, - * which follows ItemType View pattern name. - * - * @return by default returns view name, as fallback, it will return origin - * table name - * @throws SQLException when DB error occurs - */ - String getItemTypeViewNameByTable(String tableName, DataRepository repository) throws SQLException; - - /** - * Returns string value which is custom view name for particular table name, - * which follows ItemType View pattern name. - * - * @return by default returns view name, as fallback, it will return origin - * table name - * @throws SQLException when DB error occurs - */ - String getViewWhereClause(final String tableName); - - /** - * Returns list of replacements for particular column from table tableName. - * That returns only custom ones. - * If configuration say: - * - * - * - * - * - * @param tableName table which should be filtered by from properties - * @return map for original column to what it should return - */ - Map getCustomColumnsForView(final String tableName); - - /** - * Return list of table names which have enabled view generation - * - * @return - */ - Set getTablesForViews(); - - - String getViewColumnPrefixFor(String tableName); + /** + * String value which defines name of the view which should be looked up in + * database and check if matches by name. String pattern should be compatible + * with {@code String.format()} pattern. E.g.
+ * + *
+     * String pattern    : "%s_view"
+     * item type table   :  "products"
+     * Searched view name: "products_view"
+     *
+ * + * @return by default {@code null}. If setting is set in properties, it will + * return value defined. + */ + String getItemTypeViewNamePattern(); + + boolean isLogSql(); + + boolean isLogSqlParamsForSource(); + + int getSqlStoreMemoryFlushThreshold(); + + String getFileStorageContainerName(); + + Set getInputProfiles(); + + Set getOutputProfiles(); + + /** + * Returns string value which is table/view name for particular table name, + * which follows ItemType View pattern name. + * + * @return by default returns view name, as fallback, it will return origin + * table name + * @throws SQLException + * when DB error occurs + */ + String getItemTypeViewNameByTable(String tableName, DataRepository repository) throws SQLException; + + /** + * Returns string value which is custom view name for particular table name, + * which follows ItemType View pattern name. + * + * @return by default returns view name, as fallback, it will return origin + * table name + * @throws SQLException + * when DB error occurs + */ + String getViewWhereClause(final String tableName); + + /** + * Returns list of replacements for particular column from table + * tableName. That returns only custom ones. If configuration say: + * + * + * + * + * + * @param tableName + * table which should be filtered by from properties + * @return map for original column to what it should return + */ + Map getCustomColumnsForView(final String tableName); + + /** + * Return list of table names which have enabled view generation + * + * @return + */ + Set getTablesForViews(); + + String getViewColumnPrefixFor(String tableName); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/MigrationContextFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/MigrationContextFactory.java new file mode 100644 index 0000000..42b59f1 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/MigrationContextFactory.java @@ -0,0 +1,36 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.context; + +import com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants; +import com.sap.cx.boosters.commercedbsync.context.impl.DefaultIncrementalMigrationContext; +import com.sap.cx.boosters.commercedbsync.context.impl.DefaultMigrationContext; +import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfigurationFactory; +import com.sap.cx.boosters.commercedbsync.repository.impl.DataRepositoryFactory; +import org.apache.commons.configuration.Configuration; + +public class MigrationContextFactory { + final DataRepositoryFactory dataRepositoryFactory; + final DataSourceConfigurationFactory dataSourceConfigurationFactory; + final Configuration configuration; + + public MigrationContextFactory(DataRepositoryFactory dataRepositoryFactory, + DataSourceConfigurationFactory dataSourceConfigurationFactory, Configuration configuration) { + this.dataRepositoryFactory = dataRepositoryFactory; + this.dataSourceConfigurationFactory = dataSourceConfigurationFactory; + this.configuration = configuration; + } + + public MigrationContext create() throws Exception { + if (configuration.getBoolean(CommercedbsyncConstants.MIGRATION_DATA_EXPORT_ENABLED, false)) { + return new DefaultIncrementalMigrationContext(dataRepositoryFactory, dataSourceConfigurationFactory, + configuration); + } + + return new DefaultMigrationContext(dataRepositoryFactory, dataSourceConfigurationFactory, configuration); + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultIncrementalMigrationContext.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultIncrementalMigrationContext.java index 748a444..4c1df31 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultIncrementalMigrationContext.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultIncrementalMigrationContext.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -7,7 +7,7 @@ package com.sap.cx.boosters.commercedbsync.context.impl; import com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants; -import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfiguration; +import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfigurationFactory; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.configuration.Configuration; import org.apache.commons.lang.StringUtils; @@ -18,11 +18,8 @@ import java.time.Instant; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import java.util.Arrays; import java.util.Collections; import java.util.Set; -import java.util.TreeSet; -import java.util.stream.Collectors; public class DefaultIncrementalMigrationContext extends DefaultMigrationContext implements IncrementalMigrationContext { @@ -31,9 +28,10 @@ public class DefaultIncrementalMigrationContext extends DefaultMigrationContext private Set incrementalTables; private Set includedTables; - - public DefaultIncrementalMigrationContext(DataSourceConfiguration sourceDataSourceConfiguration, DataSourceConfiguration targetDataSourceConfiguration, DataRepositoryFactory dataRepositoryFactory, Configuration configuration) throws Exception { - super(sourceDataSourceConfiguration, targetDataSourceConfiguration, dataRepositoryFactory, configuration); + public DefaultIncrementalMigrationContext(final DataRepositoryFactory dataRepositoryFactory, + final DataSourceConfigurationFactory dataSourceConfigurationFactory, final Configuration configuration) + throws Exception { + super(dataRepositoryFactory, dataSourceConfigurationFactory, configuration); } @Override @@ -63,11 +61,11 @@ public Set setIncrementalTables(Set incrementalTables) { return this.incrementalTables = incrementalTables; } - @Override public Set getIncrementalTables() { - return CollectionUtils.isNotEmpty(this.incrementalTables) ? - this.incrementalTables : getListProperty(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_TABLES); + return CollectionUtils.isNotEmpty(this.incrementalTables) + ? this.incrementalTables + : getListProperty(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_TABLES); } @Override @@ -76,7 +74,6 @@ public void setIncrementalModeEnabled(boolean incrementalModeEnabled) { Boolean.toString(incrementalModeEnabled)); } - @Override public Instant getIncrementalTimestamp() { if (null != getIncrementalMigrationTimestamp()) { @@ -97,8 +94,9 @@ public Set getIncludedTables() { if (isIncrementalModeEnabled()) { return Collections.emptySet(); } - return CollectionUtils.isNotEmpty(includedTables) ? includedTables : - getListProperty(CommercedbsyncConstants.MIGRATION_DATA_TABLES_INCLUDED); + return CollectionUtils.isNotEmpty(includedTables) + ? includedTables + : getListProperty(CommercedbsyncConstants.MIGRATION_DATA_TABLES_INCLUDED); } @Override @@ -116,31 +114,15 @@ public void setLpTableMigrationEnabled(boolean lpTableMigrationEnabled) { this.lpTableMigrationEnabled = lpTableMigrationEnabled; } - private Set getListProperty(final String key) { - final String tables = super.configuration.getString(key); - - if (StringUtils.isEmpty(tables)) { - return Collections.emptySet(); - } - - final Set result = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); - final String[] tablesArray = tables.split(","); - result.addAll(Arrays.stream(tablesArray).collect(Collectors.toSet())); - - return result; + /* + * Fire this method only from HAC controller...not from the jobs. 
+ */ + @Override + public void refreshSelf() { + LOG.info("Refreshing Context"); + // lists + this.setIncludedTables(Collections.emptySet()); + this.setIncrementalTables(Collections.emptySet()); + this.setIncrementalMigrationTimestamp(null); } - // ORACLE_TARGET -- START - /* - * Fire this method only from HAC controller...not from the jobs. - */ - @Override - public void refreshSelf() - { - LOG.info("Refreshing Context"); - // lists - this.setIncludedTables(Collections.emptySet()); - this.setIncrementalTables(Collections.emptySet()); - this.setIncrementalMigrationTimestamp(null); - } - // ORACLE_TARGET -- END } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultMigrationContext.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultMigrationContext.java index 2acc104..270028f 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultMigrationContext.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/impl/DefaultMigrationContext.java @@ -1,19 +1,16 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ package com.sap.cx.boosters.commercedbsync.context.impl; - import java.sql.SQLException; import java.time.Instant; import java.time.ZonedDateTime; import java.time.format.DateTimeFormatter; -import java.util.Arrays; import java.util.Collections; -import java.util.HashMap; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -25,13 +22,14 @@ import java.util.TreeSet; import java.util.stream.Collectors; +import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfiguration; import org.apache.commons.configuration.Configuration; import org.apache.commons.lang.StringUtils; import com.google.common.base.Splitter; import com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants; import com.sap.cx.boosters.commercedbsync.context.MigrationContext; -import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfiguration; +import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfigurationFactory; import com.sap.cx.boosters.commercedbsync.repository.DataRepository; import com.sap.cx.boosters.commercedbsync.repository.impl.DataRepositoryFactory; @@ -41,16 +39,19 @@ public class DefaultMigrationContext implements MigrationContext { protected boolean deletionEnabled; protected boolean lpTableMigrationEnabled; - protected final Configuration configuration; + protected final Configuration configuration; - public DefaultMigrationContext(final DataSourceConfiguration sourceDataSourceConfiguration, - final DataSourceConfiguration targetDataSourceConfiguration, - final DataRepositoryFactory dataRepositoryFactory, - final Configuration configuration) throws Exception { - this.dataSourceRepository = dataRepositoryFactory.create(sourceDataSourceConfiguration); - this.dataTargetRepository = dataRepositoryFactory.create(targetDataSourceConfiguration); + public DefaultMigrationContext(final DataRepositoryFactory dataRepositoryFactory, + final DataSourceConfigurationFactory dataSourceConfigurationFactory, final Configuration configuration) + throws Exception { this.configuration = configuration; ensureDefaultLocale(configuration); + final Set inputDataSourceConfigurations = getInputProfiles().stream() + .map(dataSourceConfigurationFactory::create).collect(Collectors.toSet()); + final Set 
outputDataSourceConfigurations = getOutputProfiles().stream() + .map(dataSourceConfigurationFactory::create).collect(Collectors.toSet()); + this.dataSourceRepository = dataRepositoryFactory.create(this, inputDataSourceConfigurations); + this.dataTargetRepository = dataRepositoryFactory.create(this, outputDataSourceConfigurations); } private void ensureDefaultLocale(Configuration configuration) { @@ -59,7 +60,6 @@ private void ensureDefaultLocale(Configuration configuration) { Locale.setDefault(locale); } - @Override public DataRepository getDataSourceRepository() { return dataSourceRepository; @@ -140,7 +140,6 @@ public int getMaxWorkerRetryAttempts() { return getNumericProperty(CommercedbsyncConstants.MIGRATION_DATA_WORKERS_RETRYATTEMPTS); } - @Override public int getMaxParallelTableCopy() { return getNumericProperty(CommercedbsyncConstants.MIGRATION_DATA_MAXPRALLELTABLECOPY); @@ -190,7 +189,6 @@ public Set getDisableAllIndexesIncludedTables() { return getListProperty(CommercedbsyncConstants.MIGRATION_DATA_INDICES_DISABLE_INCLUDED); } - @Override public boolean isClusterMode() { return getBooleanProperty(CommercedbsyncConstants.MIGRATION_CLUSTER_ENABLED); @@ -215,11 +213,6 @@ public Instant getIncrementalTimestamp() { return ZonedDateTime.parse(timeStamp, DateTimeFormatter.ISO_ZONED_DATE_TIME).toInstant(); } - @Override - public boolean isBulkCopyEnabled() { - return getBooleanProperty(CommercedbsyncConstants.MIGRATION_DATA_BULKCOPY_ENABLED); - } - @Override public int getDataPipeTimeout() { return getNumericProperty(CommercedbsyncConstants.MIGRATION_DATA_PIPE_TIMEOUT); @@ -231,8 +224,8 @@ public int getDataPipeCapacity() { } @Override - public String getMigrationReportConnectionString() { - return getStringProperty(CommercedbsyncConstants.MIGRATION_DATA_REPORT_CONNECTIONSTRING); + public String getFileStorageConnectionString() { + return getStringProperty(CommercedbsyncConstants.MIGRATION_FILE_STORAGE_CONNECTIONSTRING); } @Override @@ -240,6 +233,46 @@ public int getMaxTargetStagedMigrations() { return getNumericProperty(CommercedbsyncConstants.MIGRATION_TARGET_MAX_STAGE_MIGRATIONS); } + @Override + public boolean isDataExportEnabled() { + return getBooleanProperty(CommercedbsyncConstants.MIGRATION_DATA_EXPORT_ENABLED); + } + + @Override + public boolean isSchedulerResumeEnabled() { + return getBooleanProperty(CommercedbsyncConstants.MIGRATION_SCHEDULER_RESUME_ENABLED); + } + + @Override + public boolean isLogSql() { + return getBooleanProperty(CommercedbsyncConstants.MIGRATION_LOG_SQL); + } + + @Override + public boolean isLogSqlParamsForSource() { + return getBooleanProperty(CommercedbsyncConstants.MIGRATION_LOG_SQL_PARAMS_SOURCE); + } + + @Override + public int getSqlStoreMemoryFlushThreshold() { + return getNumericProperty(CommercedbsyncConstants.MIGRATION_SQL_STORE_FLUSH_THRESHOLD); + } + + @Override + public String getFileStorageContainerName() { + return getStringProperty(CommercedbsyncConstants.MIGRATION_FILE_STORAGE_CONTAINER_NAME); + } + + @Override + public Set getInputProfiles() { + return getListProperty(CommercedbsyncConstants.MIGRATION_INPUT_PROFILES); + } + + @Override + public Set getOutputProfiles() { + return getListProperty(CommercedbsyncConstants.MIGRATION_OUTPUT_PROFILES); + } + @Override public boolean isDeletionEnabled() { return this.deletionEnabled; @@ -251,17 +284,15 @@ public boolean isLpTableMigrationEnabled() { } @Override - public boolean isFullDatabaseMigration() - { - return getBooleanProperty(CommercedbsyncConstants.MIGRATION_DATA_FULLDATABASE); + public 
boolean isFullDatabaseMigration() { + return getBooleanProperty(CommercedbsyncConstants.MIGRATION_DATA_FULLDATABASE); } - + @Override - public void setFullDatabaseMigrationEnabled(final boolean enabled) - { - this.configuration.setProperty(CommercedbsyncConstants.MIGRATION_DATA_FULLDATABASE, Boolean.toString(enabled)); - } - + public void setFullDatabaseMigrationEnabled(final boolean enabled) { + this.configuration.setProperty(CommercedbsyncConstants.MIGRATION_DATA_FULLDATABASE, Boolean.toString(enabled)); + } + @Override public void refreshSelf() { @@ -284,7 +315,7 @@ protected String getStringProperty(final String key) { return configuration.getString(key); } - private Set getListProperty(final String key) { + protected Set getListProperty(final String key) { final String tables = configuration.getString(key); if (StringUtils.isEmpty(tables)) { @@ -292,8 +323,7 @@ private Set getListProperty(final String key) { } final Set result = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); - final String[] tablesArray = tables.split(","); - result.addAll(Arrays.stream(tablesArray).collect(Collectors.toSet())); + result.addAll(Splitter.on(",").omitEmptyStrings().trimResults().splitToList(tables)); return result; } @@ -308,7 +338,7 @@ private Map> getDynamicPropertyKeys(final String key) { } return map; } - + private Map getDynamicRawProperties(final String key) { final Map map = new TreeMap<>(String.CASE_INSENSITIVE_ORDER); final Configuration subset = configuration.subset(key); @@ -320,41 +350,42 @@ private Map getDynamicRawProperties(final String key) { return map; } - @Override - public String getItemTypeViewNamePattern() { - return getStringProperty(CommercedbsyncConstants.MIGRATION_DB_VIEW_NAME_PATTERN); - } + @Override + public String getItemTypeViewNamePattern() { + return getStringProperty(CommercedbsyncConstants.MIGRATION_DB_VIEW_NAME_PATTERN); + } - @Override + @Override public String getItemTypeViewNameByTable(String tableName, DataRepository repository) throws SQLException { - Set views = repository.getAllViewNames(); - String possibleVieName = String.format(StringUtils.trimToEmpty(getItemTypeViewNamePattern()), tableName); - return views.contains(possibleVieName) ? possibleVieName : tableName; - } - - @Override + Set views = repository.getAllViewNames(); + String possibleVieName = String.format(StringUtils.trimToEmpty(getItemTypeViewNamePattern()), tableName); + return views.contains(possibleVieName) ? 
possibleVieName : tableName; + } + + @Override public String getViewWhereClause(final String tableName) { - String whereConfigKey = CommercedbsyncConstants.MIGRATION_DATA_VIEW_TBL_JOIN_WHERE.replace("{table}", tableName); - String fromSection = configuration.getString(whereConfigKey); - if (StringUtils.isBlank(fromSection.trim())) { - fromSection = tableName; - } - return fromSection; - } - - @Override - public Map getCustomColumnsForView(final String tableName) { - String tblConfigKey = CommercedbsyncConstants.MIGRATION_DATA_VIEW_COL_REPLACEMENT.replace("{table}", tableName); - String trimToTable = tblConfigKey.replace(".{column}", ""); - return getDynamicRawProperties(trimToTable); - } - - @Override - public Set getTablesForViews() { - Set tables = new HashSet<>(); - String str = CommercedbsyncConstants.MIGRATION_DATA_VIEW_TBL_GENERATION; - // prefix before table placeholder - String key = str.substring(0, str.indexOf("{") - 1); + String whereConfigKey = CommercedbsyncConstants.MIGRATION_DATA_VIEW_TBL_JOIN_WHERE.replace("{table}", + tableName); + String fromSection = configuration.getString(whereConfigKey); + if (StringUtils.isBlank(fromSection.trim())) { + fromSection = tableName; + } + return fromSection; + } + + @Override + public Map getCustomColumnsForView(final String tableName) { + String tblConfigKey = CommercedbsyncConstants.MIGRATION_DATA_VIEW_COL_REPLACEMENT.replace("{table}", tableName); + String trimToTable = tblConfigKey.replace(".{column}", ""); + return getDynamicRawProperties(trimToTable); + } + + @Override + public Set getTablesForViews() { + Set tables = new HashSet<>(); + String str = CommercedbsyncConstants.MIGRATION_DATA_VIEW_TBL_GENERATION; + // prefix before table placeholder + String key = str.substring(0, str.indexOf("{") - 1); final Configuration subset = configuration.subset(key); final Iterator keys = subset.getKeys(); while (keys.hasNext()) { @@ -363,19 +394,18 @@ public Set getTablesForViews() { String subkey = current.replace(key, ""); List subkeyList = Splitter.on(".").splitToList(subkey); if (subkeyList.size() == 2 && "enabled".equals(subkeyList.get(1))) { - boolean val = subset.getBoolean(current, false); - if (val) { - String tablename = Splitter.on(".").splitToList(subkey).get(0); - tables.add(tablename); - } + boolean val = subset.getBoolean(current, false); + if (val) { + String tablename = Splitter.on(".").splitToList(subkey).get(0); + tables.add(tablename); + } } } - return tables; - } - - @Override - public String getViewColumnPrefixFor(final String tableName) - { - return configuration.getString("migration.data.view.t." + tableName +".columnPrefix"); - } + return tables; + } + + @Override + public String getViewColumnPrefixFor(final String tableName) { + return configuration.getString("migration.data.view.t." + tableName + ".columnPrefix"); + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/validation/MigrationContextValidator.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/validation/MigrationContextValidator.java index 474e64c..b301a6d 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/validation/MigrationContextValidator.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/validation/MigrationContextValidator.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/validation/impl/DefaultMigrationContextValidator.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/validation/impl/DefaultMigrationContextValidator.java index 8341853..60e5eb3 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/validation/impl/DefaultMigrationContextValidator.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/context/validation/impl/DefaultMigrationContextValidator.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -15,23 +15,47 @@ public class DefaultMigrationContextValidator implements MigrationContextValidator { - private static final String MIGRATION_DS_TARGET_DB_URL = "migration.ds.target.db.url"; - private static final String COMMERCE_DB_URL = "db.url"; + private static final String DB_URL_PROPERTY_KEY = "db.url"; private static final String DISABLE_UNLOCKING = "system.unlocking.disabled"; private ConfigurationService configurationService; @Override public void validateContext(final MigrationContext context) { - final String migrationTargetDbUrl = getConfigurationService().getConfiguration().getString(MIGRATION_DS_TARGET_DB_URL); - final String commerceDbUrl = getConfigurationService().getConfiguration().getString(COMMERCE_DB_URL); + checkSourceDbIsNotTargetDb(context); + checkSystemNotLocked(context); + checkDefaultLocaleExists(); + } + + private void checkSourceDbIsNotTargetDb(MigrationContext context) { + if (context.isDataExportEnabled()) { + return; // in this mode, source DB can (should?) be set to CCv2 instance + } + + // Canonically the target should always be the CCV2 DB and we have to verify + // nobody is trying to copy *from* that + final String sourceDbUrl = context.getDataSourceRepository().getDataSourceConfiguration().getConnectionString(); + final String ccv2ManagedDB = getConfigurationService().getConfiguration().getString(DB_URL_PROPERTY_KEY); + + if (sourceDbUrl.equals(ccv2ManagedDB)) { + throw new RuntimeException( + "Invalid data source configuration - cannot use the CCV2-managed database as the source."); + } + } + + private void checkSystemNotLocked(MigrationContext context) { final boolean isSystemLocked = getConfigurationService().getConfiguration().getBoolean(DISABLE_UNLOCKING); - if (migrationTargetDbUrl.equals(commerceDbUrl) && isSystemLocked) { - throw new RuntimeException("Unable to run migration on a locked system. Check property " + DISABLE_UNLOCKING); + if (!context.isDataExportEnabled() && isSystemLocked) { + throw new RuntimeException( + "You cannot run the migration on a locked system. Check property " + DISABLE_UNLOCKING); } - //we check this for locale related comparison + } + + private void checkDefaultLocaleExists() { + // we check this for locale related comparison Locale defaultLocale = Locale.getDefault(); if (defaultLocale == null || StringUtils.isEmpty(defaultLocale.toString())) { - throw new RuntimeException("There is no default locale specified on the running server. Set the default locale and try again."); + throw new RuntimeException( + "There is no default locale specified on the running server. 
Set the default locale and try again."); } } @@ -43,4 +67,4 @@ public void setConfigurationService(ConfigurationService configurationService) { this.configurationService = configurationService; } -} \ No newline at end of file +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/cron/FullMigrationCronJob.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/cron/FullMigrationCronJob.java index a525ff2..e2829ff 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/cron/FullMigrationCronJob.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/cron/FullMigrationCronJob.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -11,22 +11,20 @@ import de.hybris.platform.jalo.SessionContext; import de.hybris.platform.jalo.type.ComposedType; import org.apache.log4j.Logger; -import com.sap.cx.boosters.commercedbsync.cron.GeneratedFullMigrationCronJob; -public class FullMigrationCronJob extends GeneratedFullMigrationCronJob -{ - @SuppressWarnings("unused") - private static final Logger LOG = Logger.getLogger( FullMigrationCronJob.class.getName() ); - - @Override - protected Item createItem(final SessionContext ctx, final ComposedType type, final Item.ItemAttributeMap allAttributes) throws JaloBusinessException - { - // business code placed here will be executed before the item is created - // then create the item - final Item item = super.createItem( ctx, type, allAttributes ); - // business code placed here will be executed after the item was created - // and return the item - return item; - } - +public class FullMigrationCronJob extends GeneratedFullMigrationCronJob { + @SuppressWarnings("unused") + private static final Logger LOG = Logger.getLogger(FullMigrationCronJob.class.getName()); + + @Override + protected Item createItem(final SessionContext ctx, final ComposedType type, + final Item.ItemAttributeMap allAttributes) throws JaloBusinessException { + // business code placed here will be executed before the item is created + // then create the item + final Item item = super.createItem(ctx, type, allAttributes); + // business code placed here will be executed after the item was created + // and return the item + return item; + } + } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/cron/IncrementalMigrationCronJob.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/cron/IncrementalMigrationCronJob.java index 96e3094..a75cd36 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/cron/IncrementalMigrationCronJob.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/cron/IncrementalMigrationCronJob.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -11,22 +11,20 @@ import de.hybris.platform.jalo.SessionContext; import de.hybris.platform.jalo.type.ComposedType; import org.apache.log4j.Logger; -import com.sap.cx.boosters.commercedbsync.cron.GeneratedIncrementalMigrationCronJob; -public class IncrementalMigrationCronJob extends GeneratedIncrementalMigrationCronJob -{ - @SuppressWarnings("unused") - private static final Logger LOG = Logger.getLogger( IncrementalMigrationCronJob.class.getName() ); - - @Override - protected Item createItem(final SessionContext ctx, final ComposedType type, final ItemAttributeMap allAttributes) throws JaloBusinessException - { - // business code placed here will be executed before the item is created - // then create the item - final Item item = super.createItem( ctx, type, allAttributes ); - // business code placed here will be executed after the item was created - // and return the item - return item; - } - +public class IncrementalMigrationCronJob extends GeneratedIncrementalMigrationCronJob { + @SuppressWarnings("unused") + private static final Logger LOG = Logger.getLogger(IncrementalMigrationCronJob.class.getName()); + + @Override + protected Item createItem(final SessionContext ctx, final ComposedType type, final ItemAttributeMap allAttributes) + throws JaloBusinessException { + // business code placed here will be executed before the item is created + // then create the item + final Item item = super.createItem(ctx, type, allAttributes); + // business code placed here will be executed after the item was created + // and return the item + return item; + } + } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/cron/MigrationCronJob.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/cron/MigrationCronJob.java index 35e7814..3c2d0a8 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/cron/MigrationCronJob.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/cron/MigrationCronJob.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -11,22 +11,20 @@ import de.hybris.platform.jalo.SessionContext; import de.hybris.platform.jalo.type.ComposedType; import org.apache.log4j.Logger; -import com.sap.cx.boosters.commercedbsync.cron.GeneratedMigrationCronJob; -public class MigrationCronJob extends GeneratedMigrationCronJob -{ - @SuppressWarnings("unused") - private static final Logger LOG = Logger.getLogger( MigrationCronJob.class.getName() ); - - @Override - protected Item createItem(final SessionContext ctx, final ComposedType type, final ItemAttributeMap allAttributes) throws JaloBusinessException - { - // business code placed here will be executed before the item is created - // then create the item - final Item item = super.createItem( ctx, type, allAttributes ); - // business code placed here will be executed after the item was created - // and return the item - return item; - } - +public class MigrationCronJob extends GeneratedMigrationCronJob { + @SuppressWarnings("unused") + private static final Logger LOG = Logger.getLogger(MigrationCronJob.class.getName()); + + @Override + protected Item createItem(final SessionContext ctx, final ComposedType type, final ItemAttributeMap allAttributes) + throws JaloBusinessException { + // business code placed here will be executed before the item is created + // then create the item + final Item item = super.createItem(ctx, type, allAttributes); + // business code placed here will be executed after the item was created + // and return the item + return item; + } + } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/DataColumn.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/DataColumn.java index 9c7a165..c069ad2 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/DataColumn.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/DataColumn.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/DataSet.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/DataSet.java index 829be66..3f9444b 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/DataSet.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/DataSet.java @@ -1,32 +1,42 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ package com.sap.cx.boosters.commercedbsync.dataset; -import com.microsoft.sqlserver.jdbc.ISQLServerBulkData; import com.sap.cx.boosters.commercedbsync.dataset.impl.DefaultDataSet; import java.util.Collections; import java.util.List; +import java.util.Objects; public interface DataSet { - DataSet EMPTY = new DefaultDataSet(0, Collections.EMPTY_LIST, Collections.EMPTY_LIST); + DataSet EMPTY = new DefaultDataSet(0, 0, Collections.emptyList(), Collections.emptyList()); + + int getBatchId(); int getColumnCount(); List> getAllResults(); - Object getColumnValue(String column, List row); + Object getColumnValue(String column, List row, DataColumn sourceColumn, int targetColumnType); + + default Object getColumnValue(String column, List row) { + var dataColumn = getColumn(column); - Object getColumnValueForPostGres(String columnName, List row, DataColumn sourceColumnType, int targetColumnType); + Objects.requireNonNull(dataColumn); + + return getColumnValue(column, row, dataColumn, dataColumn.getColumnType()); + } boolean isNotEmpty(); boolean hasColumn(String column); - ISQLServerBulkData toSQLServerBulkData(); + DataColumn getColumn(int columnIndex); + + DataColumn getColumn(String columnName); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/impl/BulkDataSet.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/impl/BulkDataSet.java deleted file mode 100644 index d91d1f8..0000000 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/impl/BulkDataSet.java +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. - * License: Apache-2.0 - * - */ - -package com.sap.cx.boosters.commercedbsync.dataset.impl; - -import com.microsoft.sqlserver.jdbc.ISQLServerBulkData; -import org.apache.logging.log4j.util.Strings; -import com.sap.cx.boosters.commercedbsync.dataset.DataColumn; - -import java.sql.SQLException; -import java.sql.Types; -import java.util.HashMap; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - -public class BulkDataSet extends DefaultDataSet implements ISQLServerBulkData { - - private final Map typeMap = new HashMap<>(); - private int pointer = -1; - private Set columnOrdinals; - - public BulkDataSet(int columnCount, List columnOrder, List> result) { - super(columnCount, columnOrder, result); - this.columnOrdinals = IntStream.range(1, columnOrder.size() + 1).boxed().collect(Collectors.toSet()); - this.typeMap.put(Types.BLOB, new DefaultDataColumn(Strings.EMPTY, Types.LONGVARBINARY, 0x7FFFFFFF, 0)); - } - - @Override - public Set getColumnOrdinals() { - return columnOrdinals; - } - - @Override - public String getColumnName(int i) { - return getColumnOrder().get(i - 1).getColumnName(); - } - - @Override - public int getColumnType(int i) { - return mapColumn(getColumnOrder().get(i - 1)).getColumnType(); - } - - @Override - public int getPrecision(int i) { - return mapColumn(getColumnOrder().get(i - 1)).getPrecision(); - } - - @Override - public int getScale(int i) { - return mapColumn(getColumnOrder().get(i - 1)).getScale(); - } - - @Override - public Object[] getRowData() throws SQLException { - return getAllResults().get(pointer).toArray(); - } - - @Override - public boolean next() throws SQLException { - pointer++; - return getAllResults().size() > pointer; - } - - private DataColumn mapColumn(DataColumn column) { - if 
(typeMap.containsKey(column.getColumnType())) { - return typeMap.get(column.getColumnType()); - } - return column; - } - -} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/impl/DefaultDataColumn.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/impl/DefaultDataColumn.java index 0b05bc0..eec21e9 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/impl/DefaultDataColumn.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/impl/DefaultDataColumn.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/impl/DefaultDataSet.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/impl/DefaultDataSet.java index 1d044f5..8060322 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/impl/DefaultDataSet.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/dataset/impl/DefaultDataSet.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -7,7 +7,7 @@ package com.sap.cx.boosters.commercedbsync.dataset.impl; import com.github.freva.asciitable.AsciiTable; -import com.microsoft.sqlserver.jdbc.ISQLServerBulkData; +import org.apache.commons.collections4.IterableUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.commons.lang3.StringUtils; import com.sap.cx.boosters.commercedbsync.dataset.DataColumn; @@ -16,21 +16,28 @@ import javax.annotation.concurrent.Immutable; import java.util.Collections; import java.util.List; -import java.util.stream.Collectors; -import java.util.stream.IntStream; + +import static java.sql.Types.CHAR; +import static java.sql.Types.SMALLINT; @Immutable public class DefaultDataSet implements DataSet { + private final int batchId; private final int columnCount; private final List columnOrder; private final List> result; - public DefaultDataSet(int columnCount, List columnOrder, List> result) { + public DefaultDataSet(int batchId, int columnCount, List columnOrder, List> result) { + this.batchId = batchId; this.columnCount = columnCount; - // TODO REVIEW Downgraded from Java8 to Java11 this.columnOrder = Collections.unmodifiableList(columnOrder); - this.result = Collections.unmodifiableList(result.stream().map(Collections::unmodifiableList).collect(Collectors.toList())); + this.result = result.stream().map(Collections::unmodifiableList).toList(); + } + + @Override + public int getBatchId() { + return batchId; } @Override @@ -44,64 +51,40 @@ public List> getAllResults() { } @Override - public Object getColumnValue(String columnName, List row) { + public Object getColumnValue(String columnName, List row, DataColumn sourceColumn, int targetColumnType) { if (columnName == null || !hasColumn(columnName)) { throw new IllegalArgumentException(String.format("Column %s is not part of the result", columnName)); } - int idx = IntStream.range(0, columnOrder.size()).filter(i -> columnName.equalsIgnoreCase(columnOrder.get(i).getColumnName())).findFirst().getAsInt(); - return row.get(idx); - } - @Override - public Object getColumnValueForPostGres(String columnName, List row, DataColumn sourceColumnType, int targetColumnType) { - if 
(columnName == null || !hasColumn(columnName)) { - throw new IllegalArgumentException(String.format("Column %s is not part of the result", columnName)); - } - int idx = IntStream.range(0, columnOrder.size()).filter(i -> columnName.equalsIgnoreCase(columnOrder.get(i).getColumnName())).findFirst().getAsInt(); - Object columnValue = row.get(idx); - if(ObjectUtils.isNotEmpty(columnValue)){ - switch (sourceColumnType.getColumnType()) { - case 1: - if (sourceColumnType.getPrecision() == 4 && targetColumnType == 5) { - if (columnValue instanceof String && ((String) columnValue).trim().length() == 1) { - columnValue = (int) (((String) columnValue).trim().charAt(0)); + Object columnValue = row.get(findColumnIndex(columnName)); + + if (ObjectUtils.isNotEmpty(columnValue)) { + switch (sourceColumn.getColumnType()) { + case CHAR : + if (sourceColumn.getPrecision() == 4 && targetColumnType == SMALLINT) { + if (columnValue instanceof String && ((String) columnValue).length() == 4) { + columnValue = (int) (((String) columnValue).charAt(0)); // cannot use trim() to not loose + // \n, \t, space etc chars } } break; - default: - break; - - } - } - return columnValue; - } - - public Object getColumnValueForHANA(String columnName, List row, DataColumn sourceColumnType, int targetColumnType) { - if (columnName == null || !hasColumn(columnName)) { - throw new IllegalArgumentException(String.format("Column %s is not part of the result", columnName)); - } - int idx = IntStream.range(0, columnOrder.size()).filter(i -> columnName.equalsIgnoreCase(columnOrder.get(i).getColumnName())).findFirst().getAsInt(); - Object columnValue = row.get(idx); - if(ObjectUtils.isNotEmpty(columnValue)){ - switch (sourceColumnType.getColumnType()) { - case 1: - if (sourceColumnType.getPrecision() == 4 && targetColumnType == 5) { - if (columnValue instanceof String && ((String) columnValue).trim().length() == 1) { - columnValue = (int) (((String) columnValue).trim().charAt(0)); - } + case SMALLINT : + if (targetColumnType == CHAR && StringUtils.isNumeric(String.valueOf(columnValue))) { + columnValue = Character.toString((char) ((Integer) columnValue).intValue()); } break; - - default: + default : break; + } } + return columnValue; } @Override public boolean isNotEmpty() { - return getAllResults() != null && getAllResults().size() > 0; + return getAllResults() != null && !getAllResults().isEmpty(); } @Override @@ -112,20 +95,25 @@ public boolean hasColumn(String column) { return columnOrder.stream().map(DataColumn::getColumnName).anyMatch(column::equalsIgnoreCase); } - public String toString() { - String[] headers = columnOrder.stream().map(DataColumn::getColumnName).toArray(String[]::new); - String[][] data = getAllResults().stream() - .map(l -> l.stream().map(v -> String.valueOf(v)).toArray(String[]::new)) - .toArray(String[][]::new); - return AsciiTable.getTable(headers, data); + @Override + public DataColumn getColumn(int columnIndex) { + return IterableUtils.get(columnOrder, columnIndex); } - public List getColumnOrder() { - return columnOrder; + @Override + public DataColumn getColumn(String columnName) { + return IterableUtils.get(columnOrder, findColumnIndex(columnName)); } - @Override - public ISQLServerBulkData toSQLServerBulkData() { - return new BulkDataSet(columnCount, columnOrder, result); + protected int findColumnIndex(String columnName) { + return IterableUtils.indexOf(columnOrder, + dataColumn -> dataColumn.getColumnName().equalsIgnoreCase(columnName)); + } + + public String toString() { + String[] headers = 
columnOrder.stream().map(DataColumn::getColumnName).toArray(String[]::new); + String[][] data = getAllResults().stream().map(l -> l.stream().map(String::valueOf).toArray(String[]::new)) + .toArray(String[][]::new); + return AsciiTable.getTable(headers, data); } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/datasource/MigrationDataSourceFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/datasource/MigrationDataSourceFactory.java index 6955bd8..d0c135b 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/datasource/MigrationDataSourceFactory.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/datasource/MigrationDataSourceFactory.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -12,20 +12,17 @@ import javax.sql.DataSource; - /** * Factory to create the DataSources used for Migration */ -public interface MigrationDataSourceFactory -{ - DataSource create(DataSourceConfiguration dataSourceConfiguration); - - - /** - * Generates DataSource configuration with a configuration map - * - * @param dataSourceConfigurationMap - * @return - */ - DataSource create(Map dataSourceConfigurationMap); +public interface MigrationDataSourceFactory { + DataSource create(DataSourceConfiguration dataSourceConfiguration); + + /** + * Generates DataSource configuration with a configuration map + * + * @param dataSourceConfigurationMap + * @return + */ + DataSource create(Map dataSourceConfigurationMap); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/datasource/impl/AbstractMigrationDataSourceFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/datasource/impl/AbstractMigrationDataSourceFactory.java index 6d8da5f..d00746d 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/datasource/impl/AbstractMigrationDataSourceFactory.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/datasource/impl/AbstractMigrationDataSourceFactory.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/datasource/impl/DefaultMigrationDataSourceFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/datasource/impl/DefaultMigrationDataSourceFactory.java index c45413f..a52ef78 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/datasource/impl/DefaultMigrationDataSourceFactory.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/datasource/impl/DefaultMigrationDataSourceFactory.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -14,36 +14,32 @@ import javax.sql.DataSource; - -public class DefaultMigrationDataSourceFactory extends AbstractMigrationDataSourceFactory -{ - - //TODO: resource leak: DataSources are never closed - @Override - public DataSource create(final DataSourceConfiguration dataSourceConfiguration) - { - HikariConfig config = new HikariConfig(); - config.setJdbcUrl(dataSourceConfiguration.getConnectionString()); - config.setDriverClassName(dataSourceConfiguration.getDriver()); - config.setUsername(dataSourceConfiguration.getUserName()); - config.setPassword(dataSourceConfiguration.getPassword()); - config.setMaximumPoolSize(dataSourceConfiguration.getMaxActive()); - config.setMinimumIdle(dataSourceConfiguration.getMinIdle()); - config.setRegisterMbeans(true); - return new HikariDataSource(config); - } - - @Override - public DataSource create(final Map dataSourceConfigurationMap) - { - HikariConfig config = new HikariConfig(); - config.setJdbcUrl((String) dataSourceConfigurationMap.get("connection.url")); - config.setDriverClassName((String) dataSourceConfigurationMap.get("driver")); - config.setUsername((String) dataSourceConfigurationMap.get("username")); - config.setPassword((String) dataSourceConfigurationMap.get("password")); - config.setMaximumPoolSize((Integer) dataSourceConfigurationMap.get("pool.size.max")); - config.setMinimumIdle((Integer) dataSourceConfigurationMap.get("pool.size.idle.min")); - config.setRegisterMbeans((Boolean) dataSourceConfigurationMap.get("registerMbeans")); - return new HikariDataSource(config); - } +public class DefaultMigrationDataSourceFactory extends AbstractMigrationDataSourceFactory { + + // TODO: resource leak: DataSources are never closed + @Override + public DataSource create(final DataSourceConfiguration dataSourceConfiguration) { + HikariConfig config = new HikariConfig(); + config.setJdbcUrl(dataSourceConfiguration.getConnectionString()); + config.setDriverClassName(dataSourceConfiguration.getDriver()); + config.setUsername(dataSourceConfiguration.getUserName()); + config.setPassword(dataSourceConfiguration.getPassword()); + config.setMaximumPoolSize(dataSourceConfiguration.getMaxActive()); + config.setMinimumIdle(dataSourceConfiguration.getMinIdle()); + config.setRegisterMbeans(true); + return new HikariDataSource(config); + } + + @Override + public DataSource create(final Map dataSourceConfigurationMap) { + HikariConfig config = new HikariConfig(); + config.setJdbcUrl((String) dataSourceConfigurationMap.get("connection.url")); + config.setDriverClassName((String) dataSourceConfigurationMap.get("driver")); + config.setUsername((String) dataSourceConfigurationMap.get("username")); + config.setPassword((String) dataSourceConfigurationMap.get("password")); + config.setMaximumPoolSize((Integer) dataSourceConfigurationMap.get("pool.size.max")); + config.setMinimumIdle((Integer) dataSourceConfigurationMap.get("pool.size.idle.min")); + config.setRegisterMbeans((Boolean) dataSourceConfigurationMap.get("registerMbeans")); + return new HikariDataSource(config); + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyCompleteEvent.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyCompleteEvent.java index fc26fe8..21625c5 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyCompleteEvent.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyCompleteEvent.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and 
commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -11,7 +11,7 @@ */ public class CopyCompleteEvent extends CopyEvent { - private Boolean copyResult = false; + private final Boolean copyResult = false; public CopyCompleteEvent(final Integer sourceNodeId, final String migrationId) { super(sourceNodeId, migrationId); diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyDatabaseTableEvent.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyDatabaseTableEvent.java index b336fb4..31c76a8 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyDatabaseTableEvent.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyDatabaseTableEvent.java @@ -1,15 +1,31 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync.events; +import java.io.Serializable; +import java.util.Map; + /** * Cluster Event to notify a Cluster to start the copy process */ public class CopyDatabaseTableEvent extends CopyEvent { - public CopyDatabaseTableEvent(final Integer sourceNodeId, final String migrationId) { + + /** + * contains property value updates that should be populated in the cluster + */ + private final Map propertyOverrideMap; + + public CopyDatabaseTableEvent(final Integer sourceNodeId, final String migrationId, + Map propertyOverrideMap) { super(sourceNodeId, migrationId); + this.propertyOverrideMap = propertyOverrideMap; + } + + public Map getPropertyOverrideMap() { + return propertyOverrideMap; } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyEvent.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyEvent.java index 7408e16..46b30a6 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyEvent.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/CopyEvent.java @@ -1,15 +1,15 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync.events; import de.hybris.platform.servicelayer.event.ClusterAwareEvent; import de.hybris.platform.servicelayer.event.PublishEventContext; import de.hybris.platform.servicelayer.event.events.AbstractEvent; - /** * ClusterAwareEvent to notify other Nodes to start the migration */ @@ -37,7 +37,6 @@ public int getSourceNodeId() { return sourceNodeId; } - /** * @return the migrationId */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyCompleteEventListener.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyCompleteEventListener.java index 9f1bcad..b32bbc2 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyCompleteEventListener.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyCompleteEventListener.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -38,64 +38,57 @@ public class CopyCompleteEventListener extends AbstractEventListener postProcessors; - @Override - protected void onEvent(final CopyCompleteEvent event) { - final String migrationId = event.getMigrationId(); + @Override + protected void onEvent(final CopyCompleteEvent event) { + final String migrationId = event.getMigrationId(); - LOG.info("Migration finished on Node {} with result {}", event.getSourceNodeId(), event.getCopyResult()); - final CopyContext copyContext = new CopyContext(migrationId, migrationContext, new HashSet<>(), - performanceProfiler); + LOG.info("Migration finished on Node {} with result {}", event.getSourceNodeId(), event.getCopyResult()); + final CopyContext copyContext = new CopyContext(migrationId, migrationContext, new HashSet<>(), + performanceProfiler); executePostProcessors(copyContext); } /** - * Runs through all the Post Processors in a transaction to avoid multiple executions + * Runs through all the Post Processors in a transaction to avoid multiple + * executions * * @param copyContext */ - private void executePostProcessors(final CopyContext copyContext) - { - try - { - Transaction.current().execute(new TransactionBody() - { - @Override - public Object execute() throws Exception - { - - final boolean eligibleForPostProcessing = databaseCopyTaskRepository.setMigrationStatus(copyContext, - MigrationProgress.PROCESSED, MigrationProgress.POSTPROCESSING) - || databaseCopyTaskRepository.setMigrationStatus(copyContext, MigrationProgress.ABORTED, - MigrationProgress.POSTPROCESSING); - - if (eligibleForPostProcessing) - { - LOG.info("Starting PostProcessor execution"); - postProcessors.forEach(p -> p.process(copyContext)); - - databaseCopyTaskRepository.setMigrationStatus(copyContext, MigrationProgress.POSTPROCESSING, - MigrationProgress.COMPLETED); - LOG.info("Finishing PostProcessor execution"); - } - - return null; - } - }); - } - catch (final Exception e) - { - LOG.error("Error during PostProcessor execution", e); - if (e instanceof RuntimeException re) - { - throw re; - } - else - { - throw new RuntimeException(e); - } - } - } + private void executePostProcessors(final CopyContext copyContext) { + try { + Transaction.current().execute(new TransactionBody() { + @Override + public Object execute() throws Exception { + + final boolean eligibleForPostProcessing = databaseCopyTaskRepository.setMigrationStatus(copyContext, + MigrationProgress.PROCESSED, MigrationProgress.POSTPROCESSING) + || databaseCopyTaskRepository.setMigrationStatus(copyContext, MigrationProgress.ABORTED, + MigrationProgress.POSTPROCESSING); + + if (eligibleForPostProcessing) { + LOG.info("Starting PostProcessor execution"); + + postProcessors.stream().filter(p -> p.shouldExecute(copyContext)) + .forEach(p -> p.process(copyContext)); + + databaseCopyTaskRepository.setMigrationStatus(copyContext, MigrationProgress.POSTPROCESSING, + MigrationProgress.COMPLETED); + LOG.info("Finishing PostProcessor execution"); + } + + return null; + } + }); + } catch (final Exception e) { + LOG.error("Error during PostProcessor execution", e); + if (e instanceof RuntimeException re) { + throw re; + } else { + throw new RuntimeException(e); + } + } + } public void setDatabaseCopyTaskRepository(final DatabaseCopyTaskRepository databaseCopyTaskRepository) { this.databaseCopyTaskRepository = databaseCopyTaskRepository; @@ -105,11 +98,11 @@ public void setMigrationContext(final MigrationContext migrationContext) { this.migrationContext = migrationContext; } - public void 
setPerformanceProfiler(final PerformanceProfiler performanceProfiler) { - this.performanceProfiler = performanceProfiler; - } + public void setPerformanceProfiler(final PerformanceProfiler performanceProfiler) { + this.performanceProfiler = performanceProfiler; + } - public void setPostProcessors(final List postProcessors) { - this.postProcessors = postProcessors; - } + public void setPostProcessors(final List postProcessors) { + this.postProcessors = postProcessors; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyDatabaseTableEventListener.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyDatabaseTableEventListener.java index e7372df..c5acbc7 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyDatabaseTableEventListener.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/events/handlers/CopyDatabaseTableEventListener.java @@ -1,30 +1,32 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync.events.handlers; -import com.sap.cx.boosters.commercedbsync.events.CopyDatabaseTableEvent; +import com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants; +import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTaskRepository; +import com.sap.cx.boosters.commercedbsync.service.DatabaseMigrationCopyService; import de.hybris.platform.servicelayer.cluster.ClusterService; +import de.hybris.platform.servicelayer.config.ConfigurationService; import de.hybris.platform.servicelayer.event.impl.AbstractEventListener; import com.sap.cx.boosters.commercedbsync.context.CopyContext; import com.sap.cx.boosters.commercedbsync.context.MigrationContext; +import com.sap.cx.boosters.commercedbsync.events.CopyDatabaseTableEvent; import com.sap.cx.boosters.commercedbsync.performance.PerformanceProfiler; import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTask; -import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTaskRepository; -import com.sap.cx.boosters.commercedbsync.service.DatabaseMigrationCopyService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.MDC; +import java.io.Serializable; import java.util.HashSet; +import java.util.Map; import java.util.Set; import java.util.stream.Collectors; -import static com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants.MDC_CLUSTERID; -import static com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants.MDC_MIGRATIONID; - /** * Listener that starts the Migration Process on a given node */ @@ -41,27 +43,36 @@ public class CopyDatabaseTableEventListener extends AbstractEventListener(), performanceProfiler); + try (MDC.MDCCloseable ignored = MDC.putCloseable(CommercedbsyncConstants.MDC_MIGRATIONID, migrationId); + MDC.MDCCloseable ignored2 = MDC.putCloseable(CommercedbsyncConstants.MDC_CLUSTERID, + String.valueOf(clusterService.getClusterId()))) { + CopyContext copyContext = new CopyContext(migrationId, migrationContext, new HashSet<>(), + performanceProfiler); Set copyTableTasks = databaseCopyTaskRepository.findPendingTasks(copyContext); - Set items = copyTableTasks.stream().map(task -> new CopyContext.DataCopyItem(task.getSourcetablename(), task.getTargettablename(), task.getColumnmap(), task.getSourcerowcount())).collect(Collectors.toSet()); + Set items = copyTableTasks 
+ .stream().map(task -> new CopyContext.DataCopyItem(task.getSourcetablename(), + task.getTargettablename(), task.getColumnmap(), task.getSourcerowcount())) + .collect(Collectors.toSet()); copyContext.getCopyItems().addAll(items); databaseMigrationCopyService.copyAllAsync(copyContext); - } catch (Exception e) { throw new RuntimeException(e); } } + private void processPropertyOverrides(Map propertyOverrideMap) { + propertyOverrideMap.forEach((key, value) -> { + configurationService.getConfiguration().setProperty(key, String.valueOf(value)); + }); + } public void setDatabaseMigrationCopyService(final DatabaseMigrationCopyService databaseMigrationCopyService) { this.databaseMigrationCopyService = databaseMigrationCopyService; @@ -85,4 +96,7 @@ public void setClusterService(ClusterService clusterService) { this.clusterService = clusterService; } + public void setConfigurationService(ConfigurationService configurationService) { + this.configurationService = configurationService; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/DataCopyTableFilter.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/DataCopyTableFilter.java index 6457cff..80b5e8e 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/DataCopyTableFilter.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/DataCopyTableFilter.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/CompositeDataCopyTableFilter.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/CompositeDataCopyTableFilter.java index 0a883c6..5642562 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/CompositeDataCopyTableFilter.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/CompositeDataCopyTableFilter.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/ExclusionDataCopyTableFilter.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/ExclusionDataCopyTableFilter.java index 9d8062e..338a787 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/ExclusionDataCopyTableFilter.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/ExclusionDataCopyTableFilter.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/InclusionDataCopyTableFilter.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/InclusionDataCopyTableFilter.java index 4a3d02d..91afb96 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/InclusionDataCopyTableFilter.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/InclusionDataCopyTableFilter.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/IncrementalDataCopyTableFilter.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/IncrementalDataCopyTableFilter.java index cf6ff4b..5047702 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/IncrementalDataCopyTableFilter.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/filter/impl/IncrementalDataCopyTableFilter.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -24,7 +24,8 @@ public Predicate filter(MigrationContext context) { } Set incrementalTables = context.getIncrementalTables(); if (incrementalTables == null || incrementalTables.isEmpty()) { - throw new IllegalStateException("At least one table for incremental copy must be specified. Check property " + CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_TABLES); + throw new IllegalStateException("At least one table for incremental copy must be specified. Check property " + + CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_TABLES); } return p -> incrementalTables.stream().anyMatch(e -> StringUtils.equalsIgnoreCase(e, p)); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/interceptors/DefaultCMTRemoveInterceptor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/interceptors/DefaultCMTRemoveInterceptor.java index bc80218..5097bc5 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/interceptors/DefaultCMTRemoveInterceptor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/interceptors/DefaultCMTRemoveInterceptor.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -16,9 +16,11 @@ import de.hybris.platform.servicelayer.model.ModelService; import de.hybris.platform.servicelayer.type.TypeService; import de.hybris.platform.util.Config; + import java.util.Collections; import java.util.List; import javax.annotation.Nonnull; + import org.apache.commons.lang.StringUtils; import com.sap.cx.boosters.commercedbsync.enums.ItemChangeType; import com.sap.cx.boosters.commercedbsync.model.ItemDeletionMarkerModel; @@ -27,91 +29,88 @@ public class DefaultCMTRemoveInterceptor implements RemoveInterceptor { - private static final Logger LOG = LoggerFactory.getLogger(DefaultCMTRemoveInterceptor.class); + private static final Logger LOG = LoggerFactory.getLogger(DefaultCMTRemoveInterceptor.class); - private static final boolean deletionsEnabled = Config.getBoolean(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_ITEMTYPES_ENABLED,false); + private static final boolean deletionsEnabled = Config + .getBoolean(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_ITEMTYPES_ENABLED, false); - private static final String COMMA_SEPERATOR = ","; + private static final String COMMA_SEPERATOR = ","; - private ModelService modelService; - private TypeService typeService; + private ModelService modelService; + private TypeService typeService; - @Override - public void onRemove(@Nonnull final ItemModel model, @Nonnull final InterceptorContext ctx) { + @Override + public void onRemove(@Nonnull final ItemModel model, @Nonnull final InterceptorContext ctx) { - if (!deletionsEnabled ) { - if (LOG.isDebugEnabled()) { - LOG.debug("CMT deletions is not enabled for ItemModel."); - } - return; - } + if (!deletionsEnabled) { + if (LOG.isDebugEnabled()) { + LOG.debug("CMT deletions is not enabled for ItemModel."); + } + return; + } - List deletionsItemType = getListDeletionsItemType(); + List deletionsItemType = getListDeletionsItemType(); - if ( deletionsItemType == null || deletionsItemType.isEmpty()) { - if (LOG.isDebugEnabled()) { - LOG.debug("No table defined to create a deletion record for CMT "); - } - return; + if (deletionsItemType == null || deletionsItemType.isEmpty()) { + if (LOG.isDebugEnabled()) { + LOG.debug("No table defined to create a deletion record for CMT "); + } + return; + } + + if (deletionsItemType.contains(model.getItemtype().toLowerCase())) { + + ItemDeletionMarkerModel idm = null; + try { + if (LOG.isDebugEnabled()) { + LOG.info("inside remove DefaultCMTRemoveInterceptor for " + + typeService.getComposedTypeForCode(model.getItemtype()).getTable()); + } + + idm = modelService.create(ItemDeletionMarkerModel.class); + fillInitialDeletionMarker(idm, model.getPk().getLong(), + typeService.getComposedTypeForCode(model.getItemtype()).getTable()); + modelService.save(idm); + + } catch (ModelSavingException ex) { + LOG.error("Exception during save for CMT table {} , PK : {} ", model.getItemtype(), model.getPk()); + } + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("Table {} not defined for CMT deletion record", model.getItemtype()); + } + } } - if (deletionsItemType.contains(model.getItemtype().toLowerCase())) { + private void fillInitialDeletionMarker(final ItemDeletionMarkerModel marker, final Long itemPK, + final String table) { + Preconditions.checkNotNull(marker, "ItemDeletionMarker cannot be null in this place"); + Preconditions.checkArgument(marker.getItemModelContext().isNew(), "ItemDeletionMarker must be new"); - ItemDeletionMarkerModel idm = null; - try { - if(LOG.isDebugEnabled()){ - LOG.info("inside remove 
DefaultCMTRemoveInterceptor for" + String - .valueOf(typeService.getComposedTypeForCode(model.getItemtype()).getTable())); + marker.setItemPK(itemPK); + marker.setTable(table); + marker.setChangeType(ItemChangeType.DELETED); + } + + private List getListDeletionsItemType() { + // TO DO change to static variable + final String itemTypes = Config + .getString(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_ITEMTYPES, ""); + if (StringUtils.isEmpty(itemTypes)) { + return Collections.emptyList(); } + List result = Splitter.on(COMMA_SEPERATOR).omitEmptyStrings().trimResults() + .splitToList(itemTypes.toLowerCase()); - idm = modelService.create(ItemDeletionMarkerModel.class); - fillInitialDeletionMarker(idm, model.getPk().getLong(), - typeService.getComposedTypeForCode(model.getItemtype()).getTable()); - modelService.save(idm); - - } catch (ModelSavingException ex) { - LOG.error("Exception during save for CMT table {} , PK : {} ", model.getItemtype(), - model.getPk()); - } - } else { - if (LOG.isDebugEnabled()) { - LOG.debug("Table {} not defined for CMT deletion record", model.getItemtype()); - } - } - } - - private void fillInitialDeletionMarker(final ItemDeletionMarkerModel marker, final Long itemPK, - final String table) { - Preconditions.checkNotNull(marker, "ItemDeletionMarker cannot be null in this place"); - Preconditions - .checkArgument(marker.getItemModelContext().isNew(), "ItemDeletionMarker must be new"); - - marker.setItemPK(itemPK); - marker.setTable(table); - marker.setChangeType(ItemChangeType.DELETED); - } - - private List getListDeletionsItemType() { - // TO DO change to static variable - final String itemTypes = Config.getString( - CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_ITEMTYPES, ""); - if (StringUtils.isEmpty(itemTypes)) { - return Collections.emptyList(); + return result; } - List result = Splitter.on(COMMA_SEPERATOR) - .omitEmptyStrings() - .trimResults() - .splitToList(itemTypes.toLowerCase()); - return result; - } - - public void setModelService(final ModelService modelService) { - this.modelService = modelService; - } + public void setModelService(final ModelService modelService) { + this.modelService = modelService; + } - public void setTypeService(final TypeService typeService) { - this.typeService = typeService; - } + public void setTypeService(final TypeService typeService) { + this.typeService = typeService; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jalo/ItemDeletionMarker.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jalo/ItemDeletionMarker.java index 34243c3..080b02e 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jalo/ItemDeletionMarker.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jalo/ItemDeletionMarker.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -11,22 +11,20 @@ import de.hybris.platform.jalo.SessionContext; import de.hybris.platform.jalo.type.ComposedType; import org.apache.log4j.Logger; -import com.sap.cx.boosters.commercedbsync.jalo.GeneratedItemDeletionMarker; -public class ItemDeletionMarker extends GeneratedItemDeletionMarker -{ - @SuppressWarnings("unused") - private static final Logger LOG = Logger.getLogger( ItemDeletionMarker.class.getName() ); - - @Override - protected Item createItem(final SessionContext ctx, final ComposedType type, final ItemAttributeMap allAttributes) throws JaloBusinessException - { - // business code placed here will be executed before the item is created - // then create the item - final Item item = super.createItem( ctx, type, allAttributes ); - // business code placed here will be executed after the item was created - // and return the item - return item; - } - +public class ItemDeletionMarker extends GeneratedItemDeletionMarker { + @SuppressWarnings("unused") + private static final Logger LOG = Logger.getLogger(ItemDeletionMarker.class.getName()); + + @Override + protected Item createItem(final SessionContext ctx, final ComposedType type, final ItemAttributeMap allAttributes) + throws JaloBusinessException { + // business code placed here will be executed before the item is created + // then create the item + final Item item = super.createItem(ctx, type, allAttributes); + // business code placed here will be executed after the item was created + // and return the item + return item; + } + } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/AbstractMigrationJobPerformable.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/AbstractMigrationJobPerformable.java index 606de17..86c53b6 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/AbstractMigrationJobPerformable.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/AbstractMigrationJobPerformable.java @@ -1,10 +1,12 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync.jobs; +import java.io.Serializable; import java.sql.Connection; import java.sql.ResultSet; import java.sql.Statement; @@ -12,8 +14,12 @@ import java.time.OffsetDateTime; import java.time.ZoneOffset; import java.util.Arrays; +import java.util.Optional; import java.util.Set; +import com.sap.cx.boosters.commercedbsync.context.LaunchOptions; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; +import com.sap.cx.boosters.commercedbsync.model.cron.MigrationCronJobModel; import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; @@ -40,14 +46,16 @@ public abstract class AbstractMigrationJobPerformable extends AbstractJobPerform private static final Logger LOG = LoggerFactory.getLogger(AbstractMigrationJobPerformable.class); - private static final String[] RUNNING_MIGRATION = new String[] { MigrationProgress.RUNNING.toString(), MigrationProgress.PROCESSED.toString(), MigrationProgress.POSTPROCESSING.toString() }; - private static final String[] TYPE_SYSTEM_RELATED_TYPES = new String[]{"atomictypes", "attributeDescriptors", "collectiontypes", "composedtypes", "enumerationvalues", "maptypes"}; + private static final String[] RUNNING_MIGRATION = new String[]{MigrationProgress.RUNNING.toString(), + MigrationProgress.PROCESSED.toString(), MigrationProgress.POSTPROCESSING.toString()}; + private static final String[] TYPE_SYSTEM_RELATED_TYPES = new String[]{"atomictypes", "attributeDescriptors", + "collectiontypes", "composedtypes", "enumerationvalues", "maptypes"}; - private static final String MIGRATION_UPDATE_TYPE_SYSTEM = "migration.ds.update.typesystem.table"; + private static final String MIGRATION_UPDATE_TYPE_SYSTEM = "migration.ds.update.typesystem.table"; private static final String SOURCE_TYPESYSTEMNAME = "migration.ds.source.db.typesystemname"; private static final String SOURCE_TYPESYSTEMSUFFIX = "migration.ds.source.db.typesystemsuffix"; - + // spotless:off private static final String TYPESYSTEM_SELECT_STATEMENT = "IF (EXISTS (SELECT * \n" + " FROM INFORMATION_SCHEMA.TABLES \n" + " WHERE TABLE_SCHEMA = '%s' \n" + @@ -55,196 +63,196 @@ public abstract class AbstractMigrationJobPerformable extends AbstractJobPerform "BEGIN\n" + " select name from %2$s where state = 'current'\n" + "END"; + // spotless:on - - protected DatabaseMigrationService databaseMigrationService; - protected IncrementalMigrationContext incrementalMigrationContext; + protected DatabaseMigrationService databaseMigrationService; + protected MigrationContext migrationContext; protected CronJobService cronJobService; protected String currentMigrationId; - private JdbcTemplate jdbcTemplate; + private JdbcTemplate jdbcTemplate; @Override - public boolean isPerformable() - { - for(CronJobModel cronJob : getCronJobService().getRunningOrRestartedCronJobs()){ - if ((cronJob instanceof IncrementalMigrationCronJobModel - || cronJob instanceof FullMigrationCronJobModel)) { - LOG.info("Previous migrations job already running {} and Type {} ", cronJob.getCode(), cronJob.getItemtype()); + public boolean isPerformable() { + for (CronJobModel cronJob : getCronJobService().getRunningOrRestartedCronJobs()) { + if ((cronJob instanceof IncrementalMigrationCronJobModel || cronJob instanceof FullMigrationCronJobModel)) { + LOG.info("Previous migrations job already running {} and Type {} ", cronJob.getCode(), + cronJob.getItemtype()); return false; } } return true; } - /* - * ORACLE_TARGET - START The 
updateTypesystemTabl() also updates the TS. There is scope to make these 2 update - * methods efficient i.e set the TS only once. - */ - - protected void updateSourceTypesystemProperty() throws Exception - { - // Disabling Post processor - Config.setParameter("migration.data.postprocessor.tscheck.disable", "yes"); - - if(BooleanUtils.isFalse(Config.getBoolean(MIGRATION_UPDATE_TYPE_SYSTEM, false))){ - return; - } - DataRepository sourceRepository = incrementalMigrationContext.getDataSourceRepository(); - try( - Connection connection = sourceRepository.getConnection(); - Statement stmt = connection.createStatement(); - ResultSet resultSet = stmt.executeQuery(String.format(TYPESYSTEM_SELECT_STATEMENT, - sourceRepository.getDataSourceConfiguration().getSchema(), "CCV2_TYPESYSTEM_MIGRATIONS")); - ) { - LOG.debug("SETTING the Type System from CCV2_TYPESYSTEM_MIGRATIONS" + String.format(TYPESYSTEM_SELECT_STATEMENT, - sourceRepository.getDataSourceConfiguration().getSchema(), "CCV2_TYPESYSTEM_MIGRATIONS")); - - String typeSystemName = null; - if (resultSet.next()) - { - typeSystemName = resultSet.getString("name"); - } - else - { - return; - } - if (typeSystemName != null && !typeSystemName.isEmpty()) - { - Config.setParameter(SOURCE_TYPESYSTEMNAME, typeSystemName); - LOG.info("SETTING typeSystemName = " + typeSystemName); - return; - } - } - } + /* + * ORACLE_TARGET - START The updateTypesystemTabl() also updates the TS. There + * is scope to make these 2 update methods efficient i.e set the TS only once. + */ + + protected void updateSourceTypesystemProperty() throws Exception { + // Disabling Post processor + Config.setParameter("migration.data.postprocessor.tscheck.disable", "true"); + + if (BooleanUtils.isFalse(Config.getBoolean(MIGRATION_UPDATE_TYPE_SYSTEM, false))) { + return; + } + DataRepository sourceRepository = migrationContext.getDataSourceRepository(); + try (Connection connection = sourceRepository.getConnection(); + Statement stmt = connection.createStatement(); + ResultSet resultSet = stmt.executeQuery(String.format(TYPESYSTEM_SELECT_STATEMENT, + sourceRepository.getDataSourceConfiguration().getSchema(), "CCV2_TYPESYSTEM_MIGRATIONS"))) { + LOG.debug("SETTING the Type System from CCV2_TYPESYSTEM_MIGRATIONS" + + String.format(TYPESYSTEM_SELECT_STATEMENT, + sourceRepository.getDataSourceConfiguration().getSchema(), "CCV2_TYPESYSTEM_MIGRATIONS")); + + String typeSystemName; + if (resultSet.next()) { + typeSystemName = resultSet.getString("name"); + + if (StringUtils.isNotEmpty(typeSystemName)) { + Config.setParameter(SOURCE_TYPESYSTEMNAME, typeSystemName); + LOG.info("SETTING typeSystemName = " + typeSystemName); + } + } + } + } + protected void updateTypesystemTable(Set migrationItems) throws Exception { - if(BooleanUtils.isFalse(Config.getBoolean(MIGRATION_UPDATE_TYPE_SYSTEM, false))){ - return; - } - DataRepository sourceRepository = incrementalMigrationContext.getDataSourceRepository(); - for(final String tableName: migrationItems){ - if(Arrays.stream(TYPE_SYSTEM_RELATED_TYPES).anyMatch(t -> StringUtils.startsWithIgnoreCase(tableName, t))) - { - try ( - Connection connection = sourceRepository.getConnection(); - Statement stmt = connection.createStatement(); - ResultSet resultSet = stmt.executeQuery(String.format(TYPESYSTEM_SELECT_STATEMENT, - sourceRepository.getDataSourceConfiguration().getSchema(),"CCV2_TYPESYSTEM_MIGRATIONS")); - ) - { - LOG.debug("Type System table - table found in list, get latest TS => " + String.format(TYPESYSTEM_SELECT_STATEMENT, - 
sourceRepository.getDataSourceConfiguration().getSchema(), "CCV2_TYPESYSTEM_MIGRATIONS")); - String typeSystemName = null; - if (resultSet.next()) { - typeSystemName = resultSet.getString("name");; - } else{ - return; - } - - final String tsBaseTableName = extractTSbaseTableName(tableName); - - LOG.info("Type System table - table found in list, get latest Table name " + String.format( - "SELECT TableName FROM %s WHERE Typecode IS NOT NULL AND TableName LIKE '%s' AND TypeSystemName = '%s'", - CommercedbsyncConstants.DEPLOYMENTS_TABLE, tsBaseTableName + "%", typeSystemName)); - final String typeSystemTablesQuery = String.format( - "SELECT TableName FROM %s WHERE Typecode IS NOT NULL AND TableName LIKE '%s' AND TypeSystemName = '%s'", - CommercedbsyncConstants.DEPLOYMENTS_TABLE, tsBaseTableName + "%", typeSystemName); - final ResultSet typeSystemtableresultSet = stmt.executeQuery(typeSystemTablesQuery); - String typeSystemTableName = null; - if (typeSystemtableresultSet.next()) - { - typeSystemTableName = typeSystemtableresultSet.getString("TableName"); - } - // ORACLE_TARGET - START, add null check and return; - if (typeSystemTableName != null) - { - Config.setParameter(SOURCE_TYPESYSTEMNAME, typeSystemName); - final String typesystemsuffix = typeSystemTableName.substring(tsBaseTableName.length()); - - Config.setParameter(SOURCE_TYPESYSTEMSUFFIX, typesystemsuffix); - LOG.info("typeSystemName = " + typeSystemName + ",typesystemsuffix = " + typesystemsuffix); - return; - } - } - } - } - } - - /* - * If enumerationvalueslp, then extract enumerationvalues as base table name. - */ - private String extractTSbaseTableName(final String tableNameFromMigrationItems) - { - String tsBaseTableName = tableNameFromMigrationItems; - - // if it ends with lp - if (tableNameFromMigrationItems.toLowerCase().endsWith("lp")) - { - tsBaseTableName = tableNameFromMigrationItems.substring(0, tableNameFromMigrationItems.length() - 2); - } - - return tsBaseTableName; - } - - protected MigrationStatus waitForFinishCronjobs(IncrementalMigrationContext context, String migrationID, - final CronJobModel cronJobModel) throws Exception { - MigrationStatus status; - Thread.sleep(5000); - boolean aborted = false; - long since = 0; - do { - OffsetDateTime sinceTime = OffsetDateTime.ofInstant(Instant.ofEpochMilli(since), ZoneOffset.UTC); - status = databaseMigrationService.getMigrationState(context, migrationID,sinceTime); - Thread.sleep(5000); - since = System.currentTimeMillis(); - if (isJobStateAborted(cronJobModel)) - { - aborted = true; - break; - } - } while (StringUtils.equalsAnyIgnoreCase(status.getStatus().toString(), RUNNING_MIGRATION)); - - if (aborted) - { - LOG.info(" Aborted ...STOPPING migration "); - databaseMigrationService.stopMigration(incrementalMigrationContext, currentMigrationId); - LOG.error("Database migration has been ABORTED, Migration State= " + status + ", Total Tasks " - + status.getTotalTasks() + ", migration id =" + status.getMigrationID() + ", Completed Tasks " - + status.getCompletedTasks()); - clearAbortRequestedIfNeeded(cronJobModel); - throw new AbortCronJobException("CronJOB ABORTED"); - } - - if (status.isFailed()) { - LOG.error("Database migration FAILED, Migration State= " + status + ", Total Tasks " - + status.getTotalTasks() + ", migration id =" + status.getMigrationID() + ", Completed Tasks " - + status.getCompletedTasks()); - throw new Exception("Database migration failed"); - } - - return status; - } - - protected boolean isJobStateAborted(final CronJobModel cronJobModel) - { - 
this.modelService.refresh(cronJobModel); - LOG.info("cron job status = " + cronJobModel.getStatus()); - LOG.info("cron job request to abort =" + cronJobModel.getRequestAbort()); - return ((cronJobModel.getStatus() == CronJobStatus.ABORTED) - || (cronJobModel.getRequestAbort() == null ? false : cronJobModel.getRequestAbort())); - } + if (BooleanUtils.isFalse(Config.getBoolean(MIGRATION_UPDATE_TYPE_SYSTEM, false))) { + return; + } + DataRepository sourceRepository = migrationContext.getDataSourceRepository(); + for (final String tableName : migrationItems) { + if (Arrays.stream(TYPE_SYSTEM_RELATED_TYPES) + .anyMatch(t -> StringUtils.startsWithIgnoreCase(tableName, t))) { + try (Connection connection = sourceRepository.getConnection(); + Statement stmt = connection.createStatement(); + ResultSet resultSet = stmt.executeQuery(String.format(TYPESYSTEM_SELECT_STATEMENT, + sourceRepository.getDataSourceConfiguration().getSchema(), + "CCV2_TYPESYSTEM_MIGRATIONS"))) { + LOG.debug("Type System table - table found in list, get latest TS => " + String.format( + TYPESYSTEM_SELECT_STATEMENT, sourceRepository.getDataSourceConfiguration().getSchema(), + "CCV2_TYPESYSTEM_MIGRATIONS")); + String typeSystemName = null; + if (resultSet.next()) { + typeSystemName = resultSet.getString("name"); + } else { + return; + } + + final String tsBaseTableName = extractTSbaseTableName(tableName); + + LOG.info("Type System table - table found in list, get latest Table name " + String.format( + "SELECT TableName FROM %s WHERE Typecode IS NOT NULL AND TableName LIKE '%s' AND TypeSystemName = '%s'", + CommercedbsyncConstants.DEPLOYMENTS_TABLE, tsBaseTableName + "%", typeSystemName)); + final String typeSystemTablesQuery = String.format( + "SELECT TableName FROM %s WHERE Typecode IS NOT NULL AND TableName LIKE '%s' AND TypeSystemName = '%s'", + CommercedbsyncConstants.DEPLOYMENTS_TABLE, tsBaseTableName + "%", typeSystemName); + final ResultSet typeSystemtableresultSet = stmt.executeQuery(typeSystemTablesQuery); + String typeSystemTableName = null; + if (typeSystemtableresultSet.next()) { + typeSystemTableName = typeSystemtableresultSet.getString("TableName"); + } + + if (typeSystemTableName != null) { + Config.setParameter(SOURCE_TYPESYSTEMNAME, typeSystemName); + final String typesystemsuffix = typeSystemTableName.substring(tsBaseTableName.length()); + + Config.setParameter(SOURCE_TYPESYSTEMSUFFIX, typesystemsuffix); + LOG.info("typeSystemName = " + typeSystemName + ",typesystemsuffix = " + typesystemsuffix); + return; + } + } + } + } + } + + /* + * If enumerationvalueslp, then extract enumerationvalues as base table name. 
+ */ + private String extractTSbaseTableName(final String tableNameFromMigrationItems) { + String tsBaseTableName = tableNameFromMigrationItems; + + // if it ends with lp + if (tableNameFromMigrationItems.toLowerCase().endsWith("lp")) { + tsBaseTableName = tableNameFromMigrationItems.substring(0, tableNameFromMigrationItems.length() - 2); + } + + return tsBaseTableName; + } + + protected MigrationStatus waitForFinishCronjobs(IncrementalMigrationContext context, String migrationID, + final CronJobModel cronJobModel) throws Exception { + MigrationStatus status; + Thread.sleep(5000); + boolean aborted = false; + long since = 0; + do { + OffsetDateTime sinceTime = OffsetDateTime.ofInstant(Instant.ofEpochMilli(since), ZoneOffset.UTC); + status = databaseMigrationService.getMigrationState(context, migrationID, sinceTime); + Thread.sleep(5000); + since = System.currentTimeMillis(); + if (isJobStateAborted(cronJobModel)) { + aborted = true; + break; + } + } while (StringUtils.equalsAnyIgnoreCase(status.getStatus().toString(), RUNNING_MIGRATION)); + + if (aborted) { + LOG.info(" Aborted ...STOPPING migration "); + databaseMigrationService.stopMigration(migrationContext, currentMigrationId); + LOG.error("Database migration has been ABORTED, Migration State= " + status + ", Total Tasks " + + status.getTotalTasks() + ", migration id =" + status.getMigrationID() + ", Completed Tasks " + + status.getCompletedTasks()); + clearAbortRequestedIfNeeded(cronJobModel); + throw new AbortCronJobException("CronJOB ABORTED"); + } + + if (status.isFailed()) { + LOG.error("Database migration FAILED, Migration State= " + status + ", Total Tasks " + + status.getTotalTasks() + ", migration id =" + status.getMigrationID() + ", Completed Tasks " + + status.getCompletedTasks()); + throw new Exception("Database migration failed"); + } + + return status; + } + + protected LaunchOptions createLaunchOptions(MigrationCronJobModel migrationCronJob) { + final LaunchOptions launchOptions = new LaunchOptions(); + + putLaunchOptionProperty(launchOptions, CommercedbsyncConstants.MIGRATION_DATA_MAXPRALLELTABLECOPY, + migrationCronJob.getMaxParallelTableCopy()); + putLaunchOptionProperty(launchOptions, CommercedbsyncConstants.MIGRATION_DATA_WORKERS_READER_MAXTASKS, + migrationCronJob.getMaxReaderWorkers()); + putLaunchOptionProperty(launchOptions, CommercedbsyncConstants.MIGRATION_DATA_WORKERS_WRITER_MAXTASKS, + migrationCronJob.getMaxWriterWorkers()); + putLaunchOptionProperty(launchOptions, CommercedbsyncConstants.MIGRATION_DATA_READER_BATCHSIZE, + migrationCronJob.getBatchSize()); + + return launchOptions; + } + + private void putLaunchOptionProperty(final LaunchOptions launchOptions, String property, Serializable value) { + launchOptions.getPropertyOverrideMap().put(property, + Optional.ofNullable(value).orElseGet(() -> Config.getInt(property, 1))); + } + + protected boolean isJobStateAborted(final CronJobModel cronJobModel) { + this.modelService.refresh(cronJobModel); + LOG.info("cron job status = " + cronJobModel.getStatus()); + LOG.info("cron job request to abort =" + cronJobModel.getRequestAbort()); + return ((cronJobModel.getStatus() == CronJobStatus.ABORTED) + || (cronJobModel.getRequestAbort() != null && cronJobModel.getRequestAbort())); + } @Override public boolean isAbortable() { return true; } - public IncrementalMigrationContext getIncrementalMigrationContext() { - return incrementalMigrationContext; - } - - public void setIncrementalMigrationContext(IncrementalMigrationContext incrementalMigrationContext) { - 
this.incrementalMigrationContext = incrementalMigrationContext; + public void setMigrationContext(MigrationContext migrationContext) { + this.migrationContext = migrationContext; } public CronJobService getCronJobService() { diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/FullMigrationJob.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/FullMigrationJob.java index 9150d66..d3ebcd6 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/FullMigrationJob.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/FullMigrationJob.java @@ -1,77 +1,71 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync.jobs; import com.google.common.base.Preconditions; +import com.sap.cx.boosters.commercedbsync.context.IncrementalMigrationContext; import de.hybris.platform.cronjob.enums.CronJobResult; import de.hybris.platform.cronjob.enums.CronJobStatus; import de.hybris.platform.cronjob.jalo.AbortCronJobException; import de.hybris.platform.cronjob.model.CronJobModel; import de.hybris.platform.servicelayer.cronjob.PerformResult; -import com.sap.cx.boosters.commercedbsync.MigrationStatus; import com.sap.cx.boosters.commercedbsync.model.cron.FullMigrationCronJobModel; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.time.Instant; -import java.time.OffsetDateTime; -import java.time.ZoneOffset; - - /** * This class offers functionality for FullMigrationJob. */ public class FullMigrationJob extends AbstractMigrationJobPerformable { - private static final Logger LOG = LoggerFactory.getLogger(FullMigrationJob.class); + private static final Logger LOG = LoggerFactory.getLogger(FullMigrationJob.class); + + @Override + public PerformResult perform(final CronJobModel cronJobModel) { + FullMigrationCronJobModel fullMigrationCronJobModel; - @Override - public PerformResult perform(final CronJobModel cronJobModel) { - FullMigrationCronJobModel fullMigrationCronJobModel; + Preconditions.checkState(migrationContext instanceof IncrementalMigrationContext, + "Migration context is not activated for data export via cron job"); + Preconditions.checkState((cronJobModel instanceof FullMigrationCronJobModel), + "cronJobModel must the instance of FullMigrationCronJobModel"); + fullMigrationCronJobModel = (FullMigrationCronJobModel) cronJobModel; + Preconditions.checkNotNull(fullMigrationCronJobModel.getMigrationItems(), + "We expect at least one table for the full migration"); + Preconditions.checkState( + null != fullMigrationCronJobModel.getMigrationItems() + && !fullMigrationCronJobModel.getMigrationItems().isEmpty(), + "We expect at least one table for the full migration"); - Preconditions - .checkState((cronJobModel instanceof FullMigrationCronJobModel), - "cronJobModel must the instance of FullMigrationCronJobModel"); - fullMigrationCronJobModel = (FullMigrationCronJobModel) cronJobModel; - Preconditions.checkNotNull(fullMigrationCronJobModel.getMigrationItems(), - "We expect at least one table for the full migration"); - Preconditions.checkState( - null != fullMigrationCronJobModel.getMigrationItems() && !fullMigrationCronJobModel - .getMigrationItems().isEmpty(), - "We expect at least one table for the full migration"); + final IncrementalMigrationContext incrementalMigrationContext = (IncrementalMigrationContext) migrationContext; + 
boolean caughtExeption = false; + try { + incrementalMigrationContext.setIncludedTables(fullMigrationCronJobModel.getMigrationItems()); + updateSourceTypesystemProperty(); + updateTypesystemTable(fullMigrationCronJobModel.getMigrationItems()); + incrementalMigrationContext.setDeletionEnabled(false); + incrementalMigrationContext.setLpTableMigrationEnabled(false); + incrementalMigrationContext.setTruncateEnabled(fullMigrationCronJobModel.isTruncateEnabled()); + incrementalMigrationContext + .setSchemaMigrationAutoTriggerEnabled(fullMigrationCronJobModel.isSchemaAutotrigger()); + incrementalMigrationContext.setIncrementalModeEnabled(false); + incrementalMigrationContext + .setFullDatabaseMigrationEnabled(fullMigrationCronJobModel.isFullDatabaseMigration()); + currentMigrationId = databaseMigrationService.startMigration(incrementalMigrationContext, + createLaunchOptions(fullMigrationCronJobModel)); - boolean caughtExeption = false; - try { - incrementalMigrationContext - .setIncludedTables(fullMigrationCronJobModel.getMigrationItems()); - // ORACLE_TARGET - START there is scope to make the 2 update methods - // efficient - updateSourceTypesystemProperty(); - // ORACLE_TARGET - END there is scope to make the 2 methods - // efficient - updateTypesystemTable(fullMigrationCronJobModel.getMigrationItems()); - incrementalMigrationContext.setDeletionEnabled(false); - incrementalMigrationContext.setLpTableMigrationEnabled(false); - incrementalMigrationContext.setTruncateEnabled(fullMigrationCronJobModel.isTruncateEnabled()); - incrementalMigrationContext.setSchemaMigrationAutoTriggerEnabled(fullMigrationCronJobModel.isSchemaAutotrigger()); - incrementalMigrationContext.setIncrementalModeEnabled(false); - incrementalMigrationContext.setFullDatabaseMigrationEnabled(fullMigrationCronJobModel.isFullDatabaseMigration()); - currentMigrationId = databaseMigrationService.startMigration(incrementalMigrationContext); - MigrationStatus currentState = waitForFinishCronjobs(incrementalMigrationContext, currentMigrationId,cronJobModel); - } - catch (final AbortCronJobException e) - { - return new PerformResult(CronJobResult.ERROR, CronJobStatus.ABORTED); - } - catch (final Exception e) - { - caughtExeption = true; - LOG.error(" Exception caught: message= " + e.getMessage(), e); - } - return new PerformResult(caughtExeption ? CronJobResult.FAILURE : CronJobResult.SUCCESS, CronJobStatus.FINISHED); - } + waitForFinishCronjobs(incrementalMigrationContext, currentMigrationId, cronJobModel); + } catch (final AbortCronJobException e) { + return new PerformResult(CronJobResult.ERROR, CronJobStatus.ABORTED); + } catch (final Exception e) { + caughtExeption = true; + LOG.error(" Exception caught: message= " + e.getMessage(), e); + } + return new PerformResult(caughtExeption ? CronJobResult.FAILURE : CronJobResult.SUCCESS, + CronJobStatus.FINISHED); + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/IncrementalMigrationJob.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/IncrementalMigrationJob.java index cb03d3f..7ac5ca9 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/IncrementalMigrationJob.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/jobs/IncrementalMigrationJob.java @@ -1,22 +1,24 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync.jobs; import com.google.common.base.Preconditions; import com.google.common.base.Splitter; import com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants; +import com.sap.cx.boosters.commercedbsync.context.IncrementalMigrationContext; import de.hybris.platform.cronjob.enums.CronJobResult; import de.hybris.platform.cronjob.enums.CronJobStatus; import de.hybris.platform.cronjob.jalo.AbortCronJobException; import de.hybris.platform.cronjob.model.CronJobModel; import de.hybris.platform.jalo.type.TypeManager; import de.hybris.platform.servicelayer.cronjob.PerformResult; -import de.hybris.platform.servicelayer.model.ModelService; import de.hybris.platform.servicelayer.type.TypeService; import de.hybris.platform.util.Config; + import java.sql.Connection; import java.sql.ResultSet; import java.sql.Statement; @@ -24,6 +26,7 @@ import java.util.*; import java.util.stream.Collectors; import javax.annotation.Resource; + import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang.StringUtils; import com.sap.cx.boosters.commercedbsync.MigrationStatus; @@ -31,229 +34,225 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; - /** * This class offers functionality for IncrementalMigrationJob. */ public class IncrementalMigrationJob extends AbstractMigrationJobPerformable { - private static final Logger LOG = LoggerFactory.getLogger(IncrementalMigrationJob.class); - - private static final String LP_SUFFIX = "lp"; - - private static String tablePrefix = Config.getParameter("db.tableprefix") == null ? "" : Config.getParameter("db.tableprefix"); - - private static final String TABLE_EXISTS_SELECT_STATEMENT_MSSQL = "SELECT TABLE_NAME \n" + - " FROM INFORMATION_SCHEMA.TABLES \n" + - " WHERE TABLE_SCHEMA = '%s' \n" + - " AND TABLE_NAME = '%2$s'\n"; - private static final String TABLE_EXISTS_SELECT_STATEMENT_ORACLE = "SELECT TABLE_NAME \n" + " FROM dba_tables \n" - + " WHERE upper(owner) = upper('%s') \n" + " AND upper(table_name) = upper('%2$s') "; - - private static final String TABLE_EXISTS_SELECT_STATEMENT_HANA = "SELECT TABLE_NAME \n" + " FROM public.tables \n" - + " WHERE schema_name = upper('%s') \n" + " AND table_name = upper('%2$s') "; - - private static final String TABLE_EXISTS_SELECT_STATEMENT_POSTGRES = "SELECT TABLE_NAME \n" + " FROM public.tables \n" - + " WHERE schema_name = upper('%s') \n" + " AND table_name = upper('%2$s') "; - - - @Resource(name = "typeService") - private TypeService typeService; - - @Override - public PerformResult perform(final CronJobModel cronJobModel) { - IncrementalMigrationCronJobModel incrementalMigrationCronJob; - - Preconditions - .checkState((cronJobModel instanceof IncrementalMigrationCronJobModel), - "cronJobModel must the instance of FullMigrationCronJobModel"); - modelService.refresh(cronJobModel); - - incrementalMigrationCronJob = (IncrementalMigrationCronJobModel) cronJobModel; - Preconditions.checkState( - null != incrementalMigrationCronJob.getMigrationItems() && !incrementalMigrationCronJob - .getMigrationItems().isEmpty(), - "We expect at least one table for the incremental migration"); - final Set deletionTableSet = getDeletionTableSet(incrementalMigrationCronJob.getMigrationItems()); - MigrationStatus currentState; - String currentMigrationId; - boolean caughtExeption = false; - try { - - if (null != incrementalMigrationCronJob.getLastStartTime()) { - Instant timeStampInstant = incrementalMigrationCronJob.getLastStartTime().toInstant(); - 
LOG.info("For {} IncrementalTimestamp : {} ", incrementalMigrationCronJob.getCode(), - timeStampInstant); - incrementalMigrationContext.setIncrementalMigrationTimestamp(timeStampInstant); - } else { - LOG.error("IncrementalTimestamp is not set for Cronjobs : {} , Aborting the migration, and please set the *lastStartTime* before triggering" + - " ", incrementalMigrationCronJob.getCode()); - return new PerformResult(CronJobResult.ERROR, CronJobStatus.ABORTED); - } - incrementalMigrationContext.setIncrementalModeEnabled(true); - incrementalMigrationContext.setTruncateEnabled(Optional.ofNullable(incrementalMigrationCronJob.isTruncateEnabled()) - .map(e -> incrementalMigrationCronJob.isTruncateEnabled()) - .orElse(false)); - updateSourceTypesystemProperty(); - if (CollectionUtils.isNotEmpty(deletionTableSet) && isSchemaMigrationRequired(deletionTableSet)) { - // deletionTableSet.add(deletionTable); - LOG.info("Running Deletion incremental migration"); - incrementalMigrationContext.setSchemaMigrationAutoTriggerEnabled(false); - incrementalMigrationContext.setIncrementalTables(deletionTableSet); - incrementalMigrationContext.setDeletionEnabled(true); - incrementalMigrationContext.setLpTableMigrationEnabled(false); - currentMigrationId = databaseMigrationService.startMigration(incrementalMigrationContext); - currentState = databaseMigrationService.waitForFinish(this.incrementalMigrationContext, currentMigrationId); - } - - // Running incremental migration - Set tablesWithoutLp = incrementalMigrationCronJob.getMigrationItems().stream(). - filter(table-> !(StringUtils.endsWithIgnoreCase(table, LP_SUFFIX))).collect( - Collectors.toSet()); - if(CollectionUtils.isNotEmpty(tablesWithoutLp)){ - LOG.info("Running incremental migration for Non LP Table"); - incrementalMigrationContext.setDeletionEnabled(false); - incrementalMigrationContext.setLpTableMigrationEnabled(false); - incrementalMigrationContext.setIncrementalTables(tablesWithoutLp); - incrementalMigrationContext.setSchemaMigrationAutoTriggerEnabled(incrementalMigrationCronJob.isSchemaAutotrigger()); - currentMigrationId = databaseMigrationService.startMigration(incrementalMigrationContext); - currentState = waitForFinishCronjobs(incrementalMigrationContext, currentMigrationId,cronJobModel); - - } - // Running incremental migration for LP Table - Set tablesWithLp = incrementalMigrationCronJob.getMigrationItems().stream(). - filter(table-> StringUtils.endsWithIgnoreCase(table, LP_SUFFIX)).collect( - Collectors.toSet()); - if(CollectionUtils.isNotEmpty(tablesWithLp)){ - LOG.info("Running incremental migration for LP Table"); - incrementalMigrationContext.setDeletionEnabled(false); - incrementalMigrationContext.setLpTableMigrationEnabled(true); - incrementalMigrationContext.setIncrementalTables(tablesWithLp); - incrementalMigrationContext.setSchemaMigrationAutoTriggerEnabled(incrementalMigrationCronJob.isSchemaAutotrigger()); - currentMigrationId = databaseMigrationService.startMigration(incrementalMigrationContext); - currentState = waitForFinishCronjobs(incrementalMigrationContext, currentMigrationId,cronJobModel); - } - } - catch (final AbortCronJobException e) - { - caughtExeption = true; - return new PerformResult(CronJobResult.ERROR, CronJobStatus.ABORTED); - } - catch (final Exception e) { - caughtExeption = true; - LOG.error("Exception caught:", e); - } - if (!caughtExeption) { - incrementalMigrationCronJob.setLastStartTime(cronJobModel.getStartTime()); - modelService.save(cronJobModel); - } - return new PerformResult(caughtExeption ? 
CronJobResult.FAILURE : CronJobResult.SUCCESS, - CronJobStatus.FINISHED); - } - - private Set getDeletionTableSetFromItemType(Set incMigrationItems) { - String deletionItemTypes = Config - .getString(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_ITEMTYPES, ""); - if (StringUtils.isEmpty(deletionItemTypes)) { - return Collections.emptySet(); - } + private static final Logger LOG = LoggerFactory.getLogger(IncrementalMigrationJob.class); - final Set result = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + private static final String LP_SUFFIX = "lp"; - final List itemtypesArray = Splitter.on(",") - .omitEmptyStrings() - .trimResults() - .splitToList(deletionItemTypes.toLowerCase()); + private static final String TABLE_PREFIX = Config.getString("db.tableprefix", ""); - String tableName; - for(String itemType : itemtypesArray){ - tableName = typeService.getComposedTypeForCode(itemType).getTable(); + private static final boolean DELETIONS_BY_TYPECODES_ENABLED = Config + .getBoolean(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_TYPECODES_ENABLED, false); + private static final boolean DELETIONS_BY_ITEMTYPES_ENABLED = Config + .getBoolean(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_ITEMTYPES_ENABLED, false); - if(StringUtils.startsWith(tableName,tablePrefix)){ - tableName = StringUtils.removeStart(tableName,tablePrefix); - } - if(incMigrationItems.contains(tableName)){ - result.add(tableName); - } - } - return result; - } - - private Set getDeletionTableSetFromTypeCodes(Set incMigrationItems) { - String deletionTypecodes = Config - .getString(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_TYPECODES, ""); - if (StringUtils.isEmpty(deletionTypecodes)) { - return Collections.emptySet(); - } + private static final String TABLE_EXISTS_SELECT_STATEMENT_MSSQL = "SELECT TABLE_NAME \n" + + " FROM INFORMATION_SCHEMA.TABLES \n" + " WHERE TABLE_SCHEMA = '%s' \n" + " AND TABLE_NAME = '%2$s'\n"; + private static final String TABLE_EXISTS_SELECT_STATEMENT_ORACLE = "SELECT TABLE_NAME \n" + " FROM dba_tables \n" + + " WHERE upper(owner) = upper('%s') \n" + " AND upper(table_name) = upper('%2$s') "; - final Set result = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + private static final String TABLE_EXISTS_SELECT_STATEMENT_HANA = "SELECT TABLE_NAME \n" + " FROM public.tables \n" + + " WHERE schema_name = upper('%s') \n" + " AND table_name = upper('%2$s') "; - final List typecodeArray = Splitter.on(",") - .omitEmptyStrings() - .trimResults() - .splitToList(deletionTypecodes.toLowerCase()); + private static final String TABLE_EXISTS_SELECT_STATEMENT_POSTGRES = "SELECT TABLE_NAME \n" + + " FROM public.tables \n" + " WHERE schema_name = upper('%s') \n" + " AND table_name = upper('%2$s') "; - String tableName; - for(String typecode : typecodeArray){ - tableName = TypeManager.getInstance() - .getRootComposedType(Integer.valueOf(typecode)).getTable(); + @Resource(name = "typeService") + private TypeService typeService; - if(StringUtils.startsWith(tableName,tablePrefix)){ - tableName = StringUtils.removeStart(tableName,tablePrefix); - } - if(incMigrationItems.contains(tableName)){ - result.add(tableName); - } + @Override + public PerformResult perform(final CronJobModel cronJobModel) { + IncrementalMigrationCronJobModel incrementalMigrationCronJob; + + Preconditions.checkState(migrationContext instanceof IncrementalMigrationContext, + "Migration context is not activated for data export via cron job"); + Preconditions.checkState((cronJobModel instanceof 
IncrementalMigrationCronJobModel), + "cronJobModel must be an instance of IncrementalMigrationCronJobModel"); + modelService.refresh(cronJobModel); + + incrementalMigrationCronJob = (IncrementalMigrationCronJobModel) cronJobModel; + Preconditions.checkState( + null != incrementalMigrationCronJob.getMigrationItems() + && !incrementalMigrationCronJob.getMigrationItems().isEmpty(), + "We expect at least one table for the incremental migration"); + final Set deletionTableSet = getDeletionTableSet(incrementalMigrationCronJob.getMigrationItems()); + final IncrementalMigrationContext incrementalMigrationContext = (IncrementalMigrationContext) migrationContext; + MigrationStatus currentState; + String currentMigrationId; + boolean caughtExeption = false; + try { + + if (null != incrementalMigrationCronJob.getLastStartTime()) { + Instant timeStampInstant = incrementalMigrationCronJob.getLastStartTime().toInstant(); + LOG.info("For {} IncrementalTimestamp : {} ", incrementalMigrationCronJob.getCode(), timeStampInstant); + incrementalMigrationContext.setIncrementalMigrationTimestamp(timeStampInstant); + } else { + LOG.error( + "IncrementalTimestamp is not set for cron job {}; aborting the migration. Please set *lastStartTime* before triggering.", + incrementalMigrationCronJob.getCode()); + return new PerformResult(CronJobResult.ERROR, CronJobStatus.ABORTED); + } + incrementalMigrationContext.setIncrementalModeEnabled(true); + incrementalMigrationContext + .setTruncateEnabled(Optional.ofNullable(incrementalMigrationCronJob.isTruncateEnabled()) + .map(e -> incrementalMigrationCronJob.isTruncateEnabled()).orElse(false)); + updateSourceTypesystemProperty(); + if (CollectionUtils.isNotEmpty(deletionTableSet) && !isSchemaMigrationRequired(deletionTableSet)) { + // deletionTableSet.add(deletionTable); + LOG.info("Running Deletion incremental migration"); + incrementalMigrationContext.setSchemaMigrationAutoTriggerEnabled(false); + incrementalMigrationContext.setIncrementalTables(deletionTableSet); + incrementalMigrationContext.setDeletionEnabled(true); + incrementalMigrationContext.setLpTableMigrationEnabled(false); + currentMigrationId = databaseMigrationService.startMigration(incrementalMigrationContext, + createLaunchOptions(incrementalMigrationCronJob)); + + databaseMigrationService.waitForFinish(migrationContext, currentMigrationId); + } + + // Running incremental migration for non-LP tables + Set tablesWithoutLp = incrementalMigrationCronJob.getMigrationItems().stream() + .filter(table -> !(StringUtils.endsWithIgnoreCase(table, LP_SUFFIX))).collect(Collectors.toSet()); + if (CollectionUtils.isNotEmpty(tablesWithoutLp)) { + LOG.info("Running incremental migration for Non LP Table"); + incrementalMigrationContext.setDeletionEnabled(false); + incrementalMigrationContext.setLpTableMigrationEnabled(false); + incrementalMigrationContext.setIncrementalTables(tablesWithoutLp); + incrementalMigrationContext + .setSchemaMigrationAutoTriggerEnabled(incrementalMigrationCronJob.isSchemaAutotrigger()); + currentMigrationId = databaseMigrationService.startMigration(incrementalMigrationContext, + createLaunchOptions(incrementalMigrationCronJob)); + + waitForFinishCronjobs(incrementalMigrationContext, currentMigrationId, cronJobModel); + } + // Running incremental migration for LP Table + Set tablesWithLp = incrementalMigrationCronJob.getMigrationItems().stream() + .filter(table -> StringUtils.endsWithIgnoreCase(table, LP_SUFFIX)).collect(Collectors.toSet()); + if (CollectionUtils.isNotEmpty(tablesWithLp)) { + LOG.info("Running
incremental migration for LP Table"); + incrementalMigrationContext.setDeletionEnabled(false); + incrementalMigrationContext.setLpTableMigrationEnabled(true); + incrementalMigrationContext.setIncrementalTables(tablesWithLp); + incrementalMigrationContext + .setSchemaMigrationAutoTriggerEnabled(incrementalMigrationCronJob.isSchemaAutotrigger()); + currentMigrationId = databaseMigrationService.startMigration(incrementalMigrationContext, + createLaunchOptions(incrementalMigrationCronJob)); + + waitForFinishCronjobs(incrementalMigrationContext, currentMigrationId, cronJobModel); + } + } catch (final AbortCronJobException e) { + return new PerformResult(CronJobResult.ERROR, CronJobStatus.ABORTED); + } catch (final Exception e) { + caughtExeption = true; + LOG.error("Exception caught:", e); + } + if (!caughtExeption) { + incrementalMigrationCronJob.setLastStartTime(cronJobModel.getStartTime()); + modelService.save(cronJobModel); + } + return new PerformResult(caughtExeption ? CronJobResult.FAILURE : CronJobResult.SUCCESS, + CronJobStatus.FINISHED); } - return result; - } - - // TO do , change to static varriable - private Set getDeletionTableSet(Set incMigrationItems){ - if(Config - .getBoolean(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_TYPECODES_ENABLED, false)){ - return getDeletionTableSetFromTypeCodes(incMigrationItems); + + private Set getDeletionTableSetFromItemType(Set incMigrationItems) { + String deletionItemTypes = Config + .getString(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_ITEMTYPES, ""); + if (StringUtils.isEmpty(deletionItemTypes)) { + return Collections.emptySet(); + } + + final Set result = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + + final List itemtypesArray = Splitter.on(",").omitEmptyStrings().trimResults() + .splitToList(deletionItemTypes.toLowerCase()); + + String tableName; + for (String itemType : itemtypesArray) { + tableName = typeService.getComposedTypeForCode(itemType).getTable(); + + if (StringUtils.isNotEmpty(TABLE_PREFIX) && StringUtils.startsWith(tableName, TABLE_PREFIX)) { + tableName = StringUtils.removeStart(tableName, TABLE_PREFIX); + } + if (incMigrationItems.contains(tableName)) { + result.add(tableName); + } + } + return result; } - else if(Config - .getBoolean(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_ITEMTYPES_ENABLED, false)){ - getDeletionTableSetFromItemType(incMigrationItems); + + private Set getDeletionTableSetFromTypeCodes(Set incMigrationItems) { + String deletionTypecodes = Config + .getString(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_TYPECODES, ""); + if (StringUtils.isEmpty(deletionTypecodes)) { + return Collections.emptySet(); + } + + final Set result = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + + final List typecodeArray = Splitter.on(",").omitEmptyStrings().trimResults() + .splitToList(deletionTypecodes.toLowerCase()); + + String tableName; + for (String typecode : typecodeArray) { + tableName = TypeManager.getInstance().getRootComposedType(Integer.valueOf(typecode)).getTable(); + + if (StringUtils.startsWith(tableName, TABLE_PREFIX)) { + tableName = StringUtils.removeStart(tableName, TABLE_PREFIX); + } + if (incMigrationItems.contains(tableName)) { + result.add(tableName); + } + } + return result; } - return Collections.emptySet(); - } - - - private boolean isSchemaMigrationRequired(Set deletionTableSet) throws Exception { - String TABLE_EXISTS_SELECT_STATEMENT; - if(incrementalMigrationContext.getDataTargetRepository().getDatabaseProvider().isHanaUsed()){ 
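As a rough sketch of the table split applied above: the incremental run handles regular tables and *lp (localized) tables separately, and the same partition can be expressed with a single collector. The helper class and method names below are illustrative only, not part of the extension:

import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;
import org.apache.commons.lang3.StringUtils;

final class LpTablePartitionSketch {
    // true -> tables ending with "lp" (localized tables), false -> all remaining tables
    static Map<Boolean, Set<String>> partitionByLpSuffix(final Set<String> migrationItems) {
        return migrationItems.stream().collect(Collectors
                .partitioningBy(table -> StringUtils.endsWithIgnoreCase(table, "lp"), Collectors.toSet()));
    }
}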
- TABLE_EXISTS_SELECT_STATEMENT = TABLE_EXISTS_SELECT_STATEMENT_HANA; - } else if(incrementalMigrationContext.getDataTargetRepository().getDatabaseProvider().isOracleUsed()){ - TABLE_EXISTS_SELECT_STATEMENT = TABLE_EXISTS_SELECT_STATEMENT_ORACLE; - } else if(incrementalMigrationContext.getDataTargetRepository().getDatabaseProvider().isMssqlUsed()){ - TABLE_EXISTS_SELECT_STATEMENT = TABLE_EXISTS_SELECT_STATEMENT_MSSQL; - }else if(incrementalMigrationContext.getDataTargetRepository().getDatabaseProvider().isPostgreSqlUsed()){ - TABLE_EXISTS_SELECT_STATEMENT = TABLE_EXISTS_SELECT_STATEMENT_POSTGRES; - } else{ - TABLE_EXISTS_SELECT_STATEMENT = TABLE_EXISTS_SELECT_STATEMENT_MSSQL; + + private Set getDeletionTableSet(Set incMigrationItems) { + Set deletionTables = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); + + if (DELETIONS_BY_TYPECODES_ENABLED) { + deletionTables.addAll(getDeletionTableSetFromTypeCodes(incMigrationItems)); + } + + if (DELETIONS_BY_ITEMTYPES_ENABLED) { + deletionTables.addAll(getDeletionTableSetFromItemType(incMigrationItems)); + } + + return Collections.unmodifiableSet(deletionTables); } - try ( - Connection connection = incrementalMigrationContext.getDataTargetRepository() - .getConnection(); - Statement stmt = connection.createStatement(); - ) { - for (final String tableName : deletionTableSet) { - try (ResultSet resultSet = stmt.executeQuery(String.format(TABLE_EXISTS_SELECT_STATEMENT, - incrementalMigrationContext.getDataTargetRepository().getDataSourceConfiguration() - .getSchema(), tableName)); - ) { - String TABLE_NAME = null; - if (resultSet.next()) { - //TABLE_NAME = resultSet.getString("TABLE_NAME"); - } else { - return true; - } + + private boolean isSchemaMigrationRequired(Set deletionTableSet) throws Exception { + String TABLE_EXISTS_SELECT_STATEMENT; + if (migrationContext.getDataTargetRepository().getDatabaseProvider().isHanaUsed()) { + TABLE_EXISTS_SELECT_STATEMENT = TABLE_EXISTS_SELECT_STATEMENT_HANA; + } else if (migrationContext.getDataTargetRepository().getDatabaseProvider().isOracleUsed()) { + TABLE_EXISTS_SELECT_STATEMENT = TABLE_EXISTS_SELECT_STATEMENT_ORACLE; + } else if (migrationContext.getDataTargetRepository().getDatabaseProvider().isMssqlUsed()) { + TABLE_EXISTS_SELECT_STATEMENT = TABLE_EXISTS_SELECT_STATEMENT_MSSQL; + } else if (migrationContext.getDataTargetRepository().getDatabaseProvider().isPostgreSqlUsed()) { + TABLE_EXISTS_SELECT_STATEMENT = TABLE_EXISTS_SELECT_STATEMENT_POSTGRES; + } else { + TABLE_EXISTS_SELECT_STATEMENT = TABLE_EXISTS_SELECT_STATEMENT_MSSQL; + } + try (Connection connection = migrationContext.getDataTargetRepository().getConnection(); + Statement stmt = connection.createStatement()) { + for (final String tableName : deletionTableSet) { + try (ResultSet resultSet = stmt.executeQuery(String.format(TABLE_EXISTS_SELECT_STATEMENT, + migrationContext.getDataTargetRepository().getDataSourceConfiguration().getSchema(), + tableName))) { + String TABLE_NAME = null; + if (resultSet.next()) { + // TABLE_NAME = resultSet.getString("TABLE_NAME"); + } else { + return true; + } + } + } } - } + return false; } - return false; - } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/listeners/DefaultCMTAfterSaveListener.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/listeners/DefaultCMTAfterSaveListener.java index 1534346..832adfc 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/listeners/DefaultCMTAfterSaveListener.java +++ 
b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/listeners/DefaultCMTAfterSaveListener.java @@ -1,8 +1,9 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync.listeners; import com.google.common.base.Preconditions; @@ -14,9 +15,11 @@ import de.hybris.platform.tx.AfterSaveEvent; import de.hybris.platform.tx.AfterSaveListener; import de.hybris.platform.util.Config; + import java.util.Collection; import java.util.Collections; import java.util.List; + import org.apache.commons.lang.StringUtils; import com.sap.cx.boosters.commercedbsync.enums.ItemChangeType; import com.sap.cx.boosters.commercedbsync.model.ItemDeletionMarkerModel; @@ -24,93 +27,84 @@ import org.slf4j.LoggerFactory; /** - * DefaultCMTAfterSaveListener is an implementation of {@link AfterSaveListener} for use with - * capturing changes to Delete operations for any configured data models. - * + * DefaultCMTAfterSaveListener is an implementation of {@link AfterSaveListener} + * for use with capturing changes to Delete operations for any configured data + * models. */ public class DefaultCMTAfterSaveListener implements AfterSaveListener { - private static final Logger LOG = LoggerFactory.getLogger(DefaultCMTAfterSaveListener.class); - - private ModelService modelService; - - private static final String COMMA_SEPERATOR = ","; - - private TypeService typeService; - - private static final boolean deletionsEnabled = Config - .getBoolean(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_TYPECODES_ENABLED,false); - - - @Override - public void afterSave(final Collection events) { - if (!deletionsEnabled) { - if (LOG.isDebugEnabled()) { - LOG.debug("CMT deletions is not enabled for ItemModel."); - } - return; - } - - List deletionsTypeCode = getListDeletionsTypeCode(); - - if (deletionsTypeCode == null || deletionsTypeCode.isEmpty()) { - if (LOG.isDebugEnabled()) { - LOG.debug("No typecode defined to create a deletion record for CMT "); - } - return; - } - events.forEach(event -> { - { - final int type = event.getType(); - final String typeCodeAsString = event.getPk().getTypeCodeAsString(); - if (AfterSaveEvent.REMOVE == type && deletionsTypeCode.contains(typeCodeAsString)) { - final String tableName = TypeManager.getInstance() - .getRootComposedType(event.getPk().getTypeCode()).getTable(); - final ItemDeletionMarkerModel idm = modelService.create(ItemDeletionMarkerModel.class); - convertAndfillInitialDeletionMarker(idm, event.getPk().getLong(), - tableName); - modelService.save(idm); - - } - } - }); - - } - - private void convertAndfillInitialDeletionMarker(final ItemDeletionMarkerModel marker, final Long itemPK, - final String table) - { - Preconditions.checkNotNull(marker, "ItemDeletionMarker cannot be null in this place"); - Preconditions - .checkArgument(marker.getItemModelContext().isNew(), "ItemDeletionMarker must be new"); - - marker.setItemPK(itemPK); - marker.setTable(table); - marker.setChangeType(ItemChangeType.DELETED); - } - - - // TO DO change to static variable - private List getListDeletionsTypeCode() { - final String typeCodes = Config.getString( - CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_TYPECODES, ""); - if (StringUtils.isEmpty(typeCodes)) { - return Collections.emptyList(); - } - List result = Splitter.on(COMMA_SEPERATOR) - .omitEmptyStrings() - .trimResults() - 
.splitToList(typeCodes); - - return result; - } - - public void setModelService(final ModelService modelService) - { - this.modelService = modelService; - } - - public void setTypeService(TypeService typeService) { - this.typeService = typeService; - } + private static final Logger LOG = LoggerFactory.getLogger(DefaultCMTAfterSaveListener.class); + + private ModelService modelService; + + private static final String COMMA_SEPERATOR = ","; + + private TypeService typeService; + + private static final boolean deletionsEnabled = Config + .getBoolean(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_TYPECODES_ENABLED, false); + + @Override + public void afterSave(final Collection events) { + if (!deletionsEnabled) { + if (LOG.isDebugEnabled()) { + LOG.debug("CMT deletions is not enabled for ItemModel."); + } + return; + } + + List deletionsTypeCode = getListDeletionsTypeCode(); + + if (deletionsTypeCode == null || deletionsTypeCode.isEmpty()) { + if (LOG.isDebugEnabled()) { + LOG.debug("No typecode defined to create a deletion record for CMT "); + } + return; + } + events.forEach(event -> { + { + final int type = event.getType(); + final String typeCodeAsString = event.getPk().getTypeCodeAsString(); + if (AfterSaveEvent.REMOVE == type && deletionsTypeCode.contains(typeCodeAsString)) { + final String tableName = TypeManager.getInstance().getRootComposedType(event.getPk().getTypeCode()) + .getTable(); + final ItemDeletionMarkerModel idm = modelService.create(ItemDeletionMarkerModel.class); + convertAndfillInitialDeletionMarker(idm, event.getPk().getLong(), tableName); + modelService.save(idm); + + } + } + }); + + } + + private void convertAndfillInitialDeletionMarker(final ItemDeletionMarkerModel marker, final Long itemPK, + final String table) { + Preconditions.checkNotNull(marker, "ItemDeletionMarker cannot be null in this place"); + Preconditions.checkArgument(marker.getItemModelContext().isNew(), "ItemDeletionMarker must be new"); + + marker.setItemPK(itemPK); + marker.setTable(table); + marker.setChangeType(ItemChangeType.DELETED); + } + + // TO DO change to static variable + private List getListDeletionsTypeCode() { + final String typeCodes = Config + .getString(CommercedbsyncConstants.MIGRATION_DATA_INCREMENTAL_DELETIONS_TYPECODES, ""); + if (StringUtils.isEmpty(typeCodes)) { + return Collections.emptyList(); + } + List result = Splitter.on(COMMA_SEPERATOR).omitEmptyStrings().trimResults().splitToList(typeCodes); + + return result; + } + + public void setModelService(final ModelService modelService) { + this.modelService = modelService; + } + + public void setTypeService(TypeService typeService) { + this.typeService = typeService; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/logging/JDBCQueriesStore.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/logging/JDBCQueriesStore.java new file mode 100644 index 0000000..4c05aa2 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/logging/JDBCQueriesStore.java @@ -0,0 +1,206 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
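Condensed, the guard the listener applies to every after-save event is: a REMOVE event whose typecode is configured for deletion capture. A minimal sketch, assuming the listener's existing fields and imports; the helper method name is hypothetical:

// Hypothetical helper extracting the per-event check performed inside afterSave()
private boolean shouldCreateDeletionMarker(final AfterSaveEvent event, final List<String> deletionsTypeCode) {
    return AfterSaveEvent.REMOVE == event.getType()
            && deletionsTypeCode.contains(event.getPk().getTypeCodeAsString());
}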
+ * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.logging; + +import com.microsoft.azure.storage.CloudStorageAccount; +import com.microsoft.azure.storage.blob.CloudAppendBlob; +import com.microsoft.azure.storage.blob.CloudBlobClient; +import com.microsoft.azure.storage.blob.CloudBlobContainer; +import com.microsoft.azure.storage.blob.CloudBlobDirectory; +import com.microsoft.azure.storage.blob.CloudBlockBlob; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.InputStream; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.security.InvalidKeyException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import com.sap.cx.boosters.commercedbsync.repository.DataRepository; +import org.apache.commons.lang3.tuple.Pair; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; +import com.sap.cx.boosters.commercedbsync.utils.FileUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * In-memory store containing all JDBC queries ran on a {@link DataRepository} + * The store gets cleared from its elements after each migration + */ +public class JDBCQueriesStore { + + private static final Logger LOG = LoggerFactory.getLogger(JDBCQueriesStore.class); + + public static final String JDBCLOGS_DIRECTORY = "jdbclogs"; + + private final String dbConnectionString; + private final Collection queryLogs; + private final MigrationContext context; + + private final boolean isSourceDB; + // Unique id of the file in file storage where the jdbc store(s) across + // cluster(s) (in multi-cluster mode) for this datasource append the JDBC + // queries to. At the end of each migration, the post processor will create a + // zip out of + // this file and name it -[source/target]-jdbc-logs.zip + private final String sharedStoreLogFileName; + + public JDBCQueriesStore(final String dbConnectionString, final MigrationContext context, final boolean isSourceDB) { + this.queryLogs = Collections.synchronizedCollection(new ArrayList<>()); + this.dbConnectionString = dbConnectionString; + this.isSourceDB = isSourceDB; + this.sharedStoreLogFileName = isSourceDB + ? "source-db-jdbc-store-appending-file" + : "target-db-jdbc-store-appending-file"; + this.context = context; + } + + /** + * Add a JDBC query to the store + * + * @param newEntry + * new JDBC query to add to the store + */ + public void addEntry(JdbcQueryLog newEntry) { + if (queryLogs.size() >= context.getSqlStoreMemoryFlushThreshold()) { + flushQueryLogsToAppendingFile(); + queryLogs.clear(); + } + queryLogs.add(newEntry); + } + + /** + * Clears the store from all its JDBC queries and deletes the temporary + * appending file + */ + public void clear() { + queryLogs.clear(); + resetAppendingFile(); + } + + public int getInMemoryQueryLogsCount() { + return queryLogs.size(); + } + + /** + * Writes the JDBC queries of the store to a log file in the file storage + * associated with this store and compresses the log file. 
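A short usage sketch of the store's lifecycle as implied by its public API: entries accumulate in memory, are flushed to the shared append blob once the configured threshold is exceeded, are zipped per migration by writeToLogFileAndCompress, and the store is cleared afterwards. The variable names and the literal query below are assumptions for illustration:

// Hypothetical lifecycle of a source-side JDBCQueriesStore during one migration run
JDBCQueriesStore sourceStore = new JDBCQueriesStore(sourceJdbcUrl, migrationContext, true);
sourceStore.addEntry(new JdbcQueryLog("SELECT COUNT(*) FROM products"));  // buffered in memory
// ... more entries are added while the copy runs; once getSqlStoreMemoryFlushThreshold()
// is exceeded, the store appends the buffered entries to the shared blob and clears them ...
sourceStore.writeToLogFileAndCompress(migrationId);  // produces <migrationId>-source-jdbc-logs.zip
sourceStore.clear();                                 // start fresh for the next migration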
+ */ + public void writeToLogFileAndCompress(final String migrationId) { + flushQueryLogsToAppendingFile(); + compressAppendingFileContent(migrationId); + // delete the temporary appending file to start fresh on the next migration + resetAppendingFile(); + LOG.info("Wrote JDBC Queries logs to {} in storage {} for datasource {}", getLogFileName(migrationId, true), + context.getFileStorageConnectionString(), dbConnectionString); + } + + public Pair getLogFile(final String migrationId) { + final String logFileName = getLogFileName(migrationId, true); + try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) { + CloudBlobDirectory jdbcLogsDirectory = getContainer().getDirectoryReference("jdbclogs"); + CloudBlockBlob zippedLogBlobFile = jdbcLogsDirectory.getBlockBlobReference(logFileName); + zippedLogBlobFile.download(baos); + return Pair.of(baos.toByteArray(), logFileName); + } catch (Exception e) { + String errorMessage = String.format( + "Log file %s for datasource %s does not exist in storage %s or is currently being created", + logFileName, dbConnectionString, context.getFileStorageContainerName()); + LOG.error(errorMessage, e); + return Pair.of(errorMessage.getBytes(StandardCharsets.UTF_8), getLogFileName(migrationId, false)); + } + } + + @Override + public String toString() { + return "JDBCEntriesInMemoryStore{" + "connectionString='" + dbConnectionString + '}'; + } + + private void flushQueryLogsToAppendingFile() { + try { + CloudBlobDirectory jdbcLogsDirectory = getContainer().getDirectoryReference(JDBCLOGS_DIRECTORY); + CloudAppendBlob sharedStoreLogFile = jdbcLogsDirectory.getAppendBlobReference(sharedStoreLogFileName); + byte[] queryLogsBytes = getQueryLogsAsString().getBytes(StandardCharsets.UTF_8.name()); + try (InputStream is = new ByteArrayInputStream(queryLogsBytes)) { + sharedStoreLogFile.appendBlock(is, queryLogsBytes.length); + } + } catch (Exception e) { + LOG.error("Failed to flush querylogs to file {} in storage {} for datasource {}", sharedStoreLogFileName, + context.getFileStorageConnectionString(), dbConnectionString, e); + } + } + + private String getQueryLogsAsString() { + // Get an array out of the elements + // to prevent ConcurrentModificationException + // on the SynchronizedCollection + return Stream.of(queryLogs.toArray()).map(Object::toString).collect(Collectors.joining("\n")); + } + + private void compressAppendingFileContent(final String migrationId) { + try (ByteArrayOutputStream baos = new ByteArrayOutputStream()) { + CloudBlobDirectory jdbcLogsDirectory = getContainer().getDirectoryReference(JDBCLOGS_DIRECTORY); + CloudAppendBlob sharedStoreLogFile = jdbcLogsDirectory.getAppendBlobReference(this.sharedStoreLogFileName); + sharedStoreLogFile.download(baos); + byte[] zippedLogBytes = FileUtils.zipBytes(getLogFileName(migrationId, false), baos.toByteArray()); + CloudBlockBlob zippedLogBlobFile = jdbcLogsDirectory + .getBlockBlobReference(getLogFileName(migrationId, true)); + zippedLogBlobFile.uploadFromByteArray(zippedLogBytes, 0, zippedLogBytes.length); + } catch (Exception e) { + LOG.error("Failed to compress query logs from file {} in storage {} for datasource {}", + getLogFileName(migrationId, false), context.getFileStorageConnectionString(), dbConnectionString, + e); + } + } + + private void resetAppendingFile() { + try { + CloudBlobClient blobClient = getCloudBlobClient(); + CloudBlobDirectory jdbcLogsDirectory = blobClient + .getContainerReference(context.getFileStorageContainerName()).getDirectoryReference("jdbclogs"); + CloudAppendBlob 
logBlobFile = jdbcLogsDirectory.getAppendBlobReference(sharedStoreLogFileName); + logBlobFile.createOrReplace(); + } catch (Exception e) { + LOG.error("Failed to create or replace appending file {} in storage {} for datasource {}", + sharedStoreLogFileName, context.getFileStorageContainerName(), dbConnectionString, e); + } + } + + private CloudBlobClient getCloudBlobClient() throws URISyntaxException, InvalidKeyException { + // if file storage connection string is not set, do not try to connect to the + // storage + if (context.getFileStorageConnectionString() == null) { + throw new IllegalArgumentException("File storage connection string not set"); + } + CloudStorageAccount account = CloudStorageAccount.parse(context.getFileStorageConnectionString()); + return account.createCloudBlobClient(); + } + + private CloudBlobContainer getContainer() throws Exception { + CloudBlobContainer containerReference = getCloudBlobClient() + .getContainerReference(context.getFileStorageContainerName()); + + containerReference.createIfNotExists(); + + return containerReference; + } + + private String getLogFileName(final String migrationId, final boolean isZipped) { + final String filePrefix = isSourceDB ? "source" : "target"; + final String extension = isZipped ? "zip" : "log"; + return migrationId + "-" + filePrefix + "-jdbc-logs." + extension; + } + + public boolean isSourceDB() { + return isSourceDB; + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/logging/JdbcQueryLog.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/logging/JdbcQueryLog.java new file mode 100644 index 0000000..8a1c6ee --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/logging/JdbcQueryLog.java @@ -0,0 +1,77 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.logging; + +import com.sap.cx.boosters.commercedbsync.repository.DataRepository; + +import java.time.ZonedDateTime; +import java.util.Collections; +import java.util.Map; + +/** + * Immutable value-based classes representing a JDBC query ran by the migration + * tool against a {@link DataRepository} + */ +public class JdbcQueryLog { + + private final ZonedDateTime jdbcQueryTimestamp; + private final String jdbcQuery; + private final Map parameters; + + public JdbcQueryLog(final String jdbcQuery) { + this(jdbcQuery, null); + } + + public JdbcQueryLog(final String jdbcQuery, final Map parameters) { + this.jdbcQueryTimestamp = ZonedDateTime.now(); + this.jdbcQuery = jdbcQuery; + if (parameters == null || parameters.isEmpty()) { + this.parameters = null; + } else { + this.parameters = Collections.unmodifiableMap(parameters); + } + } + + /** + * Timestamp when the JDBC query was executed + * + * @return timestamp of the JDBC query + */ + public ZonedDateTime getJdbcQueryTimestamp() { + return jdbcQueryTimestamp; + } + + /** + * String representation of the JDBC query + * + * @return string representation of the JDBC query + */ + public String getJdbcQuery() { + return jdbcQuery; + } + + /** + * If the JDBC query has parameters, this will return the map of the parameters + * with the parameter index as key and the parameter value as value + * + * @return the JDBC query parameters, if it has any; null otherwise. 
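To make the toString() format above concrete, a sketch of one parameterized entry; the query and bind values are invented for illustration, and the parameter map is keyed by parameter index:

Map<Integer, Object> params = new HashMap<>();
params.put(1, "de");            // first bind parameter
params.put(2, 8796093054980L);  // second bind parameter
JdbcQueryLog entry = new JdbcQueryLog("SELECT * FROM languages WHERE isocode = ? AND pk = ?", params);
// entry.toString() -> {timestamp=2023-..., query='SELECT * FROM languages WHERE isocode = ? AND pk = ?', parameters={1=de, 2=8796093054980}}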
+ */ + public Map getParameters() { + return parameters; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("{timestamp=").append(jdbcQueryTimestamp).append(", query='") + .append(jdbcQuery).append("'"); + if (parameters != null) { + sb.append(", parameters=").append(parameters); + } + sb.append("}"); + return sb.toString(); + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/logging/LoggingConnectionWrapper.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/logging/LoggingConnectionWrapper.java new file mode 100644 index 0000000..0321e4a --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/logging/LoggingConnectionWrapper.java @@ -0,0 +1,266 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.logging; + +import java.sql.Array; +import java.sql.Blob; +import java.sql.CallableStatement; +import java.sql.Clob; +import java.sql.Connection; +import java.sql.DatabaseMetaData; +import java.sql.NClob; +import java.sql.PreparedStatement; +import java.sql.SQLClientInfoException; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.SQLXML; +import java.sql.Savepoint; +import java.sql.Statement; +import java.sql.Struct; +import java.util.Map; +import java.util.Properties; +import java.util.concurrent.Executor; + +/** + * Wrapper of {@link Connection} to add custom logging behavior when SQL queries + * are processed by the {@link Connection} + */ +public class LoggingConnectionWrapper implements Connection { + + private final Connection connection; + private final JDBCQueriesStore jdbcQueriesStore; + private final boolean logSqlParams; + + public LoggingConnectionWrapper(final Connection connection, final JDBCQueriesStore jdbcQueriesStore, + final boolean logSqlParams) { + this.connection = connection; + this.jdbcQueriesStore = jdbcQueriesStore; + this.logSqlParams = logSqlParams; + } + + public Statement createStatement() throws SQLException { + return new LoggingStatementWrapper(connection.createStatement(), jdbcQueriesStore, null, logSqlParams); + } + + public PreparedStatement prepareStatement(String sql) throws SQLException { + return new LoggingPreparedStatementWrapper(connection.prepareStatement(sql), sql, jdbcQueriesStore, + logSqlParams); + } + + public CallableStatement prepareCall(String sql) throws SQLException { + return connection.prepareCall(sql); + } + + public String nativeSQL(String sql) throws SQLException { + return connection.nativeSQL(sql); + } + + public void setAutoCommit(boolean autoCommit) throws SQLException { + connection.setAutoCommit(autoCommit); + } + + public boolean getAutoCommit() throws SQLException { + return connection.getAutoCommit(); + } + + public void commit() throws SQLException { + connection.commit(); + } + + public void rollback() throws SQLException { + connection.rollback(); + } + + public void close() throws SQLException { + connection.close(); + } + + public boolean isClosed() throws SQLException { + return connection.isClosed(); + } + + public DatabaseMetaData getMetaData() throws SQLException { + return connection.getMetaData(); + } + + public void setReadOnly(boolean readOnly) throws SQLException { + connection.setReadOnly(readOnly); + } + + public boolean isReadOnly() throws SQLException { + return connection.isReadOnly(); + } + + public void setCatalog(String catalog) throws SQLException { + 
connection.setCatalog(catalog); + } + + public String getCatalog() throws SQLException { + return connection.getCatalog(); + } + + public void setTransactionIsolation(int level) throws SQLException { + connection.setTransactionIsolation(level); + } + + public int getTransactionIsolation() throws SQLException { + return connection.getTransactionIsolation(); + } + + public SQLWarning getWarnings() throws SQLException { + return connection.getWarnings(); + } + + public void clearWarnings() throws SQLException { + connection.clearWarnings(); + } + + public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException { + return connection.createStatement(resultSetType, resultSetConcurrency); + } + + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) + throws SQLException { + return connection.prepareStatement(sql, resultSetType, resultSetConcurrency); + } + + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException { + return connection.prepareCall(sql, resultSetType, resultSetConcurrency); + } + + public Map> getTypeMap() throws SQLException { + return connection.getTypeMap(); + } + + public void setTypeMap(Map> map) throws SQLException { + connection.setTypeMap(map); + } + + public void setHoldability(int holdability) throws SQLException { + connection.setHoldability(holdability); + } + + public int getHoldability() throws SQLException { + return connection.getHoldability(); + } + + public Savepoint setSavepoint() throws SQLException { + return connection.setSavepoint(); + } + + public Savepoint setSavepoint(String name) throws SQLException { + return connection.setSavepoint(name); + } + + public void rollback(Savepoint savepoint) throws SQLException { + connection.rollback(savepoint); + } + + public void releaseSavepoint(Savepoint savepoint) throws SQLException { + connection.releaseSavepoint(savepoint); + } + + public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) + throws SQLException { + return connection.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability); + } + + public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + return connection.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability); + } + + public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, + int resultSetHoldability) throws SQLException { + return connection.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability); + } + + public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException { + return connection.prepareStatement(sql, autoGeneratedKeys); + } + + public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException { + return connection.prepareStatement(sql, columnIndexes); + } + + public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException { + return connection.prepareStatement(sql, columnNames); + } + + public Clob createClob() throws SQLException { + return connection.createClob(); + } + + public Blob createBlob() throws SQLException { + return connection.createBlob(); + } + + public NClob createNClob() throws SQLException { + return connection.createNClob(); + } + + public SQLXML createSQLXML() throws SQLException { + return 
connection.createSQLXML(); + } + + public boolean isValid(int timeout) throws SQLException { + return connection.isValid(timeout); + } + + public void setClientInfo(String name, String value) throws SQLClientInfoException { + connection.setClientInfo(name, value); + } + + public void setClientInfo(Properties properties) throws SQLClientInfoException { + connection.setClientInfo(properties); + } + + public String getClientInfo(String name) throws SQLException { + return connection.getClientInfo(name); + } + + public Properties getClientInfo() throws SQLException { + return connection.getClientInfo(); + } + + public Array createArrayOf(String typeName, Object[] elements) throws SQLException { + return connection.createArrayOf(typeName, elements); + } + + public Struct createStruct(String typeName, Object[] attributes) throws SQLException { + return connection.createStruct(typeName, attributes); + } + + public void setSchema(String schema) throws SQLException { + connection.setSchema(schema); + } + + public String getSchema() throws SQLException { + return connection.getSchema(); + } + + public void abort(Executor executor) throws SQLException { + connection.abort(executor); + } + + public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException { + connection.setNetworkTimeout(executor, milliseconds); + } + + public int getNetworkTimeout() throws SQLException { + return connection.getNetworkTimeout(); + } + + public T unwrap(Class iface) throws SQLException { + return connection.unwrap(iface); + } + + public boolean isWrapperFor(Class iface) throws SQLException { + return connection.isWrapperFor(iface); + } + +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/logging/LoggingPreparedStatementWrapper.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/logging/LoggingPreparedStatementWrapper.java new file mode 100644 index 0000000..d8d61ab --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/logging/LoggingPreparedStatementWrapper.java @@ -0,0 +1,505 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
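Based on the constructor and the overridden createStatement/prepareStatement factories above, wrapping an existing connection is enough to capture every statement created through it. The data source, store variable, and query below are hypothetical:

Connection raw = dataSource.getConnection();
Connection logging = new LoggingConnectionWrapper(raw, jdbcQueriesStore, /* logSqlParams */ true);
try (PreparedStatement ps = logging.prepareStatement("SELECT PK FROM products WHERE p_code = ?")) {
    ps.setString(1, "CAM-001");  // captured because logSqlParams is true
    ps.executeQuery();           // records a JdbcQueryLog entry before delegating to the real statement
}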
+ * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.logging; + +import java.io.InputStream; +import java.io.Reader; +import java.math.BigDecimal; +import java.net.URL; +import java.sql.Array; +import java.sql.Blob; +import java.sql.Clob; +import java.sql.Date; +import java.sql.NClob; +import java.sql.ParameterMetaData; +import java.sql.PreparedStatement; +import java.sql.Ref; +import java.sql.ResultSet; +import java.sql.ResultSetMetaData; +import java.sql.RowId; +import java.sql.SQLException; +import java.sql.SQLType; +import java.sql.SQLXML; +import java.sql.Time; +import java.sql.Timestamp; +import java.util.Calendar; +import java.util.HashMap; + +/** + * Wrapper of {@link PreparedStatement} to add custom logging behavior when SQL + * queries are processed by the {@link PreparedStatement} + */ +public class LoggingPreparedStatementWrapper extends LoggingStatementWrapper implements PreparedStatement { + + private final PreparedStatement preparedStatement; + + public LoggingPreparedStatementWrapper(final PreparedStatement preparedStatement, final String statementSql, + final JDBCQueriesStore jdbcQueriesStore, final boolean logSqlParams) { + super(preparedStatement, jdbcQueriesStore, statementSql, logSqlParams); + this.preparedStatement = preparedStatement; + } + + public JDBCQueriesStore getJdbcEntriesInMemoryStore() { + return jdbcQueriesStore; + } + + @Override + public ResultSet executeQuery() throws SQLException { + addLogEntry(); + return preparedStatement.executeQuery(); + } + + @Override + public int executeUpdate() throws SQLException { + addLogEntry(); + return preparedStatement.executeUpdate(); + } + + @Override + public boolean execute() throws SQLException { + addLogEntry(); + return preparedStatement.execute(); + } + + @Override + public long executeLargeUpdate() throws SQLException { + addLogEntry(); + return preparedStatement.executeLargeUpdate(); + } + + @Override + public void clearParameters() throws SQLException { + parameters.clear(); + preparedStatement.clearParameters(); + } + + @Override + public void setNull(int parameterIndex, int sqlType) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, sqlType); + } + preparedStatement.setNull(parameterIndex, sqlType); + } + + @Override + public void setBoolean(int parameterIndex, boolean x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setBoolean(parameterIndex, x); + } + + @Override + public void setByte(int parameterIndex, byte x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setByte(parameterIndex, x); + } + + @Override + public void setShort(int parameterIndex, short x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setShort(parameterIndex, x); + } + + @Override + public void setInt(int parameterIndex, int x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setInt(parameterIndex, x); + } + + @Override + public void setLong(int parameterIndex, long x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setLong(parameterIndex, x); + } + + @Override + public void setFloat(int parameterIndex, float x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setFloat(parameterIndex, x); + } + + @Override + public void setDouble(int parameterIndex, 
double x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setDouble(parameterIndex, x); + } + + @Override + public void setBigDecimal(int parameterIndex, BigDecimal x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setBigDecimal(parameterIndex, x); + } + + @Override + public void setString(int parameterIndex, String x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setString(parameterIndex, x); + } + + @Override + public void setBytes(int parameterIndex, byte[] x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setBytes(parameterIndex, x); + } + + @Override + public void setDate(int parameterIndex, Date x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setDate(parameterIndex, x); + } + + @Override + public void setTime(int parameterIndex, Time x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setTime(parameterIndex, x); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setTimestamp(parameterIndex, x); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, int length) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setAsciiStream(parameterIndex, x, length); + } + + /** + * @deprecated since 1.2 + */ + @Override + @Deprecated(since = "1.2") + public void setUnicodeStream(int parameterIndex, InputStream x, int length) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setUnicodeStream(parameterIndex, x, length); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, int length) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setBinaryStream(parameterIndex, x, length); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setObject(parameterIndex, x, targetSqlType); + } + + @Override + public void setObject(int parameterIndex, Object x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setObject(parameterIndex, x); + } + + @Override + public void addBatch() throws SQLException { + // Store added batch parameters in the batch parameters list + if (logSqlParams) { + batchParameters.add(new HashMap<>(parameters)); + } + // reset the current parameters map + parameters.clear(); + preparedStatement.addBatch(); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, int length) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, reader); + } + preparedStatement.setCharacterStream(parameterIndex, reader, length); + } + + @Override + public void setRef(int parameterIndex, Ref x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setRef(parameterIndex, x); + } + + @Override + public void setBlob(int parameterIndex, Blob x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + 
preparedStatement.setBlob(parameterIndex, x); + } + + @Override + public void setClob(int parameterIndex, Clob x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setClob(parameterIndex, x); + } + + @Override + public void setArray(int parameterIndex, Array x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setArray(parameterIndex, x); + } + + @Override + public ResultSetMetaData getMetaData() throws SQLException { + return preparedStatement.getMetaData(); + } + + @Override + public void setDate(int parameterIndex, Date x, Calendar cal) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setDate(parameterIndex, x, cal); + } + + @Override + public void setTime(int parameterIndex, Time x, Calendar cal) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setTime(parameterIndex, x, cal); + } + + @Override + public void setTimestamp(int parameterIndex, Timestamp x, Calendar cal) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setTimestamp(parameterIndex, x, cal); + } + + @Override + public void setNull(int parameterIndex, int sqlType, String typeName) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, sqlType); + } + preparedStatement.setNull(parameterIndex, sqlType, typeName); + } + + @Override + public void setURL(int parameterIndex, URL x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setURL(parameterIndex, x); + } + + @Override + public ParameterMetaData getParameterMetaData() throws SQLException { + return preparedStatement.getParameterMetaData(); + } + + @Override + public void setRowId(int parameterIndex, RowId x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setRowId(parameterIndex, x); + } + + @Override + public void setNString(int parameterIndex, String value) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, value); + } + preparedStatement.setNString(parameterIndex, value); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value, long length) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, value); + } + preparedStatement.setNCharacterStream(parameterIndex, value, length); + } + + @Override + public void setNClob(int parameterIndex, NClob value) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, value); + } + preparedStatement.setNClob(parameterIndex, value); + } + + @Override + public void setClob(int parameterIndex, Reader reader, long length) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, reader); + } + preparedStatement.setClob(parameterIndex, reader, length); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream, long length) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, inputStream); + } + preparedStatement.setBlob(parameterIndex, inputStream, length); + } + + @Override + public void setNClob(int parameterIndex, Reader reader, long length) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, reader); + } + preparedStatement.setNClob(parameterIndex, reader, length); + } + + @Override + public void setSQLXML(int parameterIndex, SQLXML xmlObject) 
throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, xmlObject); + } + preparedStatement.setSQLXML(parameterIndex, xmlObject); + } + + @Override + public void setObject(int parameterIndex, Object x, int targetSqlType, int scaleOrLength) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setObject(parameterIndex, x, targetSqlType, scaleOrLength); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x, long length) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setAsciiStream(parameterIndex, x, length); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x, long length) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setBinaryStream(parameterIndex, x, length); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader, long length) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, reader); + } + preparedStatement.setCharacterStream(parameterIndex, reader, length); + } + + @Override + public void setAsciiStream(int parameterIndex, InputStream x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setAsciiStream(parameterIndex, x); + } + + @Override + public void setBinaryStream(int parameterIndex, InputStream x) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setBinaryStream(parameterIndex, x); + } + + @Override + public void setCharacterStream(int parameterIndex, Reader reader) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, reader); + } + preparedStatement.setCharacterStream(parameterIndex, reader); + } + + @Override + public void setNCharacterStream(int parameterIndex, Reader value) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, value); + } + preparedStatement.setNCharacterStream(parameterIndex, value); + } + + @Override + public void setClob(int parameterIndex, Reader reader) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, reader); + } + preparedStatement.setClob(parameterIndex, reader); + } + + @Override + public void setBlob(int parameterIndex, InputStream inputStream) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, inputStream); + } + preparedStatement.setBlob(parameterIndex, inputStream); + } + + @Override + public void setNClob(int parameterIndex, Reader reader) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, reader); + } + preparedStatement.setNClob(parameterIndex, reader); + } + + @Override + public void setObject(int parameterIndex, Object x, SQLType targetSqlType, int scaleOrLength) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setObject(parameterIndex, x, targetSqlType, scaleOrLength); + } + + @Override + public void setObject(int parameterIndex, Object x, SQLType targetSqlType) throws SQLException { + if (logSqlParams) { + parameters.put(parameterIndex, x); + } + preparedStatement.setObject(parameterIndex, x, targetSqlType); + } + +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/logging/LoggingStatementWrapper.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/logging/LoggingStatementWrapper.java new file mode 100644 index 0000000..107d18f --- 
/dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/logging/LoggingStatementWrapper.java @@ -0,0 +1,334 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.logging; + +import java.sql.Connection; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.SQLWarning; +import java.sql.Statement; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.commons.lang.StringUtils; + +/** + * Wrapper of {@link Statement} to add custom logging behavior when SQL queries + * are processed by the {@link Statement} + */ +public class LoggingStatementWrapper implements Statement { + + private final Statement statement; + protected final JDBCQueriesStore jdbcQueriesStore; + protected final String statementSql; + protected final Map parameters; + protected final List> batchParameters; + protected final boolean logSqlParams; + + public LoggingStatementWrapper(final Statement statement, final JDBCQueriesStore jdbcQueriesStore, + final String statementSql, final boolean logSqlParams) { + this.statement = statement; + this.jdbcQueriesStore = jdbcQueriesStore; + this.statementSql = statementSql; + this.parameters = new HashMap<>(); + this.batchParameters = new ArrayList<>(); + this.logSqlParams = logSqlParams; + } + + public JDBCQueriesStore getJdbcEntriesInMemoryStore() { + return jdbcQueriesStore; + } + + @Override + public ResultSet executeQuery(String sql) throws SQLException { + addLogEntry(sql); + return statement.executeQuery(sql); + } + + @Override + public int executeUpdate(String sql) throws SQLException { + addLogEntry(sql); + return statement.executeUpdate(sql); + } + + protected void addLogEntry(String sql) { + JdbcQueryLog logEntry = new JdbcQueryLog(sql); + getJdbcEntriesInMemoryStore().addEntry(logEntry); + } + + protected void addLogEntry() { + addLogEntry(parameters); + } + + protected void addLogEntry(Map parameters) { + if (StringUtils.isNotBlank(statementSql)) { + JdbcQueryLog logEntry = new JdbcQueryLog(statementSql, parameters); + jdbcQueriesStore.addEntry(logEntry); + } + } + + @Override + public void close() throws SQLException { + statement.close(); + } + + @Override + public int getMaxFieldSize() throws SQLException { + return statement.getMaxFieldSize(); + } + + @Override + public void setMaxFieldSize(int max) throws SQLException { + statement.setMaxFieldSize(max); + } + + @Override + public int getMaxRows() throws SQLException { + return statement.getMaxRows(); + } + + @Override + public void setMaxRows(int max) throws SQLException { + statement.setMaxRows(max); + } + + @Override + public void setEscapeProcessing(boolean enable) throws SQLException { + statement.setEscapeProcessing(enable); + } + + @Override + public int getQueryTimeout() throws SQLException { + return statement.getQueryTimeout(); + } + + @Override + public void setQueryTimeout(int seconds) throws SQLException { + statement.setQueryTimeout(seconds); + } + + @Override + public void cancel() throws SQLException { + statement.cancel(); + } + + @Override + public SQLWarning getWarnings() throws SQLException { + return statement.getWarnings(); + } + + @Override + public void clearWarnings() throws SQLException { + statement.clearWarnings(); + } + + @Override + public void setCursorName(String name) throws SQLException { + statement.setCursorName(name); + } + + @Override + public boolean 
execute(String sql) throws SQLException { + return statement.execute(sql); + } + + @Override + public ResultSet getResultSet() throws SQLException { + return statement.getResultSet(); + } + + @Override + public int getUpdateCount() throws SQLException { + return statement.getUpdateCount(); + } + + @Override + public boolean getMoreResults() throws SQLException { + return statement.getMoreResults(); + } + + @Override + public void setFetchDirection(int direction) throws SQLException { + statement.setFetchDirection(direction); + } + + @Override + public int getFetchDirection() throws SQLException { + return statement.getFetchDirection(); + } + + @Override + public void setFetchSize(int rows) throws SQLException { + statement.setFetchSize(rows); + } + + @Override + public int getFetchSize() throws SQLException { + return statement.getFetchSize(); + } + + @Override + public int getResultSetConcurrency() throws SQLException { + return statement.getResultSetConcurrency(); + } + + @Override + public int getResultSetType() throws SQLException { + return statement.getResultSetType(); + } + + @Override + public void addBatch(String sql) throws SQLException { + statement.addBatch(sql); + } + + @Override + public void clearBatch() throws SQLException { + statement.clearBatch(); + } + + @Override + public int[] executeBatch() throws SQLException { + if (logSqlParams) { + // When executing the batch, log a query + // for each map of parameter values + batchParameters.forEach(this::addLogEntry); + } else { + // if sql params are not logged, + // log a single entry for the sql statement of the batch + addLogEntry(); + } + return statement.executeBatch(); + } + + @Override + public Connection getConnection() throws SQLException { + return statement.getConnection(); + } + + @Override + public boolean getMoreResults(int current) throws SQLException { + return statement.getMoreResults(current); + } + + @Override + public ResultSet getGeneratedKeys() throws SQLException { + return statement.getGeneratedKeys(); + } + + @Override + public int executeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + return statement.executeUpdate(sql, autoGeneratedKeys); + } + + @Override + public int executeUpdate(String sql, int[] columnIndexes) throws SQLException { + return statement.executeUpdate(sql, columnIndexes); + } + + @Override + public int executeUpdate(String sql, String[] columnNames) throws SQLException { + return statement.executeUpdate(sql, columnNames); + } + + @Override + public boolean execute(String sql, int autoGeneratedKeys) throws SQLException { + return statement.execute(sql, autoGeneratedKeys); + } + + @Override + public boolean execute(String sql, int[] columnIndexes) throws SQLException { + return statement.execute(sql, columnIndexes); + } + + @Override + public boolean execute(String sql, String[] columnNames) throws SQLException { + return statement.execute(sql, columnNames); + } + + @Override + public int getResultSetHoldability() throws SQLException { + return statement.getResultSetHoldability(); + } + + @Override + public boolean isClosed() throws SQLException { + return statement.isClosed(); + } + + @Override + public void setPoolable(boolean poolable) throws SQLException { + statement.setPoolable(poolable); + } + + @Override + public boolean isPoolable() throws SQLException { + return statement.isPoolable(); + } + + @Override + public void closeOnCompletion() throws SQLException { + statement.closeOnCompletion(); + } + + @Override + public boolean isCloseOnCompletion() throws 
SQLException { + return statement.isCloseOnCompletion(); + } + + @Override + public long getLargeUpdateCount() throws SQLException { + return statement.getLargeUpdateCount(); + } + + @Override + public void setLargeMaxRows(long max) throws SQLException { + statement.setLargeMaxRows(max); + } + + @Override + public long getLargeMaxRows() throws SQLException { + return statement.getLargeMaxRows(); + } + + @Override + public long[] executeLargeBatch() throws SQLException { + return statement.executeLargeBatch(); + } + + @Override + public long executeLargeUpdate(String sql) throws SQLException { + return statement.executeLargeUpdate(sql); + } + + @Override + public long executeLargeUpdate(String sql, int autoGeneratedKeys) throws SQLException { + return statement.executeLargeUpdate(sql, autoGeneratedKeys); + } + + @Override + public long executeLargeUpdate(String sql, int[] columnIndexes) throws SQLException { + return statement.executeLargeUpdate(sql, columnIndexes); + } + + @Override + public long executeLargeUpdate(String sql, String[] columnNames) throws SQLException { + return statement.executeLargeUpdate(sql, columnNames); + } + + @Override + public T unwrap(Class iface) throws SQLException { + return statement.unwrap(iface); + } + + @Override + public boolean isWrapperFor(Class iface) throws SQLException { + return statement.isWrapperFor(iface); + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceCategory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceCategory.java index 4a794b5..f02501f 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceCategory.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceCategory.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceProfiler.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceProfiler.java index 0fe3652..4c2d7b1 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceProfiler.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceProfiler.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -7,14 +7,14 @@ package com.sap.cx.boosters.commercedbsync.performance; import java.util.Collection; -import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; public interface PerformanceProfiler { PerformanceRecorder createRecorder(PerformanceCategory category, String name); void muteRecorder(PerformanceCategory category, String name); - ConcurrentHashMap getRecorders(); + ConcurrentMap getRecorders(); Collection getRecordersByCategory(PerformanceCategory category); diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceRecorder.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceRecorder.java index feff5c1..47dacd9 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceRecorder.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceRecorder.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -12,6 +12,7 @@ import javax.annotation.concurrent.ThreadSafe; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; /** @@ -19,11 +20,11 @@ */ public class PerformanceRecorder { - private ConcurrentHashMap records = new ConcurrentHashMap<>(); + private final ConcurrentMap records = new ConcurrentHashMap<>(); - private Stopwatch timer; - private PerformanceCategory category; - private String name; + private final Stopwatch timer; + private final PerformanceCategory category; + private final String name; public PerformanceRecorder(PerformanceCategory category, String name) { this(category, name, false); @@ -65,7 +66,7 @@ public void record(PerformanceUnit unit, double value) { } } - public ConcurrentHashMap getRecords() { + public ConcurrentMap getRecords() { return records; } @@ -81,13 +82,13 @@ public String toString() { @ThreadSafe public static class PerformanceAggregation { - private Stopwatch timer; - private PerformanceUnit performanceUnit; - private TimeUnit timeUnit = TimeUnit.SECONDS; - private AtomicDouble sum = new AtomicDouble(0); - private AtomicDouble max = new AtomicDouble(0); - private AtomicDouble min = new AtomicDouble(0); - private AtomicDouble avg = new AtomicDouble(0); + private final Stopwatch timer; + private final PerformanceUnit performanceUnit; + private final TimeUnit timeUnit = TimeUnit.SECONDS; + private final AtomicDouble sum = new AtomicDouble(0); + private final AtomicDouble max = new AtomicDouble(0); + private final AtomicDouble min = new AtomicDouble(0); + private final AtomicDouble avg = new AtomicDouble(0); public PerformanceAggregation(Stopwatch timer, PerformanceUnit performanceUnit) { this.performanceUnit = performanceUnit; @@ -97,7 +98,7 @@ public PerformanceAggregation(Stopwatch timer, PerformanceUnit performanceUnit) protected void submit(double value) { getTotalThroughput().addAndGet(value); long elapsed = timer.elapsed(TimeUnit.MILLISECONDS); - float elapsedToSeconds = elapsed / 1000f; + double elapsedToSeconds = elapsed / 1000d; if (elapsedToSeconds > 0) { getAvgThroughput().set(getTotalThroughput().get() / elapsedToSeconds); getMaxThroughput().set(Math.max(getMaxThroughput().get(), getAvgThroughput().get())); @@ -131,13 +132,9 @@ public TimeUnit getTimeUnit() { @Override public String toString() { - return 
"PerformanceAggregation{" + - "performanceUnit=" + performanceUnit + - ", sum=" + sum + - ", max=" + max + " " + performanceUnit + "/" + timeUnit + - ", min=" + min + " " + performanceUnit + "/" + timeUnit + - ", avg=" + avg + " " + performanceUnit + "/" + timeUnit + - '}'; + return "PerformanceAggregation{" + "performanceUnit=" + performanceUnit + ", sum=" + sum + ", max=" + max + + " " + performanceUnit + "/" + timeUnit + ", min=" + min + " " + performanceUnit + "/" + timeUnit + + ", avg=" + avg + " " + performanceUnit + "/" + timeUnit + '}'; } } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceUnit.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceUnit.java index c2d5cfd..2855bbe 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceUnit.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/PerformanceUnit.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/impl/DefaultPerformanceProfiler.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/impl/DefaultPerformanceProfiler.java index 043eec0..d8aa31a 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/impl/DefaultPerformanceProfiler.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/performance/impl/DefaultPerformanceProfiler.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -17,8 +17,7 @@ public class DefaultPerformanceProfiler implements PerformanceProfiler { - private ConcurrentHashMap recorders = new ConcurrentHashMap<>(); - + private final ConcurrentHashMap recorders = new ConcurrentHashMap<>(); @Override public PerformanceRecorder createRecorder(PerformanceCategory category, String name) { @@ -45,9 +44,8 @@ public Collection getRecordersByCategory(PerformanceCategor @Override public double getAverageByCategoryAndUnit(PerformanceCategory category, PerformanceUnit unit) { Collection recordersByCategory = getRecordersByCategory(category); - return recordersByCategory.stream().filter(r -> r.getRecords().get(unit) != null).mapToDouble(r -> - r.getRecords().get(unit).getAvgThroughput().get() - ).average().orElse(0); + return recordersByCategory.stream().filter(r -> r.getRecords().get(unit) != null) + .mapToDouble(r -> r.getRecords().get(unit).getAvgThroughput().get()).average().orElse(0); } @Override diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/MigrationPostProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/MigrationPostProcessor.java index b028ccf..9105e66 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/MigrationPostProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/MigrationPostProcessor.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -14,4 +14,8 @@ public interface MigrationPostProcessor { void process(CopyContext context); + + default boolean shouldExecute(CopyContext context) { + return true; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/MigrationPreProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/MigrationPreProcessor.java index ca36e35..68b6acd 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/MigrationPreProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/MigrationPreProcessor.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -14,4 +14,8 @@ public interface MigrationPreProcessor { void process(CopyContext context); + + default boolean shouldExecute(CopyContext context) { + return true; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/AdjustActiveTypeSystemPostProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/AdjustActiveTypeSystemPostProcessor.java index c28776f..b8dcf17 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/AdjustActiveTypeSystemPostProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/AdjustActiveTypeSystemPostProcessor.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -9,6 +9,7 @@ import com.sap.cx.boosters.commercedbsync.context.CopyContext; import com.sap.cx.boosters.commercedbsync.repository.DataRepository; import de.hybris.platform.servicelayer.config.ConfigurationService; +import org.apache.commons.lang3.BooleanUtils; import org.apache.commons.lang3.StringUtils; import com.sap.cx.boosters.commercedbsync.processors.MigrationPostProcessor; import org.slf4j.Logger; @@ -17,13 +18,13 @@ import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.SQLException; -import java.util.Arrays; public class AdjustActiveTypeSystemPostProcessor implements MigrationPostProcessor { private static final Logger LOG = LoggerFactory.getLogger(AdjustActiveTypeSystemPostProcessor.class.getName()); private static final String CCV2_TS_MIGRATION_TABLE = "CCV2_TYPESYSTEM_MIGRATIONS"; + // spotless:off private static final String TYPESYSTEM_ADJUST_STATEMENT = "IF (EXISTS (SELECT * \n" + " FROM INFORMATION_SCHEMA.TABLES \n" + " WHERE TABLE_SCHEMA = '%s' \n" + @@ -32,64 +33,57 @@ public class AdjustActiveTypeSystemPostProcessor implements MigrationPostProcess " UPDATE [%3$s] SET [state] = 'retired' WHERE 1=1;\n" + " UPDATE [%3$s] SET [state] = 'current', [comment] = 'Updated by CMT' WHERE [name] = '%s';\n" + "END"; - // ORACLR_TARGET - START - private static final String[] TRUEVALUES = new String[] { "yes", "y", "true", "0" }; - private static final String CMT_DISABLED_POST_PROCESSOR = "migration.data.postprocessor.tscheck.disable"; - private ConfigurationService configurationService; + // spotless:on + private static final String TS_CHECK_POST_PROCESSOR_DISABLED = "migration.data.postprocessor.tscheck.disable"; + private ConfigurationService configurationService; - /** - * @return the configurationService - */ - public ConfigurationService 
getConfigurationService() { - return configurationService; - } + /** + * @return the configurationService + */ + public ConfigurationService getConfigurationService() { + return configurationService; + } - /** - * @param configurationService - * the configurationService to set - */ - public void setConfigurationService(final ConfigurationService configurationService) { - this.configurationService = configurationService; - } + /** + * @param configurationService + * the configurationService to set + */ + public void setConfigurationService(final ConfigurationService configurationService) { + this.configurationService = configurationService; + } - @Override - public void process(final CopyContext context) { + @Override + public void process(final CopyContext context) { + final DataRepository targetRepository = context.getMigrationContext().getDataTargetRepository(); + final String typeSystemName = targetRepository.getDataSourceConfiguration().getTypeSystemName(); - if (isPostProcesorDisabled()) { - LOG.info("TS post processor is disabled "); - return; - } - final DataRepository targetRepository = context.getMigrationContext().getDataTargetRepository(); - final String typeSystemName = targetRepository.getDataSourceConfiguration().getTypeSystemName(); + try (Connection connection = targetRepository.getConnection(); + PreparedStatement statement = connection.prepareStatement(String.format(TYPESYSTEM_ADJUST_STATEMENT, + targetRepository.getDataSourceConfiguration().getSchema(), typeSystemName, + getMigrationsTableName(targetRepository)))) { + statement.execute(); - try ( Connection connection = targetRepository.getConnection(); - PreparedStatement statement = connection.prepareStatement(String.format(TYPESYSTEM_ADJUST_STATEMENT, - targetRepository.getDataSourceConfiguration().getSchema(), typeSystemName, - getMigrationsTableName(targetRepository))); - ) { - statement.execute(); - - LOG.info("Adjusted active type system to: " + typeSystemName); - } catch (SQLException e) { - LOG.error("Error executing post processor (SQLException) ", e); - } catch (Exception e) { - LOG.error("Error executing post processor", e); - } - } + LOG.info("Adjusted active type system to: " + typeSystemName); + } catch (SQLException e) { + LOG.error("Error executing post processor (SQLException) ", e); + } catch (Exception e) { + LOG.error("Error executing post processor", e); + } + } - private String getMigrationsTableName(final DataRepository repository) { - return StringUtils.trimToEmpty(repository.getDataSourceConfiguration().getTablePrefix()) - .concat(CCV2_TS_MIGRATION_TABLE); - } + @Override + public boolean shouldExecute(CopyContext context) { + return !isPostProcesorDisabled() && !context.getMigrationContext().isDataExportEnabled() + && context.getMigrationContext().getDataTargetRepository().getDatabaseProvider().isMssqlUsed(); + } - private boolean isPostProcesorDisabled() { - final String ccv2DisabledProperties = getConfigurationService().getConfiguration() - .getString(CMT_DISABLED_POST_PROCESSOR); - // boolean disabled = false; - if (ccv2DisabledProperties == null || ccv2DisabledProperties.isEmpty()) { - return false; - } - return Arrays.stream(TRUEVALUES).anyMatch(ccv2DisabledProperties::equalsIgnoreCase); - // return disabled; - } + private String getMigrationsTableName(final DataRepository repository) { + return StringUtils.trimToEmpty(repository.getDataSourceConfiguration().getTablePrefix()) + .concat(CCV2_TS_MIGRATION_TABLE); + } + + private boolean isPostProcesorDisabled() { + return 
BooleanUtils.toBoolean( + getConfigurationService().getConfiguration().getString(TS_CHECK_POST_PROCESSOR_DISABLED, "false")); + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/DefaultMigrationPostProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/DefaultMigrationPostProcessor.java index f7d4745..eaaf10b 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/DefaultMigrationPostProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/DefaultMigrationPostProcessor.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/IndexAlignerPostProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/IndexAlignerPostProcessor.java index cb69a39..32f1d32 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/IndexAlignerPostProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/IndexAlignerPostProcessor.java @@ -1,3 +1,9 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + package com.sap.cx.boosters.commercedbsync.processors.impl; import java.util.Arrays; @@ -19,97 +25,74 @@ import de.hybris.platform.servicelayer.config.ConfigurationService; - -public class IndexAlignerPostProcessor implements MigrationPostProcessor -{ - - private static final Logger LOG = LoggerFactory.getLogger(IndexAlignerPostProcessor.class); - - private ConfigurationService configurationService; - - @Override - public void process(final CopyContext context) - { - final MigrationContext migrationContext = context.getMigrationContext(); - if (migrationContext.isDropAllIndexesEnabled()) - { - LOG.info("Aligning indexes on target..."); - final DataRepository dataTargetRepository = migrationContext.getDataTargetRepository(); - final String indiciesSQL = generateAlterTablesSql(migrationContext); - indiciesSQL.lines().forEach(indexSQL -> { - if (StringUtils.isNotBlank(indexSQL)) - { - LOG.info("Executing {}", indexSQL); - try - { - dataTargetRepository.executeUpdateAndCommit(indexSQL); - } - catch (Exception e) - { - LOG.error("Execution failed for " + indexSQL, e); - } - } - }); - LOG.info("Index alignment on target is completed."); - } - } - - private String generateAlterTablesSql(final MigrationContext migrationContext) - { - String alterTablesSql = ""; - try - { - final Database sourceDatabase = migrationContext.getDataSourceRepository().asDatabase(); - final DataRepository dataTargetRepository = migrationContext.getDataTargetRepository(); - final Database targetDatabase = dataTargetRepository.asDatabase(); - final Set excludedIndices = getExcludedIndicies(); - - for (final String copiedTable : migrationContext.getIncludedTables()) - { - final Table sourceTable = sourceDatabase.findTable(copiedTable); - final Table targetTable = targetDatabase.findTable(copiedTable); - if (sourceTable != null && targetTable != null) - { - final Index[] sourceTableIndices = sourceTable.getIndices(); - final Index[] targetTableIndices = targetTable.getIndices(); - for (final Index sourceTableIndex : sourceTableIndices) - { - if (!ArrayUtils.contains(targetTableIndices, sourceTableIndex) - && 
!excludedIndices.contains((sourceTable.getName() + "." + sourceTableIndex.getName()).toLowerCase())) - { - LOG.debug("Found missing index {} for {}", sourceTableIndex, copiedTable); - targetTable.addIndex(sourceTableIndex); - } - } - } - else - { - LOG.warn("Table {} is not found one of the databases: source[{}], target[{}]", copiedTable, sourceTable, - targetTable); - } - } - - alterTablesSql = dataTargetRepository.asPlatform().getAlterTablesSql(targetDatabase); - LOG.debug("Generated alter table sql for missing indexes: {}", alterTablesSql); - } - catch (final Exception e) - { - LOG.error("Alter table generation failed", e); - } - - return alterTablesSql; - } - - private Set getExcludedIndicies() - { - final String excludedIndiciesStr = configurationService.getConfiguration() - .getString("migration.data.indices.drop.recreate.exclude"); - - return Arrays.stream(excludedIndiciesStr.split(",")).map(String::toLowerCase).collect(Collectors.toSet()); - } - - public void setConfigurationService(final ConfigurationService configurationService) - { - this.configurationService = configurationService; - } +public class IndexAlignerPostProcessor implements MigrationPostProcessor { + + private static final Logger LOG = LoggerFactory.getLogger(IndexAlignerPostProcessor.class); + + private ConfigurationService configurationService; + + @Override + public void process(final CopyContext context) { + LOG.info("Aligning indexes on target..."); + final MigrationContext migrationContext = context.getMigrationContext(); + final DataRepository dataTargetRepository = migrationContext.getDataTargetRepository(); + final String indiciesSQL = generateAlterTablesSql(migrationContext); + indiciesSQL.lines().forEach(indexSQL -> { + if (StringUtils.isNotBlank(indexSQL)) { + LOG.info("Executing {}", indexSQL); + try { + dataTargetRepository.executeUpdateAndCommit(indexSQL); + } catch (Exception e) { + LOG.error("Execution failed for " + indexSQL, e); + } + } + }); + LOG.info("Index alignment on target is completed."); + } + + @Override + public boolean shouldExecute(CopyContext context) { + return context.getMigrationContext().isDropAllIndexesEnabled(); + } + + private String generateAlterTablesSql(final MigrationContext migrationContext) { + final Database sourceDatabase = migrationContext.getDataSourceRepository().asDatabase(); + final DataRepository dataTargetRepository = migrationContext.getDataTargetRepository(); + final Database targetDatabase = dataTargetRepository.asDatabase(); + final Set excludedIndices = getExcludedIndicies(); + + for (final String copiedTable : migrationContext.getIncludedTables()) { + final Table sourceTable = sourceDatabase.findTable(copiedTable); + final Table targetTable = targetDatabase.findTable(copiedTable); + if (sourceTable != null && targetTable != null) { + final Index[] sourceTableIndices = sourceTable.getIndices(); + final Index[] targetTableIndices = targetTable.getIndices(); + for (final Index sourceTableIndex : sourceTableIndices) { + if (!ArrayUtils.contains(targetTableIndices, sourceTableIndex) && !excludedIndices + .contains((sourceTable.getName() + "." 
+ sourceTableIndex.getName()).toLowerCase())) { + LOG.debug("Found missing index {} for {}", sourceTableIndex, copiedTable); + targetTable.addIndex(sourceTableIndex); + } + } + } else { + LOG.warn("Table {} is not found in one of the databases: source[{}], target[{}]", copiedTable, sourceTable, + targetTable); + } + } + + final String alterTablesSql = dataTargetRepository.asPlatform().getAlterTablesSql(targetDatabase); + LOG.debug("Generated alter table sql for missing indexes: {}", alterTablesSql); + return alterTablesSql; + } + + private Set getExcludedIndicies() { + final String excludedIndiciesStr = configurationService.getConfiguration() + .getString("migration.data.indices.drop.recreate.exclude"); + + return Arrays.stream(excludedIndiciesStr.split(",")).map(String::toLowerCase).collect(Collectors.toSet()); + } + + public void setConfigurationService(final ConfigurationService configurationService) { + this.configurationService = configurationService; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/JdbcQueriesPostProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/JdbcQueriesPostProcessor.java new file mode 100644 index 0000000..aa6f920 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/JdbcQueriesPostProcessor.java @@ -0,0 +1,39 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.processors.impl; + +import com.sap.cx.boosters.commercedbsync.context.CopyContext; +import com.sap.cx.boosters.commercedbsync.processors.MigrationPostProcessor; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Post-processor producing and storing reports on the JDBC queries that were + * executed during a migration against the source and target data repositories. + */ +public class JdbcQueriesPostProcessor implements MigrationPostProcessor { + + private static final Logger LOG = LoggerFactory.getLogger(JdbcQueriesPostProcessor.class.getName()); + + @Override + public void process(CopyContext context) { + try { + context.getMigrationContext().getDataSourceRepository().getJdbcQueriesStore() + .writeToLogFileAndCompress(context.getMigrationId()); + context.getMigrationContext().getDataTargetRepository().getJdbcQueriesStore() + .writeToLogFileAndCompress(context.getMigrationId()); + LOG.info("Finished writing jdbc entries report"); + } catch (Exception e) { + LOG.error("Error executing post processor", e); + } + } + + @Override + public boolean shouldExecute(CopyContext context) { + return context.getMigrationContext().isLogSql(); + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ReportMigrationPostProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ReportMigrationPostProcessor.java index 01d95b0..d80cdd5 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ReportMigrationPostProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ReportMigrationPostProcessor.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors.
* License: Apache-2.0 * */ @@ -31,10 +31,15 @@ public class ReportMigrationPostProcessor implements MigrationPostProcessor { @Override public void process(CopyContext context) { + if (!databaseMigrationReportStorageService.validateConnection()) { + LOG.warn("Could not establish connection to report storage. Migration report will not be stored"); + return; + } + try { - final GsonBuilder gsonBuilder = new GsonBuilder(); - gsonBuilder.registerTypeAdapter(LocalDateTime.class, new LocalDateTypeAdapter()); - Gson gson = gsonBuilder.setPrettyPrinting().create(); + final GsonBuilder gsonBuilder = new GsonBuilder(); + gsonBuilder.registerTypeAdapter(LocalDateTime.class, new LocalDateTypeAdapter()); + Gson gson = gsonBuilder.setPrettyPrinting().create(); MigrationReport migrationReport = databaseMigrationReportService.getMigrationReport(context); InputStream is = new ByteArrayInputStream(gson.toJson(migrationReport).getBytes(StandardCharsets.UTF_8)); databaseMigrationReportStorageService.store(context.getMigrationId() + ".json", is); @@ -48,7 +53,8 @@ public void setDatabaseMigrationReportService(DatabaseMigrationReportService dat this.databaseMigrationReportService = databaseMigrationReportService; } - public void setDatabaseMigrationReportStorageService(DatabaseMigrationReportStorageService databaseMigrationReportStorageService) { + public void setDatabaseMigrationReportStorageService( + DatabaseMigrationReportStorageService databaseMigrationReportStorageService) { this.databaseMigrationReportStorageService = databaseMigrationReportStorageService; } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TransformFunctionGeneratorPreProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TransformFunctionGeneratorPreProcessor.java index f82b8ba..f3fa44b 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TransformFunctionGeneratorPreProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TransformFunctionGeneratorPreProcessor.java @@ -1,3 +1,9 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * License: Apache-2.0 + * + */ + package com.sap.cx.boosters.commercedbsync.processors.impl; import org.apache.commons.lang.StringUtils; @@ -12,51 +18,34 @@ import de.hybris.bootstrap.ddl.DataBaseProvider; +public class TransformFunctionGeneratorPreProcessor implements MigrationPreProcessor { + + private static final Logger LOG = LoggerFactory.getLogger(TransformFunctionGeneratorPreProcessor.class); + + @Override + public void process(final CopyContext context) { + final MigrationContext migrationContext = context.getMigrationContext(); + final DataRepository dataSourceRepository = migrationContext.getDataSourceRepository(); + final String platformSpecificSQL = getPlatformSpecificSQL(dataSourceRepository.getDatabaseProvider()); + if (StringUtils.isNotBlank(platformSpecificSQL)) { + dataSourceRepository.runSqlScriptOnPrimary( + new ClassPathResource("/sql/transformationFunctions/" + platformSpecificSQL)); + } + } + + private String getPlatformSpecificSQL(final DataBaseProvider databaseProvider) { + String platformSpecificSQL = "mssql-general.sql"; + if (databaseProvider.isHanaUsed() || databaseProvider.isOracleUsed() || databaseProvider.isPostgreSqlUsed()) { + platformSpecificSQL = null; + } + + LOG.info("Identified platform specific transformation function SQL {}", platformSpecificSQL); + + return platformSpecificSQL; + } -public class TransformFunctionGeneratorPreProcessor implements MigrationPreProcessor -{ - - private static final Logger LOG = LoggerFactory.getLogger(TransformFunctionGeneratorPreProcessor.class); - - @Override - public void process(final CopyContext context) - { - final MigrationContext migrationContext = context.getMigrationContext(); - final DataRepository dataSourceRepository = migrationContext.getDataSourceRepository(); - final String platformSpecificSQL = getPlatformSpecificSQL(dataSourceRepository.getDatabaseProvider()); - if (StringUtils.isNotBlank(platformSpecificSQL)) - { - dataSourceRepository.runSqlScriptOnPrimary(new ClassPathResource("/sql/transformationFunctions/" + platformSpecificSQL)); - } - } - - private String getPlatformSpecificSQL(final DataBaseProvider databaseProvider) - { - String platformSpecificSQL = "-general.sql"; - switch (databaseProvider.getDbName().toLowerCase()) - { - case "sqlserver": - platformSpecificSQL = "mssql" + platformSpecificSQL; - break; - case "mysql": - platformSpecificSQL = "mysql" + platformSpecificSQL; - break; - case "oracle": - platformSpecificSQL = "oracle" + platformSpecificSQL; - break; - case "postgresql": - platformSpecificSQL = "postgresql" + platformSpecificSQL; - break; - case "sap": - platformSpecificSQL = "sap" + platformSpecificSQL; - break; - default: - platformSpecificSQL = "hsqldb" + platformSpecificSQL; - - } - - LOG.info("Identified platform specific transformation function SQL {}", platformSpecificSQL); - - return platformSpecificSQL; - } + @Override + public boolean shouldExecute(CopyContext context) { + return context.getMigrationContext().isDataExportEnabled(); + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TruncateNotMigratedTablesPreProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TruncateNotMigratedTablesPreProcessor.java index cc637b0..0773434 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TruncateNotMigratedTablesPreProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TruncateNotMigratedTablesPreProcessor.java @@ -1,7 +1,14 @@ +/* + * 
Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + package com.sap.cx.boosters.commercedbsync.processors.impl; import java.util.Set; +import org.apache.commons.collections4.CollectionUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -10,39 +17,34 @@ import com.sap.cx.boosters.commercedbsync.processors.MigrationPreProcessor; import com.sap.cx.boosters.commercedbsync.repository.DataRepository; +public class TruncateNotMigratedTablesPreProcessor implements MigrationPreProcessor { + + private static final Logger LOG = LoggerFactory.getLogger(TruncateNotMigratedTablesPreProcessor.class); + + @Override + public void process(final CopyContext context) { + final MigrationContext migrationContext = context.getMigrationContext(); + final Set migrationItems = migrationContext.getIncludedTables(); + final DataRepository dataTargetRepository = migrationContext.getDataTargetRepository(); + try { + dataTargetRepository.getAllTableNames().stream().filter(table -> !migrationItems.contains(table)) + .forEach(notMigratedTable -> { + try { + dataTargetRepository.truncateTable(notMigratedTable); + LOG.debug("Not-migrated {} table is truncated", notMigratedTable); + } catch (final Exception e) { + LOG.error("Cannot truncate not-migrated table", e); + } + }); + } catch (Exception e) { + LOG.error("TruncateNotMigratedTablesPreProcessor failed", e); + } + } -public class TruncateNotMigratedTablesPreProcessor implements MigrationPreProcessor -{ - - private static final Logger LOG = LoggerFactory.getLogger(TruncateNotMigratedTablesPreProcessor.class); - - @Override - public void process(final CopyContext context) - { - final MigrationContext migrationContext = context.getMigrationContext(); - if (migrationContext.isFullDatabaseMigration()) - { - final Set migrationItems = migrationContext.getIncludedTables(); - final DataRepository dataTargetRepository = migrationContext.getDataTargetRepository(); - try - { - dataTargetRepository.getAllTableNames().stream().filter(table -> !migrationItems.contains(table)) - .forEach(notMigratedTable -> { - try - { - dataTargetRepository.truncateTable(notMigratedTable); - LOG.debug("Not-migrated {} table is truncated", notMigratedTable); - } - catch (final Exception e) - { - LOG.error("Cannot truncate not-migrated table", e); - } - }); - } - catch (Exception e) - { - LOG.error("TruncateNotMigratedTablesPreprocessor is failed", e); - } - } - } + @Override + public boolean shouldExecute(CopyContext context) { + return context.getMigrationContext().isDataExportEnabled() + && context.getMigrationContext().isFullDatabaseMigration() + && CollectionUtils.isNotEmpty(context.getMigrationContext().getIncludedTables()); + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TypeInfoTableGeneratorPreProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TypeInfoTableGeneratorPreProcessor.java index 4c3f861..aa29653 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TypeInfoTableGeneratorPreProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/TypeInfoTableGeneratorPreProcessor.java @@ -1,3 +1,9 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors.
+ * License: Apache-2.0 + * + */ + package com.sap.cx.boosters.commercedbsync.processors.impl; import java.util.HashMap; @@ -24,105 +30,80 @@ import de.hybris.platform.servicelayer.config.ConfigurationService; import de.hybris.platform.servicelayer.search.FlexibleSearchService; - -public class TypeInfoTableGeneratorPreProcessor implements MigrationPreProcessor -{ - - private static final Logger LOG = LoggerFactory.getLogger(TypeInfoTableGeneratorPreProcessor.class); - - private FlexibleSearchService flexibleSearchService; - private ConfigurationService configurationService; - - @Override - public void process(final CopyContext context) - { - final MigrationContext migrationContext = context.getMigrationContext(); - final DataRepository dataSourceRepository = migrationContext.getDataSourceRepository(); - final String platformSpecificSQL = getPlatformSpecificSQL(dataSourceRepository.getDatabaseProvider()); - if (StringUtils.isNotBlank(platformSpecificSQL)) - { - dataSourceRepository.runSqlScriptOnPrimary(new ClassPathResource("/sql/transformationFunctions/" + platformSpecificSQL)); - - final Set typeInfos = getRequiredTypeInfos(); - if (!typeInfos.isEmpty()) - { - final Map params = new HashMap<>(); - params.put("composedTypes", typeInfos); - flexibleSearchService - . search("SELECT {pk} FROM {ComposedType} WHERE {Code} IN (?composedTypes)", params) - .getResult().stream().forEach(composedType -> { - try - { - dataSourceRepository.executeUpdateAndCommitOnPrimary("INSERT INTO MIGRATIONTOOLKIT_TF_TYPEINFO VALUES ('" - + composedType.getCode() + "', " + composedType.getPk().getLongValue() + ")"); - } - catch (final Exception e) - { - LOG.error("Cannot insert into MIGRATIONTOOLKIT_TF_TYPEINFO", e); - } - }); - } - } - } - - private String getPlatformSpecificSQL(final DataBaseProvider databaseProvider) - { - String platformSpecificSQL = "-typeinfotable.sql"; - switch (databaseProvider.getDbName().toLowerCase()) - { - case "sqlserver": - platformSpecificSQL = "mssql" + platformSpecificSQL; - break; - case "mysql": - platformSpecificSQL = "mysql" + platformSpecificSQL; - break; - case "oracle": - platformSpecificSQL = "oracle" + platformSpecificSQL; - break; - case "postgresql": - platformSpecificSQL = "postgresql" + platformSpecificSQL; - break; - case "sap": - platformSpecificSQL = "sap" + platformSpecificSQL; - break; - default: - platformSpecificSQL = "hsqldb" + platformSpecificSQL; - - } - - LOG.info("Identified platform specific typeinfo table SQL {}", platformSpecificSQL); - - return platformSpecificSQL; - } - - public Set getRequiredTypeInfos() - { - final Set types = new HashSet<>(); - final Configuration subset = configurationService.getConfiguration().subset("migration.data.t.typeinfo"); - final Iterator keys = subset.getKeys(); - while (keys.hasNext()) - { - final String current = keys.next(); - final List subkeyList = Splitter.on(".").splitToList(current); - if (subkeyList.size() == 2 && "enabled".equals(subkeyList.get(1))) - { - boolean val = subset.getBoolean(current, false); - if (val) - { - types.add(subkeyList.get(0)); - } - } - } - return types; - } - - public void setFlexibleSearchService(final FlexibleSearchService flexibleSearchService) - { - this.flexibleSearchService = flexibleSearchService; - } - - public void setConfigurationService(final ConfigurationService configurationService) - { - this.configurationService = configurationService; - } +public class TypeInfoTableGeneratorPreProcessor implements MigrationPreProcessor { + + private static final Logger LOG = 
LoggerFactory.getLogger(TypeInfoTableGeneratorPreProcessor.class); + + private FlexibleSearchService flexibleSearchService; + private ConfigurationService configurationService; + + @Override + public void process(final CopyContext context) { + final MigrationContext migrationContext = context.getMigrationContext(); + final DataRepository dataSourceRepository = migrationContext.getDataSourceRepository(); + final String platformSpecificSQL = getPlatformSpecificSQL(dataSourceRepository.getDatabaseProvider()); + if (StringUtils.isNotBlank(platformSpecificSQL)) { + dataSourceRepository.runSqlScriptOnPrimary( + new ClassPathResource("/sql/transformationFunctions/" + platformSpecificSQL)); + + final Set typeInfos = getRequiredTypeInfos(); + if (!typeInfos.isEmpty()) { + final Map params = new HashMap<>(); + params.put("composedTypes", typeInfos); + flexibleSearchService + .search("SELECT {pk} FROM {ComposedType} WHERE {Code} IN (?composedTypes)", + params) + .getResult().stream().forEach(composedType -> { + try { + dataSourceRepository.executeUpdateAndCommitOnPrimary( + "INSERT INTO MIGRATIONTOOLKIT_TF_TYPEINFO VALUES ('" + composedType.getCode() + + "', " + composedType.getPk().getLongValue() + ")"); + } catch (final Exception e) { + LOG.error("Cannot insert into MIGRATIONTOOLKIT_TF_TYPEINFO", e); + } + }); + } + } + } + + @Override + public boolean shouldExecute(CopyContext context) { + return context.getMigrationContext().isDataExportEnabled(); + } + + private String getPlatformSpecificSQL(final DataBaseProvider databaseProvider) { + String platformSpecificSQL = "mssql-typeinfotable.sql"; + if (databaseProvider.isHanaUsed() || databaseProvider.isOracleUsed() || databaseProvider.isPostgreSqlUsed()) { + platformSpecificSQL = null; + } + + LOG.info("Identified platform specific typeinfo table SQL {}", platformSpecificSQL); + + return platformSpecificSQL; + } + + public Set getRequiredTypeInfos() { + final Set types = new HashSet<>(); + final Configuration subset = configurationService.getConfiguration().subset("migration.data.t.typeinfo"); + final Iterator keys = subset.getKeys(); + while (keys.hasNext()) { + final String current = keys.next(); + final List subkeyList = Splitter.on(".").splitToList(current); + if (subkeyList.size() == 2 && "enabled".equals(subkeyList.get(1))) { + boolean val = subset.getBoolean(current, false); + if (val) { + types.add(subkeyList.get(0)); + } + } + } + return types; + } + + public void setFlexibleSearchService(final FlexibleSearchService flexibleSearchService) { + this.flexibleSearchService = flexibleSearchService; + } + + public void setConfigurationService(final ConfigurationService configurationService) { + this.configurationService = configurationService; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewDropPostProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewDropPostProcessor.java index b9e0cf0..e95830b 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewDropPostProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewDropPostProcessor.java @@ -1,3 +1,9 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * License: Apache-2.0 + * + */ + package com.sap.cx.boosters.commercedbsync.processors.impl; import java.util.Set; @@ -10,31 +16,30 @@ import com.sap.cx.boosters.commercedbsync.processors.MigrationPostProcessor; import com.sap.cx.boosters.commercedbsync.repository.DataRepository; - -public class ViewDropPostProcessor implements MigrationPostProcessor -{ - private static final Logger LOG = LoggerFactory.getLogger(ViewDropPostProcessor.class); - - private static final String DROP_VIEW = "DROP VIEW %s;"; - - @Override - public void process(final CopyContext context) - { - final MigrationContext migrationContext = context.getMigrationContext(); - final DataRepository dataSourceRepository = migrationContext.getDataSourceRepository(); - final Set tables = migrationContext.getTablesForViews(); - - tables.stream().forEach(table -> { - try - { - final String viewName = migrationContext.getItemTypeViewNameByTable(table, dataSourceRepository); - dataSourceRepository.executeUpdateAndCommitOnPrimary(String.format(DROP_VIEW, viewName)); - LOG.info("View {} is dropped", viewName); - } - catch (Exception e) - { - LOG.error("View dropped failed", e); - } - }); - } +public class ViewDropPostProcessor implements MigrationPostProcessor { + private static final Logger LOG = LoggerFactory.getLogger(ViewDropPostProcessor.class); + + private static final String DROP_VIEW = "DROP VIEW %s;"; + + @Override + public void process(final CopyContext context) { + final MigrationContext migrationContext = context.getMigrationContext(); + final DataRepository dataSourceRepository = migrationContext.getDataSourceRepository(); + final Set tables = migrationContext.getTablesForViews(); + + tables.stream().forEach(table -> { + try { + final String viewName = migrationContext.getItemTypeViewNameByTable(table, dataSourceRepository); + dataSourceRepository.executeUpdateAndCommitOnPrimary(String.format(DROP_VIEW, viewName)); + LOG.info("View {} is dropped", viewName); + } catch (Exception e) { + LOG.error("View drop failed", e); + } + }); + } + + @Override + public boolean shouldExecute(CopyContext context) { + return context.getMigrationContext().isDataExportEnabled(); + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewGeneratorPreProcessor.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewGeneratorPreProcessor.java index 8d8844b..47c61b5 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewGeneratorPreProcessor.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/processors/impl/ViewGeneratorPreProcessor.java @@ -1,3 +1,9 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors.
+ * License: Apache-2.0 + * + */ + package com.sap.cx.boosters.commercedbsync.processors.impl; import java.sql.SQLException; @@ -12,50 +18,43 @@ import com.sap.cx.boosters.commercedbsync.processors.MigrationPreProcessor; import com.sap.cx.boosters.commercedbsync.views.TableViewGenerator; +public class ViewGeneratorPreProcessor implements MigrationPreProcessor { + + private static final Logger LOG = LoggerFactory.getLogger(ViewGeneratorPreProcessor.class); + + @Override + public void process(final CopyContext context) { + final MigrationContext ctx = context.getMigrationContext(); + final Set tables = ctx.getTablesForViews(); + final TableViewGenerator generator = new TableViewGenerator(); + tables.stream().map(t -> { + try { + return generator.generateForTable(t, ctx); + } catch (Exception e) { + LOG.error(String.format("couldn't generate ctx for table %s", t), e); + return null; + } + }).filter(Objects::nonNull).map(generator::generateViewDefinition).forEach(t -> { + try { + context.getMigrationContext().getDataSourceRepository().executeUpdateAndCommitOnPrimary(t); + } catch (Exception e) { + LOG.error(String.format("couldn't execute view creation %s", t), e); + } + }); + // Override setting if view has not been existing before + context.getCopyItems().stream().forEach(ci -> { + try { + final String sTableName = context.getMigrationContext().getItemTypeViewNameByTable(ci.getSourceItem(), + context.getMigrationContext().getDataSourceRepository()); + ci.setSourceItem(sTableName); + } catch (SQLException e) { + LOG.error(String.format("could not check view mapping for table: %s", ci.getSourceItem()), e); + } + }); + } -public class ViewGeneratorPreProcessor implements MigrationPreProcessor -{ - - private static final Logger LOG = LoggerFactory.getLogger(ViewGeneratorPreProcessor.class); - - @Override - public void process(final CopyContext context) - { - final MigrationContext ctx = context.getMigrationContext(); - final Set tables = ctx.getTablesForViews(); - final TableViewGenerator generator = new TableViewGenerator(); - tables.stream().map(t -> { - try - { - return generator.generateForTable(t, ctx); - } - catch (Exception e) - { - LOG.error(String.format("couldn't generate ctx for table %s", t), e); - return null; - } - }).filter(Objects::nonNull).map(generator::generateViewDefinition).forEach(t -> { - try - { - context.getMigrationContext().getDataSourceRepository().executeUpdateAndCommitOnPrimary(t); - } - catch (Exception e) - { - LOG.error(String.format("couldn't execute view creation %s", t), e); - } - }); - // Override setting if view has not been existing before - context.getCopyItems().stream().forEach(ci -> { - try - { - final String sTableName = context.getMigrationContext().getItemTypeViewNameByTable(ci.getSourceItem(), - context.getMigrationContext().getDataSourceRepository()); - ci.setSourceItem(sTableName); - } - catch (SQLException e) - { - LOG.error(String.format("could not check view mapping for table: %s", ci.getSourceItem()), e); - } - }); - } + @Override + public boolean shouldExecute(CopyContext context) { + return context.getMigrationContext().isDataExportEnabled(); + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/DataSourceConfiguration.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/DataSourceConfiguration.java index edf6d1e..90ad356 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/DataSourceConfiguration.java +++ 
b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/DataSourceConfiguration.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -15,7 +15,7 @@ public interface DataSourceConfiguration { String getDriver(); String getConnectionString(); - + String getConnectionStringPrimary(); String getUserName(); diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/DataSourceConfigurationFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/DataSourceConfigurationFactory.java new file mode 100644 index 0000000..b150f22 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/DataSourceConfigurationFactory.java @@ -0,0 +1,14 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.profile; + +/** + * Factory to create datasource configurations based on profiles + */ +public interface DataSourceConfigurationFactory { + DataSourceConfiguration create(String profile); +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/impl/DefaultDataSourceConfiguration.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/impl/DefaultDataSourceConfiguration.java index b9d34a4..e595316 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/impl/DefaultDataSourceConfiguration.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/impl/DefaultDataSourceConfiguration.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -15,7 +15,7 @@ */ public class DefaultDataSourceConfiguration implements DataSourceConfiguration { - private String profile; + private final String profile; private String driver; private String connectionString; private String connectionStringPrimary; @@ -55,7 +55,7 @@ public String getConnectionString() { public String getConnectionStringPrimary() { return connectionStringPrimary; } - + @Override public String getUserName() { return userName; @@ -125,7 +125,8 @@ protected void load(Configuration configuration, String profile) { this.maxActive = parseInt(getProfileProperty(profile, configuration, "db.connection.pool.size.active.max")); this.maxIdle = parseInt(getProfileProperty(profile, configuration, "db.connection.pool.size.idle.max")); this.minIdle = parseInt(getProfileProperty(profile, configuration, "db.connection.pool.size.idle.min")); - this.removedAbandoned = Boolean.parseBoolean(getProfileProperty(profile, configuration, "db.connection.removeabandoned")); + this.removedAbandoned = Boolean + .parseBoolean(getProfileProperty(profile, configuration, "db.connection.removeabandoned")); } protected String getNormalProperty(Configuration configuration, String key) { @@ -140,19 +141,18 @@ protected int parseInt(String value) { } } - protected String getProfileProperty(final String profile, final Configuration configuration, final String key) - { - return getProfileProperty(profile, configuration, key, true); + protected String getProfileProperty(final String profile, final Configuration configuration, final String key) { + return getProfileProperty(profile, configuration, key, true); } - + protected String getProfileProperty(String profile, Configuration configuration, String key, boolean checkPropery) { String profilePropertyKey = createProfilePropertyKey(key, profile); String property = configuration.getString(profilePropertyKey); if (StringUtils.startsWith(property, "${")) { property = configuration.getString(StringUtils.substringBetween(property, "{", "}")); } - if(checkPropery) { - return checkProperty(property, profilePropertyKey); + if (checkPropery) { + return checkProperty(property, profilePropertyKey); } return property; } @@ -161,8 +161,7 @@ protected String checkProperty(String property, String key) { if (property != null) { return property; } else { - throw new IllegalArgumentException(String.format( - "property %s doesn't exist", key)); + throw new IllegalArgumentException(String.format("property %s doesn't exist", key)); } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/impl/DefaultDataSourceConfigurationFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/impl/DefaultDataSourceConfigurationFactory.java new file mode 100644 index 0000000..d05e175 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/impl/DefaultDataSourceConfigurationFactory.java @@ -0,0 +1,25 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.profile.impl; + +import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfiguration; +import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfigurationFactory; +import de.hybris.platform.servicelayer.config.ConfigurationService; + +public class DefaultDataSourceConfigurationFactory implements DataSourceConfigurationFactory { + + private final ConfigurationService configurationService; + + public DefaultDataSourceConfigurationFactory(ConfigurationService configurationService) { + this.configurationService = configurationService; + } + + @Override + public DataSourceConfiguration create(String profile) { + return new DefaultDataSourceConfiguration(configurationService.getConfiguration(), profile); + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/impl/InvalidDataSourceConfigurationException.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/impl/InvalidDataSourceConfigurationException.java new file mode 100644 index 0000000..a1950b0 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/profile/impl/InvalidDataSourceConfigurationException.java @@ -0,0 +1,16 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.profile.impl; + +import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfiguration; + +public class InvalidDataSourceConfigurationException extends RuntimeException { + public InvalidDataSourceConfigurationException(String message, DataSourceConfiguration dataSourceConfiguration) { + super(message + ": " + dataSourceConfiguration); + } + +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/provider/CopyItemProvider.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/provider/CopyItemProvider.java index 1a6761f..dac82e9 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/provider/CopyItemProvider.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/provider/CopyItemProvider.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/provider/impl/DefaultDataCopyItemProvider.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/provider/impl/DefaultDataCopyItemProvider.java index 9e47c5a..10b88a0 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/provider/impl/DefaultDataCopyItemProvider.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/provider/impl/DefaultDataCopyItemProvider.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -32,242 +32,232 @@ public class DefaultDataCopyItemProvider implements CopyItemProvider { - public static final String SN_SUFFIX = "sn"; - private static final String LP_SUFFIX = "lp"; - private static final String LP_SUFFIX_UPPER = "LP"; - - private static final Logger LOG = LoggerFactory.getLogger(DefaultDataCopyItemProvider.class); - - private static final String[] TYPE_SYSTEM_RELATED_TYPES = new String[] { "atomictypes", "attributeDescriptors", - "collectiontypes", "composedtypes", "enumerationvalues", "maptypes" }; - private final Comparator tableCandidateComparator = (o1, o2) -> o1.getCommonTableName() - .compareToIgnoreCase(o2.getCommonTableName()); - private DataCopyTableFilter dataCopyTableFilter; - - private static boolean shouldMigrateAuditTable(final MigrationContext context, final String auditTableName) { - return context.isAuditTableMigrationEnabled() && StringUtils.isNotEmpty(auditTableName); - } - - // ORACLE_TARGET - START - private void logTables(final Set tablesCandidates, final String debugtext) { - if (LOG.isDebugEnabled()) { - LOG.debug("---------START------," + debugtext); - if (tablesCandidates != null && tablesCandidates.size() > 0) { - for (final TableCandidate source : tablesCandidates) { - LOG.debug("$$Table Common Name" + source.getCommonTableName() + ", Base Table =" - + source.getBaseTableName() + ", Suffix = " + source.getAdditionalSuffix() + " , Full TB = " - + source.getFullTableName() + ",Table Name = " + source.getTableName()); - } - LOG.debug("---------END------," + debugtext); - } - } - - } - // ORACLE_TARGET - END - - @Override - public Set get(final MigrationContext context) throws Exception { - final Set sourceTablesCandidates = getSourceTableCandidates(context); - final Set targetTablesCandidates = getTargetTableCandidates(context); - final Sets.SetView sourceTables = Sets.intersection(sourceTablesCandidates, - targetTablesCandidates); - - // ORACLE_TARGET --START ONLY FOR DEBUG - logTables(sourceTablesCandidates, "source table candidates"); - logTables(targetTablesCandidates, "target table candidates"); - logTables(sourceTables, "intersection tables"); - // ORACLE_TARGET --END ONLY FOR DEBUG - - final Set sourceTablesToMigrate = sourceTables.stream() - .filter(t -> dataCopyTableFilter.filter(context).test(t.getCommonTableName())) - .collect(Collectors.toSet()); - - return createCopyItems(context, sourceTablesToMigrate, targetTablesCandidates.stream() - .collect(Collectors.toMap(t -> t.getCommonTableName().toLowerCase(), t -> t))); - } - - @Override - public Set getSourceTableCandidates(final MigrationContext context) throws Exception { - return getTableCandidates(context, context.getDataSourceRepository()); - } - - @Override - public Set getTargetTableCandidates(final MigrationContext context) throws Exception { - final DataRepository targetRepository = context.getDataTargetRepository(); - final String prefix = targetRepository.getDataSourceConfiguration().getTablePrefix(); - - return targetRepository.getAllTableNames().stream() - .filter(n -> prefix == null || StringUtils.startsWithIgnoreCase(n, prefix)) - .map(n -> StringUtils.removeStartIgnoreCase(n, prefix)) - .filter(n -> !isNonMatchingTypesystemTable(targetRepository, n)) - .map(n -> createTableCandidate(targetRepository, n)) - .collect(Collectors.toCollection(() -> new TreeSet<>(tableCandidateComparator))); - } - - private boolean isNonMatchingTypesystemTable(final DataRepository repository, final String tableName) { - boolean isTypesystemTable = false; - // 
ORACLE_TARGET -- TODO SUFFIX SN_SUFFIX case ?? - if (!StringUtils.endsWithIgnoreCase(tableName, SN_SUFFIX)) { - isTypesystemTable = Arrays.stream(TYPE_SYSTEM_RELATED_TYPES) - .anyMatch(t -> StringUtils.startsWithIgnoreCase(tableName, t)); - } - if (isTypesystemTable) { - - final String additionalSuffix = getAdditionalSuffix(tableName, - repository.getDatabaseProvider()); - final String tableNameWithoutAdditionalSuffix = getTableNameWithoutAdditionalSuffix(tableName, - additionalSuffix); - final String typeSystemSuffix = repository.getDataSourceConfiguration().getTypeSystemSuffix(); - LOG.debug("$$TS table name=" + tableName + ",additionalSuffix=" + additionalSuffix - + ", tableNameWithoutAdditionalSuffix=" + tableNameWithoutAdditionalSuffix + ",typeSystemSuffix=" - + typeSystemSuffix); - LOG.debug("$$TS check=" - + !StringUtils.endsWithIgnoreCase(tableNameWithoutAdditionalSuffix, typeSystemSuffix)); - return !StringUtils.endsWithIgnoreCase(tableNameWithoutAdditionalSuffix, typeSystemSuffix); - } - return false; - } - - private Set getTableCandidates(final MigrationContext context, final DataRepository repository) - throws Exception { - final Set allTableNames = repository.getAllTableNames(); - - LOG.debug("$$ALL TABLES...getTableCandidates " + allTableNames); - final Set tableCandidates = new TreeSet<>(tableCandidateComparator); - - //add meta tables + public static final String SN_SUFFIX = "sn"; + private static final String LP_SUFFIX = "lp"; + private static final String LP_SUFFIX_UPPER = "LP"; + + private static final Logger LOG = LoggerFactory.getLogger(DefaultDataCopyItemProvider.class); + + private static final String[] TYPE_SYSTEM_RELATED_TYPES = new String[]{"atomictypes", "attributeDescriptors", + "collectiontypes", "composedtypes", "enumerationvalues", "maptypes"}; + private final Comparator tableCandidateComparator = (o1, o2) -> o1.getCommonTableName() + .compareToIgnoreCase(o2.getCommonTableName()); + private DataCopyTableFilter dataCopyTableFilter; + + private static boolean shouldMigrateAuditTable(final MigrationContext context, final String auditTableName) { + return context.isAuditTableMigrationEnabled() && StringUtils.isNotEmpty(auditTableName); + } + + private void logTables(final Set tablesCandidates, final String debugtext) { + if (LOG.isDebugEnabled()) { + LOG.debug("---------START------," + debugtext); + if (tablesCandidates != null && tablesCandidates.size() > 0) { + for (final TableCandidate source : tablesCandidates) { + LOG.debug("$$Table Common Name = " + source.getCommonTableName() + ", Base Table = " + + source.getBaseTableName() + ", Suffix = " + source.getAdditionalSuffix() + + ", Full Name = " + source.getFullTableName() + ", Table Name = " + source.getTableName()); + } + LOG.debug("---------END------," + debugtext); + } + } + + } + + @Override + public Set get(final MigrationContext context) throws Exception { + final Set sourceTablesCandidates = getSourceTableCandidates(context); + final Set targetTablesCandidates = getTargetTableCandidates(context); + final Sets.SetView sourceTables = Sets.intersection(sourceTablesCandidates, + targetTablesCandidates); + + logTables(sourceTablesCandidates, "source table candidates"); + logTables(targetTablesCandidates, "target table candidates"); + logTables(sourceTables, "intersection tables"); + + final Set sourceTablesToMigrate = sourceTables.stream() + .filter(t -> dataCopyTableFilter.filter(context).test(t.getCommonTableName())) + .collect(Collectors.toSet()); + + return createCopyItems(context, 
sourceTablesToMigrate, targetTablesCandidates.stream() + .collect(Collectors.toMap(t -> t.getCommonTableName().toLowerCase(), t -> t))); + } + + @Override + public Set getSourceTableCandidates(final MigrationContext context) throws Exception { + return getTableCandidates(context, context.getDataSourceRepository()); + } + + @Override + public Set getTargetTableCandidates(final MigrationContext context) throws Exception { + final DataRepository targetRepository = context.getDataTargetRepository(); + final String prefix = targetRepository.getDataSourceConfiguration().getTablePrefix(); + + return targetRepository.getAllTableNames().stream() + .filter(n -> prefix == null || StringUtils.startsWithIgnoreCase(n, prefix)) + .map(n -> StringUtils.removeStartIgnoreCase(n, prefix)) + .filter(n -> !isNonMatchingTypesystemTable(targetRepository, n)) + .map(n -> createTableCandidate(targetRepository, n)) + .collect(Collectors.toCollection(() -> new TreeSet<>(tableCandidateComparator))); + } + + private boolean isNonMatchingTypesystemTable(final DataRepository repository, final String tableName) { + boolean isTypesystemTable = false; + + if (!StringUtils.endsWithIgnoreCase(tableName, SN_SUFFIX)) { + isTypesystemTable = Arrays.stream(TYPE_SYSTEM_RELATED_TYPES) + .anyMatch(t -> StringUtils.startsWithIgnoreCase(tableName, t)); + } + if (isTypesystemTable) { + + final String additionalSuffix = getAdditionalSuffix(tableName, repository.getDatabaseProvider()); + final String tableNameWithoutAdditionalSuffix = getTableNameWithoutAdditionalSuffix(tableName, + additionalSuffix); + final String typeSystemSuffix = repository.getDataSourceConfiguration().getTypeSystemSuffix(); + LOG.debug("$$TS table name=" + tableName + ",additionalSuffix=" + additionalSuffix + + ", tableNameWithoutAdditionalSuffix=" + tableNameWithoutAdditionalSuffix + ",typeSystemSuffix=" + + typeSystemSuffix); + LOG.debug("$$TS check=" + + !StringUtils.endsWithIgnoreCase(tableNameWithoutAdditionalSuffix, typeSystemSuffix)); + return !StringUtils.endsWithIgnoreCase(tableNameWithoutAdditionalSuffix, typeSystemSuffix); + } + return false; + } + + private Set getTableCandidates(final MigrationContext context, final DataRepository repository) + throws Exception { + final Set allTableNames = repository.getAllTableNames(); + + LOG.debug("$$ALL TABLES...getTableCandidates " + allTableNames); + final Set tableCandidates = new TreeSet<>(tableCandidateComparator); + + // add meta tables tableCandidates.add(createTableCandidate(repository, CommercedbsyncConstants.DEPLOYMENTS_TABLE)); tableCandidates.add(createTableCandidate(repository, "aclentries")); tableCandidates.add(createTableCandidate(repository, "configitems")); tableCandidates.add(createTableCandidate(repository, "numberseries")); tableCandidates.add(createTableCandidate(repository, "metainformations")); - //add tables listed in "ydeployments" - final Set allTypeSystemTables = repository.getAllTypeSystemTables(); + // add tables listed in "ydeployments" + final Set allTypeSystemTables = repository.getAllTypeSystemTables(); allTypeSystemTables.forEach(t -> { tableCandidates.add(createTableCandidate(repository, t.getTableName())); - final String propsTableName = t.getPropsTableName(); + final String propsTableName = t.getPropsTableName(); if (StringUtils.isNotEmpty(propsTableName)) { tableCandidates.add(createTableCandidate(repository, t.getPropsTableName())); } - final TableCandidate lpTable = createTableCandidate(repository, t.getTableName() + LP_SUFFIX); - - if 
(allTableNames.stream().anyMatch(lpTable.getFullTableName()::equalsIgnoreCase)) { - LOG.debug("LP table Match... " + lpTable.getFullTableName()); - tableCandidates.add(lpTable); - } - // ORACLE_TARGET -END - if (shouldMigrateAuditTable(context, t.getAuditTableName())) { - final TableCandidate auditTable = createTableCandidate(repository, t.getAuditTableName()); - - // ORACLE_TARGET - START..needs to be tested.Case insensitive - // match - if (allTableNames.stream().anyMatch(auditTable.getFullTableName()::equalsIgnoreCase)) { - tableCandidates.add(lpTable); - } - // ORACLE_TARGET - END - } - }); - - // custom tables - if (CollectionUtils.isNotEmpty(context.getCustomTables())) { - tableCandidates.addAll(context.getCustomTables().stream().map(t -> createTableCandidate(repository, t)) - .collect(Collectors.toSet())); - } + final TableCandidate lpTable = createTableCandidate(repository, t.getTableName() + LP_SUFFIX); + + if (allTableNames.stream().anyMatch(lpTable.getFullTableName()::equalsIgnoreCase)) { + LOG.debug("LP table Match... " + lpTable.getFullTableName()); + tableCandidates.add(lpTable); + } + + if (shouldMigrateAuditTable(context, t.getAuditTableName())) { + final TableCandidate auditTable = createTableCandidate(repository, t.getAuditTableName()); + + if (allTableNames.contains(auditTable.getFullTableName())) { + tableCandidates.add(auditTable); + } + } + }); + + // custom tables + if (CollectionUtils.isNotEmpty(context.getCustomTables())) { + tableCandidates.addAll(context.getCustomTables().stream().map(t -> createTableCandidate(repository, t)) + .collect(Collectors.toSet())); + } return tableCandidates; } - private TableCandidate createTableCandidate(final DataRepository repository, final String tableName) { - final TableCandidate candidate = new TableCandidate(); - - final String additionalSuffix = getAdditionalSuffix(tableName, repository.getDatabaseProvider()); - final String tableNameWithoutAdditionalSuffix = getTableNameWithoutAdditionalSuffix(tableName, - additionalSuffix); - final String baseTableName = getTableNameWithoutTypeSystemSuffix(tableNameWithoutAdditionalSuffix, - repository.getDataSourceConfiguration().getTypeSystemSuffix()); - final boolean isTypeSystemRelatedTable = isTypeSystemRelatedTable(baseTableName); - candidate.setCommonTableName(baseTableName + additionalSuffix); - candidate.setTableName(tableName); - candidate.setFullTableName(repository.getDataSourceConfiguration().getTablePrefix() + tableName); - candidate.setAdditionalSuffix(additionalSuffix); - candidate.setBaseTableName(baseTableName); - candidate.setTypeSystemRelatedTable(isTypeSystemRelatedTable); - return candidate; - } - - private boolean isTypeSystemRelatedTable(final String tableName) { - return Arrays.stream(TYPE_SYSTEM_RELATED_TYPES).anyMatch(tableName::equalsIgnoreCase); - } - - private String getAdditionalSuffix(final String tableName, final DataBaseProvider dataBaseProvider) { - // ORACLE_TARGET - START - if (dataBaseProvider.isOracleUsed() && (StringUtils.endsWith(tableName, LP_SUFFIX_UPPER))) { - return LP_SUFFIX_UPPER; - }// ORACLE_TARGET - END - else if(dataBaseProvider.isHanaUsed() && (StringUtils.endsWith(tableName, LP_SUFFIX_UPPER))){ - return LP_SUFFIX_UPPER; - }else if (StringUtils.endsWithIgnoreCase(tableName, LP_SUFFIX)) { - return LP_SUFFIX; - } else { - return StringUtils.EMPTY; - } - } - - private String getTableNameWithoutTypeSystemSuffix(final String tableName, final String suffix) { - return StringUtils.removeEnd(tableName, suffix); - } - - private String 
getTableNameWithoutAdditionalSuffix(final String tableName, final String suffix) { - return StringUtils.removeEnd(tableName, suffix); - } - - private Set createCopyItems(final MigrationContext context, - final Set sourceTablesToMigrate, final Map targetTablesToMigrate) throws SQLException { - final Set copyItems = new HashSet<>(); - for (final TableCandidate sourceTableToMigrate : sourceTablesToMigrate) { - final String targetTableKey = sourceTableToMigrate.getCommonTableName().toLowerCase(); - - LOG.debug("Eligible Tables to Migrate =" + targetTableKey); - if (targetTablesToMigrate.containsKey(targetTableKey)) { - final TableCandidate targetTableToMigrate = targetTablesToMigrate.get(targetTableKey); - copyItems.add(createCopyItem(context, sourceTableToMigrate, targetTableToMigrate)); - } else { - throw new IllegalStateException("Target table must exists"); - } - } - return copyItems; - } - - private CopyContext.DataCopyItem createCopyItem(final MigrationContext context, final TableCandidate sourceTable, - final TableCandidate targetTable) throws SQLException { - final String sourceTableName = sourceTable.getFullTableName(); - final String targetTableName = targetTable.getFullTableName(); - DataRepository sds = context.getDataSourceRepository(); - String sTableName = context.getItemTypeViewNameByTable(sourceTableName, sds); - final CopyContext.DataCopyItem dataCopyItem = new CopyContext.DataCopyItem(sTableName, targetTableName); - addColumnMappingsIfNecessary(context, sourceTable, dataCopyItem); - return dataCopyItem; - } - - private void addColumnMappingsIfNecessary(final MigrationContext context, final TableCandidate sourceTable, - final CopyContext.DataCopyItem dataCopyItem) { - if (sourceTable.getCommonTableName().equalsIgnoreCase(CommercedbsyncConstants.DEPLOYMENTS_TABLE)) { - final String sourceTypeSystemName = context.getDataSourceRepository().getDataSourceConfiguration() - .getTypeSystemName(); - final String targetTypeSystemName = context.getDataTargetRepository().getDataSourceConfiguration() - .getTypeSystemName(); - // Add mapping to override the TypeSystemName value in target table - if (!sourceTypeSystemName.equalsIgnoreCase(targetTypeSystemName)) { - dataCopyItem.getColumnMap().put("TypeSystemName", targetTypeSystemName); - } - } - } - - public void setDataCopyTableFilter(final DataCopyTableFilter dataCopyTableFilter) { - this.dataCopyTableFilter = dataCopyTableFilter; - } + private TableCandidate createTableCandidate(final DataRepository repository, final String tableName) { + final TableCandidate candidate = new TableCandidate(); + + final String additionalSuffix = getAdditionalSuffix(tableName, repository.getDatabaseProvider()); + final String tableNameWithoutAdditionalSuffix = getTableNameWithoutAdditionalSuffix(tableName, + additionalSuffix); + final String baseTableName = getTableNameWithoutTypeSystemSuffix(tableNameWithoutAdditionalSuffix, + repository.getDataSourceConfiguration().getTypeSystemSuffix()); + final boolean isTypeSystemRelatedTable = isTypeSystemRelatedTable(baseTableName); + candidate.setCommonTableName(baseTableName + additionalSuffix); + candidate.setTableName(tableName); + candidate.setFullTableName(repository.getDataSourceConfiguration().getTablePrefix() + tableName); + candidate.setAdditionalSuffix(additionalSuffix); + candidate.setBaseTableName(baseTableName); + candidate.setTypeSystemRelatedTable(isTypeSystemRelatedTable); + return candidate; + } + + private boolean isTypeSystemRelatedTable(final String tableName) { + return 
Arrays.stream(TYPE_SYSTEM_RELATED_TYPES).anyMatch(tableName::equalsIgnoreCase); + } + + private String getAdditionalSuffix(final String tableName, final DataBaseProvider dataBaseProvider) { + if ((dataBaseProvider.isOracleUsed() || dataBaseProvider.isHanaUsed()) + && StringUtils.endsWith(tableName, LP_SUFFIX_UPPER)) { + return LP_SUFFIX_UPPER; + } else if (StringUtils.endsWithIgnoreCase(tableName, LP_SUFFIX)) { + return LP_SUFFIX; + } else { + return StringUtils.EMPTY; + } + } + + private String getTableNameWithoutTypeSystemSuffix(final String tableName, final String suffix) { + return StringUtils.removeEnd(tableName, suffix); + } + + private String getTableNameWithoutAdditionalSuffix(final String tableName, final String suffix) { + return StringUtils.removeEnd(tableName, suffix); + } + + private Set createCopyItems(final MigrationContext context, + final Set sourceTablesToMigrate, final Map targetTablesToMigrate) + throws SQLException { + final Set copyItems = new HashSet<>(); + for (final TableCandidate sourceTableToMigrate : sourceTablesToMigrate) { + final String targetTableKey = sourceTableToMigrate.getCommonTableName().toLowerCase(); + + LOG.debug("Eligible Tables to Migrate = " + targetTableKey); + if (targetTablesToMigrate.containsKey(targetTableKey)) { + final TableCandidate targetTableToMigrate = targetTablesToMigrate.get(targetTableKey); + copyItems.add(createCopyItem(context, sourceTableToMigrate, targetTableToMigrate)); + } else { + throw new IllegalStateException("Target table must exists"); + } + } + return copyItems; + } + + private CopyContext.DataCopyItem createCopyItem(final MigrationContext context, final TableCandidate sourceTable, + final TableCandidate targetTable) throws SQLException { + final String sourceTableName = sourceTable.getFullTableName(); + final String targetTableName = targetTable.getFullTableName(); + DataRepository sds = context.getDataSourceRepository(); + String sTableName = context.getItemTypeViewNameByTable(sourceTableName, sds); + final CopyContext.DataCopyItem dataCopyItem = new CopyContext.DataCopyItem(sTableName, targetTableName); + addColumnMappingsIfNecessary(context, sourceTable, dataCopyItem); + return dataCopyItem; + } + + private void addColumnMappingsIfNecessary(final MigrationContext context, final TableCandidate sourceTable, + final CopyContext.DataCopyItem dataCopyItem) { + if (sourceTable.getCommonTableName().equalsIgnoreCase(CommercedbsyncConstants.DEPLOYMENTS_TABLE)) { + final String sourceTypeSystemName = context.getDataSourceRepository().getDataSourceConfiguration() + .getTypeSystemName(); + final String targetTypeSystemName = context.getDataTargetRepository().getDataSourceConfiguration() + .getTypeSystemName(); + // Add mapping to override the TypeSystemName value in target table + if (!sourceTypeSystemName.equalsIgnoreCase(targetTypeSystemName)) { + dataCopyItem.getColumnMap().put("TypeSystemName", targetTypeSystemName); + } + } + } + + public void setDataCopyTableFilter(final DataCopyTableFilter dataCopyTableFilter) { + this.dataCopyTableFilter = dataCopyTableFilter; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/DataRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/DataRepository.java index d9c9af7..fbaf235 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/DataRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/DataRepository.java @@ -1,13 +1,13 @@ /* - * Copyright: 2022 SAP SE or an SAP 
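As a worked example of the naming logic above, a small standalone sketch of how a physical table name decomposes into base name, additional LP suffix, common name and full name, mirroring createTableCandidate and getAdditionalSuffix. The concrete prefix, type system suffix and table name are hypothetical.

import org.apache.commons.lang3.StringUtils;

// Illustrative decomposition only; the real logic also distinguishes the upper-case "LP"
// suffix used on Oracle/HANA and consults the repository's DataSourceConfiguration.
public class TableNameDecompositionSketch {

    public static void main(String[] args) {
        String tablePrefix = "";           // hypothetical migration.ds.*.db.tableprefix
        String typeSystemSuffix = "1";     // hypothetical suffix of an alternative type system
        String tableName = "composedtypes1lp";

        // "lp" marks a localized-properties table
        String additionalSuffix = StringUtils.endsWithIgnoreCase(tableName, "lp") ? "lp" : "";
        String withoutLp = StringUtils.removeEnd(tableName, additionalSuffix);      // composedtypes1
        String baseTableName = StringUtils.removeEnd(withoutLp, typeSystemSuffix);  // composedtypes
        String commonTableName = baseTableName + additionalSuffix;                  // composedtypeslp
        String fullTableName = tablePrefix + tableName;                             // composedtypes1lp

        System.out.printf("base=%s, common=%s, full=%s%n", baseTableName, commonTableName, fullTableName);
    }
}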
affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ package com.sap.cx.boosters.commercedbsync.repository; - import com.sap.cx.boosters.commercedbsync.dataset.DataSet; +import com.sap.cx.boosters.commercedbsync.logging.JDBCQueriesStore; import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfiguration; import de.hybris.bootstrap.ddl.DataBaseProvider; import org.apache.ddlutils.Platform; @@ -22,18 +22,22 @@ import java.sql.Connection; import java.sql.SQLException; import java.time.Instant; +import java.util.List; import java.util.Set; /** * */ public interface DataRepository { + + String getDatabaseTimezone(); + Database asDatabase(); Database asDatabase(boolean reload); Set getAllTableNames() throws Exception; - + Set getAllViewNames() throws SQLException; Set getAllTypeSystemTables() throws Exception; @@ -54,8 +58,9 @@ public interface DataRepository { long getRowCount(String table) throws Exception; - long getRowCountModifiedAfter(String table, Instant time, boolean isDeletionEnabled, boolean lpTableMigrationEnabled) throws SQLException; - + long getRowCountModifiedAfter(String table, Instant time, boolean isDeletionEnabled, + boolean lpTableMigrationEnabled) throws SQLException; + long getRowCountModifiedAfter(String table, Instant time) throws SQLException; DataSet getAll(String table) throws Exception; @@ -65,11 +70,11 @@ public interface DataRepository { DataSourceConfiguration getDataSourceConfiguration(); int executeUpdateAndCommit(String updateStatement) throws Exception; - + int executeUpdateAndCommitOnPrimary(String updateStatement) throws Exception; void runSqlScript(final Resource resource); - + void runSqlScriptOnPrimary(final Resource resource); float getDatabaseUtilization() throws SQLException; @@ -91,12 +96,26 @@ public interface DataRepository { Connection getConnection() throws Exception; DataSource getDataSource(); - + DataSource getDataSourcePrimary(); DataSet getBatchMarkersOrderedByColumn(MarkersQueryDefinition queryDefinition, Instant time) throws Exception; DataSet getUniqueColumns(String table) throws Exception; - boolean validateConnection() throws SQLException; + boolean validateConnection() throws Exception; + + /** + * Get the store of JDBC queries associated with the datasouce + * + * @return store of JDBC queries associated with the datasouce + */ + JDBCQueriesStore getJdbcQueriesStore(); + + /** + * Clear the store of JDBC queries from all the entries it currently contains + */ + void clearJdbcQueriesStore(); + + String buildBulkUpsertStatement(String table, List columnsToCopy, List upsertIDs); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AbstractDataRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AbstractDataRepository.java index 0b21bb0..e57a5d8 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AbstractDataRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AbstractDataRepository.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -23,11 +23,19 @@ import java.util.Map; import java.util.Objects; import java.util.Set; +import java.util.StringTokenizer; import java.util.TreeSet; import java.util.concurrent.ConcurrentHashMap; +import java.util.stream.Collectors; import javax.sql.DataSource; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; +import com.sap.cx.boosters.commercedbsync.logging.JDBCQueriesStore; +import com.sap.cx.boosters.commercedbsync.logging.LoggingConnectionWrapper; +import de.hybris.bootstrap.ddl.PropertiesLoader; +import de.hybris.platform.core.Registry; +import de.hybris.platform.core.TenantPropertiesLoader; import org.apache.commons.lang3.StringUtils; import org.apache.ddlutils.Platform; import org.apache.ddlutils.model.Database; @@ -57,28 +65,40 @@ import de.hybris.bootstrap.ddl.tools.persistenceinfo.PersistenceInformation; /** - * Base information an a + * Implementation of basic operations for accessing repositories */ public abstract class AbstractDataRepository implements DataRepository { private static final Logger LOG = LoggerFactory.getLogger(AbstractDataRepository.class); + public static final String LP_SUFFIX = "lp"; + + // one store per data repository + private final JDBCQueriesStore jdbcQueriesStore; private final Map dataSourceHolder = new ConcurrentHashMap<>(); private final DataSourceConfiguration dataSourceConfiguration; private final MigrationDataSourceFactory migrationDataSourceFactory; private final DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService; + private final MigrationContext migrationContext; private Platform platform; private Database database; - public AbstractDataRepository(DataSourceConfiguration dataSourceConfiguration, DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { - this(dataSourceConfiguration, databaseMigrationDataTypeMapperService, new DefaultMigrationDataSourceFactory()); + protected AbstractDataRepository(MigrationContext migrationContext, DataSourceConfiguration dataSourceConfiguration, + DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { + this(migrationContext, dataSourceConfiguration, new DefaultMigrationDataSourceFactory(), + databaseMigrationDataTypeMapperService); } - public AbstractDataRepository(DataSourceConfiguration dataSourceConfiguration, DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService, MigrationDataSourceFactory migrationDataSourceFactory) { + protected AbstractDataRepository(MigrationContext migrationContext, DataSourceConfiguration dataSourceConfiguration, + MigrationDataSourceFactory migrationDataSourceFactory, + DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { + this.migrationContext = migrationContext; this.dataSourceConfiguration = dataSourceConfiguration; this.migrationDataSourceFactory = migrationDataSourceFactory; this.databaseMigrationDataTypeMapperService = databaseMigrationDataTypeMapperService; + this.jdbcQueriesStore = new JDBCQueriesStore(getDataSourceConfiguration().getConnectionString(), + migrationContext, migrationContext.getInputProfiles().contains(dataSourceConfiguration.getProfile())); } @Override @@ -88,44 +108,45 @@ public DataSourceConfiguration getDataSourceConfiguration() { @Override public DataSource getDataSource() { - return dataSourceHolder.computeIfAbsent("DATASOURCE", s -> migrationDataSourceFactory.create(dataSourceConfiguration)); - } - - @Override - public DataSource getDataSourcePrimary() - { - return 
dataSourceHolder.computeIfAbsent("DATASOURCEPRIMARY", s -> { - DataSource primaryDataSource = getDataSource(); - final String connectionStringPrimary = dataSourceConfiguration.getConnectionStringPrimary(); - if(StringUtils.isNotBlank(connectionStringPrimary)) - { - final Map dataSourceConfigurationMap = new HashMap<>(); - dataSourceConfigurationMap.put("connection.url", connectionStringPrimary); - dataSourceConfigurationMap.put("driver", dataSourceConfiguration.getDriver()); - dataSourceConfigurationMap.put("username", dataSourceConfiguration.getUserName()); - dataSourceConfigurationMap.put("password", dataSourceConfiguration.getPassword()); - dataSourceConfigurationMap.put("pool.size.max", Integer.valueOf(1)); - dataSourceConfigurationMap.put("pool.size.idle.min", Integer.valueOf(1)); - dataSourceConfigurationMap.put("registerMbeans",Boolean.FALSE); - - primaryDataSource = migrationDataSourceFactory.create(dataSourceConfigurationMap); - } - - return primaryDataSource; - }); - } + return dataSourceHolder.computeIfAbsent("DATASOURCE", + s -> migrationDataSourceFactory.create(dataSourceConfiguration)); + } + + @Override + public DataSource getDataSourcePrimary() { + return dataSourceHolder.computeIfAbsent("DATASOURCEPRIMARY", s -> { + DataSource primaryDataSource = getDataSource(); + final String connectionStringPrimary = dataSourceConfiguration.getConnectionStringPrimary(); + if (StringUtils.isNotBlank(connectionStringPrimary)) { + final Map dataSourceConfigurationMap = new HashMap<>(); + dataSourceConfigurationMap.put("connection.url", connectionStringPrimary); + dataSourceConfigurationMap.put("driver", dataSourceConfiguration.getDriver()); + dataSourceConfigurationMap.put("username", dataSourceConfiguration.getUserName()); + dataSourceConfigurationMap.put("password", dataSourceConfiguration.getPassword()); + dataSourceConfigurationMap.put("pool.size.max", Integer.valueOf(1)); + dataSourceConfigurationMap.put("pool.size.idle.min", Integer.valueOf(1)); + dataSourceConfigurationMap.put("registerMbeans", Boolean.FALSE); + + primaryDataSource = migrationDataSourceFactory.create(dataSourceConfigurationMap); + } + + return primaryDataSource; + }); + } public Connection getConnection() throws SQLException { - Connection connection = getDataSource().getConnection(); - connection.setAutoCommit(false); - return connection; + // Only wrap with the logging behavior if logSql is true + if (migrationContext.isLogSql()) { + boolean logParameters = jdbcQueriesStore.isSourceDB() && migrationContext.isLogSqlParamsForSource(); + return new LoggingConnectionWrapper(getDataSource().getConnection(), jdbcQueriesStore, logParameters); + } else { + return getDataSource().getConnection(); + } } @Override public int executeUpdateAndCommit(String updateStatement) throws SQLException { - try (Connection conn = getConnectionForUpdateAndCommit(); - Statement statement = conn.createStatement() - ) { + try (Connection conn = getConnectionForUpdateAndCommit(); Statement statement = conn.createStatement()) { return statement.executeUpdate(updateStatement); } } @@ -136,22 +157,19 @@ public Connection getConnectionForUpdateAndCommit() throws SQLException { return connection; } - @Override - public int executeUpdateAndCommitOnPrimary(final String updateStatement) throws SQLException - { - try (final Connection conn = getConnectionFromPrimary(); final Statement statement = conn.createStatement()) - { - return statement.executeUpdate(updateStatement); - } - } - - public Connection getConnectionFromPrimary() throws SQLException - 
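The LoggingConnectionWrapper referenced in getConnection() above is not part of this hunk. Purely to illustrate the idea of recording issued SQL per datasource, here is a minimal dynamic-proxy sketch; everything except the java.sql and java.lang.reflect types is an assumption, not the extension's actual implementation.

import java.lang.reflect.Proxy;
import java.sql.Connection;
import java.util.List;

// Minimal sketch: wraps a Connection and records the SQL text passed to
// prepareStatement/prepareCall into an in-memory list (a stand-in for JDBCQueriesStore).
public final class SqlLoggingSketch {

    public static Connection wrap(Connection target, List<String> queryStore) {
        return (Connection) Proxy.newProxyInstance(
                Connection.class.getClassLoader(),
                new Class<?>[]{Connection.class},
                (proxy, method, args) -> {
                    // prepareStatement/prepareCall receive the SQL as their first argument
                    if (method.getName().startsWith("prepare") && args != null && args[0] instanceof String) {
                        queryStore.add((String) args[0]);
                    }
                    return method.invoke(target, args);
                });
    }

    // Usage sketch:
    // Connection logged = wrap(dataSource.getConnection(), new java.util.ArrayList<>());
}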
{ - final Connection connection = getDataSourcePrimary().getConnection(); - connection.setAutoCommit(true); - return connection; - } - + @Override + public int executeUpdateAndCommitOnPrimary(final String updateStatement) throws SQLException { + try (final Connection conn = getConnectionFromPrimary(); final Statement statement = conn.createStatement()) { + return statement.executeUpdate(updateStatement); + } + } + + public Connection getConnectionFromPrimary() throws SQLException { + final Connection connection = getDataSourcePrimary().getConnection(); + connection.setAutoCommit(true); + return connection; + } + @Override public void runSqlScript(Resource resource) { final ResourceDatabasePopulator databasePopulator = new ResourceDatabasePopulator(resource); @@ -165,7 +183,7 @@ public void runSqlScriptOnPrimary(Resource resource) { databasePopulator.setIgnoreFailedDrops(true); databasePopulator.execute(getDataSourcePrimary()); } - + @Override public float getDatabaseUtilization() throws SQLException { throw new UnsupportedOperationException("Must be added in the specific repository implementation"); @@ -181,13 +199,13 @@ public long getRowCount(String table) throws SQLException { List conditionsList = new ArrayList<>(1); processDefaultConditions(table, conditionsList); String[] conditions = null; - if (conditionsList.size() > 0) { + if (!conditionsList.isEmpty()) { conditions = conditionsList.toArray(new String[conditionsList.size()]); } try (Connection connection = getConnection(); - Statement stmt = connection.createStatement(); - ResultSet resultSet = stmt.executeQuery(String.format("select count(*) from %s where %s", table, expandConditions(conditions))) - ) { + Statement stmt = connection.createStatement(); + ResultSet resultSet = stmt.executeQuery( + String.format("select count(*) from %s where %s", table, expandConditions(conditions)))) { long value = 0; if (resultSet.next()) { value = resultSet.getLong(1); @@ -197,9 +215,9 @@ public long getRowCount(String table) throws SQLException { } @Override - public long getRowCountModifiedAfter(String table, Instant time, boolean isDeletionEnabled, boolean lpTableMigrationEnabled) - throws SQLException { - return getRowCountModifiedAfter(table,time); + public long getRowCountModifiedAfter(String table, Instant time, boolean isDeletionEnabled, + boolean lpTableMigrationEnabled) throws SQLException { + return getRowCountModifiedAfter(table, time); } @Override @@ -207,11 +225,12 @@ public long getRowCountModifiedAfter(String table, Instant time) throws SQLExcep List conditionsList = new ArrayList<>(1); processDefaultConditions(table, conditionsList); String[] conditions = null; - if (conditionsList.size() > 0) { + if (!conditionsList.isEmpty()) { conditions = conditionsList.toArray(new String[conditionsList.size()]); } try (Connection connection = getConnection()) { - try (PreparedStatement stmt = connection.prepareStatement(String.format("select count(*) from %s where modifiedts > ? AND %s", table, expandConditions(conditions)))) { + try (PreparedStatement stmt = connection.prepareStatement(String.format( + "select count(*) from %s where modifiedts > ? 
AND %s", table, expandConditions(conditions)))) { stmt.setTimestamp(1, Timestamp.from(time)); ResultSet resultSet = stmt.executeQuery(); long value = 0; @@ -228,13 +247,13 @@ public DataSet getAll(String table) throws Exception { List conditionsList = new ArrayList<>(1); processDefaultConditions(table, conditionsList); String[] conditions = null; - if (conditionsList.size() > 0) { + if (!conditionsList.isEmpty()) { conditions = conditionsList.toArray(new String[conditionsList.size()]); } try (Connection connection = getConnection(); - Statement stmt = connection.createStatement(); - ResultSet resultSet = stmt.executeQuery(String.format("select * from %s where %s", table, expandConditions(conditions))) - ) { + Statement stmt = connection.createStatement(); + ResultSet resultSet = stmt.executeQuery( + String.format("select * from %s where %s", table, expandConditions(conditions)))) { return convertToDataSet(resultSet); } } @@ -244,11 +263,12 @@ public DataSet getAllModifiedAfter(String table, Instant time) throws Exception List conditionsList = new ArrayList<>(1); processDefaultConditions(table, conditionsList); String[] conditions = null; - if (conditionsList.size() > 0) { + if (!conditionsList.isEmpty()) { conditions = conditionsList.toArray(new String[conditionsList.size()]); } try (Connection connection = getConnection()) { - try (PreparedStatement stmt = connection.prepareStatement(String.format("select * from %s where modifiedts > ? and %s", table, expandConditions(conditions)))) { + try (PreparedStatement stmt = connection.prepareStatement(String + .format("select * from %s where modifiedts > ? and %s", table, expandConditions(conditions)))) { stmt.setTimestamp(1, Timestamp.from(time)); ResultSet resultSet = stmt.executeQuery(); return convertToDataSet(resultSet); @@ -260,7 +280,16 @@ protected DefaultDataSet convertToDataSet(ResultSet resultSet) throws Exception return convertToDataSet(resultSet, Collections.emptySet()); } + protected DefaultDataSet convertToDataSet(int batchId, ResultSet resultSet) throws Exception { + return convertToDataSet(batchId, resultSet, Collections.emptySet()); + } + protected DefaultDataSet convertToDataSet(ResultSet resultSet, Set ignoreColumns) throws Exception { + return convertToDataSet(0, resultSet, ignoreColumns); + } + + protected DefaultDataSet convertToDataSet(int batchId, ResultSet resultSet, Set ignoreColumns) + throws Exception { int realColumnCount = resultSet.getMetaData().getColumnCount(); List columnOrder = new ArrayList<>(); int columnCount = 0; @@ -281,69 +310,92 @@ protected DefaultDataSet convertToDataSet(ResultSet resultSet, Set ignor for (DataColumn dataColumn : columnOrder) { int idx = resultSet.findColumn(dataColumn.getColumnName()); Object object = resultSet.getObject(idx); - //TODO: improve CLOB/BLOB handling - Object mappedValue = databaseMigrationDataTypeMapperService.dataTypeMapper(object, resultSet.getMetaData().getColumnType(idx)); + // TODO: improve CLOB/BLOB handling + Object mappedValue = databaseMigrationDataTypeMapperService.dataTypeMapper(object, + resultSet.getMetaData().getColumnType(idx)); row.add(mappedValue); } results.add(row); } - return new DefaultDataSet(columnCount, columnOrder, results); + return new DefaultDataSet(batchId, columnCount, columnOrder, results); } @Override public void disableIndexesOfTable(String table) throws SQLException { + final String disableIndexesScript; + + try { + disableIndexesScript = getDisableIndexesScript(table); + } catch (UnsupportedOperationException ignored) { + 
LOG.debug("Disable index operation is not supported for '{}' database", getDatabaseProvider().getDbName()); + return; + } + try (Connection connection = getConnection(); - Statement stmt = connection.createStatement(); - ResultSet resultSet = stmt.executeQuery(getDisableIndexesScript(table)) - ) { + Statement stmt = connection.createStatement(); + ResultSet resultSet = stmt.executeQuery(disableIndexesScript)) { while (resultSet.next()) { - String q = resultSet.getString(1); - LOG.debug("Running query: {}", q); - executeUpdateAndCommit(q); + runIndexQuery(resultSet); } } } @Override public void enableIndexesOfTable(String table) throws SQLException { + final String enableIndexesScript; + + try { + enableIndexesScript = getEnableIndexesScript(table); + } catch (UnsupportedOperationException ignored) { + LOG.debug("Enable index operation is not supported for '{}' database", getDatabaseProvider().getDbName()); + return; + } + try (Connection connection = getConnection(); - Statement stmt = connection.createStatement(); - ResultSet resultSet = stmt.executeQuery(getEnableIndexesScript(table)) - ) { + Statement stmt = connection.createStatement(); + ResultSet resultSet = stmt.executeQuery(enableIndexesScript)) { while (resultSet.next()) { - String q = resultSet.getString(1); - LOG.debug("Running query: {}", q); - executeUpdateAndCommit(q); + runIndexQuery(resultSet); } } } @Override public void dropIndexesOfTable(String table) throws SQLException { + final String dropIndexesScript; + + try { + dropIndexesScript = getDropIndexesScript(table); + } catch (UnsupportedOperationException ignored) { + LOG.debug("Drop index operation is not supported for '{}' database", getDatabaseProvider().getDbName()); + return; + } + try (Connection connection = getConnection(); - Statement stmt = connection.createStatement(); - ResultSet resultSet = stmt.executeQuery(getDropIndexesScript(table)) - ) { + Statement stmt = connection.createStatement(); + ResultSet resultSet = stmt.executeQuery(dropIndexesScript)) { while (resultSet.next()) { - String q = resultSet.getString(1); - LOG.debug("Running query: {}", q); - executeUpdateAndCommit(q); + runIndexQuery(resultSet); } } } - protected String getDisableIndexesScript(String table) { - throw new UnsupportedOperationException("not implemented"); - + private void runIndexQuery(ResultSet resultSet) throws SQLException { + String q = resultSet.getString(1); + LOG.debug("Running query: {}", q); + executeUpdateAndCommit(q); + } + protected String getDisableIndexesScript(String table) { + throw new UnsupportedOperationException(); } protected String getEnableIndexesScript(String table) { - throw new UnsupportedOperationException("not implemented"); + throw new UnsupportedOperationException(); } protected String getDropIndexesScript(String table) { - throw new UnsupportedOperationException("not implemented"); + throw new UnsupportedOperationException(); } @Override @@ -353,9 +405,13 @@ public Platform asPlatform() { @Override public Platform asPlatform(boolean reload) { - //TODO all properties to be set and check + // TODO all properties to be set and check if (this.platform == null || reload) { - final DatabaseSettings databaseSettings = new DatabaseSettings(getDatabaseProvider(), getDataSourceConfiguration().getConnectionString(), getDataSourceConfiguration().getDriver(), getDataSourceConfiguration().getUserName(), getDataSourceConfiguration().getPassword(), getDataSourceConfiguration().getTablePrefix(), ";"); + final PropertiesLoader propertiesLoader = new 
TenantPropertiesLoader(Registry.getMasterTenant()); + final DatabaseSettings databaseSettings = new DatabaseSettings(getDatabaseProvider(), + getDataSourceConfiguration().getConnectionString(), getDataSourceConfiguration().getDriver(), + getDataSourceConfiguration().getUserName(), getDataSourceConfiguration().getPassword(), + getDataSourceConfiguration().getTablePrefix(), propertiesLoader, ";"); this.platform = createPlatform(databaseSettings, getDataSource()); addCustomPlatformTypeMapping(this.platform); } @@ -366,7 +422,6 @@ protected Platform createPlatform(DatabaseSettings databaseSettings, DataSource return HybrisPlatformFactory.createInstance(databaseSettings, dataSource); } - protected void addCustomPlatformTypeMapping(Platform platform) { } @@ -385,8 +440,7 @@ public Database asDatabase(boolean reload) { protected Database getDatabase(boolean reload) { String schema = getDataSourceConfiguration().getSchema(); - return asPlatform(reload).readModelFromDatabase(getDataSourceConfiguration().getProfile(), null, - schema, null); + return asPlatform(reload).readModelFromDatabase(getDataSourceConfiguration().getProfile(), null, schema, null); } @Override @@ -394,9 +448,8 @@ public Set getAllTableNames() throws SQLException { Set allTableNames = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); String allTableNamesQuery = createAllTableNamesQuery(); try (Connection connection = getConnection(); - Statement stmt = connection.createStatement(); - ResultSet resultSet = stmt.executeQuery(allTableNamesQuery) - ) { + Statement stmt = connection.createStatement(); + ResultSet resultSet = stmt.executeQuery(allTableNamesQuery)) { while (resultSet.next()) { String tableName = resultSet.getString(1); if (!StringUtils.startsWithIgnoreCase(tableName, CommercedbsyncConstants.MIGRATION_TABLESPREFIX)) { @@ -413,17 +466,19 @@ public Set getAllTypeSystemTables() throws SQLException { throw new RuntimeException("No type system name specified. 
Check the properties"); } String tablePrefix = getDataSourceConfiguration().getTablePrefix(); - String yDeploymentsTable = StringUtils.defaultIfBlank(tablePrefix, "") + CommercedbsyncConstants.DEPLOYMENTS_TABLE; + String yDeploymentsTable = StringUtils.defaultIfBlank(tablePrefix, "") + + CommercedbsyncConstants.DEPLOYMENTS_TABLE; Set allTableNames = getAllTableNames(); if (!allTableNames.contains(yDeploymentsTable)) { return Collections.emptySet(); } - String allTypeSystemTablesQuery = String.format("SELECT * FROM %s WHERE Typecode IS NOT NULL AND TableName IS NOT NULL AND TypeSystemName = '%s'", yDeploymentsTable, getDataSourceConfiguration().getTypeSystemName()); + String allTypeSystemTablesQuery = String.format( + "SELECT * FROM %s WHERE Typecode IS NOT NULL AND TableName IS NOT NULL AND TypeSystemName = '%s'", + yDeploymentsTable, getDataSourceConfiguration().getTypeSystemName()); Set allTypeSystemTables = new HashSet<>(); try (Connection connection = getConnection(); - Statement stmt = connection.createStatement(); - ResultSet resultSet = stmt.executeQuery(allTypeSystemTablesQuery) - ) { + Statement stmt = connection.createStatement(); + ResultSet resultSet = stmt.executeQuery(allTypeSystemTablesQuery)) { while (resultSet.next()) { TypeSystemTable typeSystemTable = new TypeSystemTable(); String name = resultSet.getString("Name"); @@ -463,10 +518,9 @@ private String detectTypeSystemSuffix(String tableName, String name) { @Override public boolean isAuditTable(String table) throws Exception { String tablePrefix = getDataSourceConfiguration().getTablePrefix(); - String query = String.format("SELECT count(*) from %s%s WHERE AuditTableName = ? OR AuditTableName = ?", StringUtils.defaultIfBlank(tablePrefix, ""), CommercedbsyncConstants.DEPLOYMENTS_TABLE); - try (Connection connection = getConnection(); - PreparedStatement stmt = connection.prepareStatement(query); - ) { + String query = String.format("SELECT count(*) from %s%s WHERE AuditTableName = ? 
OR AuditTableName = ?", + StringUtils.defaultIfBlank(tablePrefix, ""), CommercedbsyncConstants.DEPLOYMENTS_TABLE); + try (Connection connection = getConnection(); PreparedStatement stmt = connection.prepareStatement(query)) { stmt.setObject(1, StringUtils.removeStartIgnoreCase(table, tablePrefix)); stmt.setObject(2, table); try (ResultSet rs = stmt.executeQuery()) { @@ -488,9 +542,8 @@ public Set getAllColumnNames(String table) throws SQLException { String allColumnNamesQuery = createAllColumnNamesQuery(table); Set allColumnNames = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); try (Connection connection = getConnection(); - Statement stmt = connection.createStatement(); - ResultSet resultSet = stmt.executeQuery(allColumnNamesQuery) - ) { + Statement stmt = connection.createStatement(); + ResultSet resultSet = stmt.executeQuery(allColumnNamesQuery)) { while (resultSet.next()) { allColumnNames.add(resultSet.getString(1)); } @@ -507,34 +560,36 @@ public DataSet getBatchWithoutIdentifier(OffsetQueryDefinition queryDefinition) @Override public DataSet getBatchWithoutIdentifier(OffsetQueryDefinition queryDefinition, Instant time) throws Exception { - //get batches with modifiedts >= configured time for incremental migration + // get batches with modifiedts >= configured time for incremental migration List conditionsList = new ArrayList<>(1); processDefaultConditions(queryDefinition.getTable(), conditionsList); if (time != null) { conditionsList.add("modifiedts > ?"); } String[] conditions = null; - if (conditionsList.size() > 0) { + if (!conditionsList.isEmpty()) { conditions = conditionsList.toArray(new String[conditionsList.size()]); - if(LOG.isDebugEnabled()) - { - LOG.debug("Batch query conditions {}", Arrays.toString(conditions)); + if (LOG.isDebugEnabled()) { + LOG.debug("Batch query conditions {}", Arrays.toString(conditions)); } - } - LOG.debug("Batch query table: {}, offset: {}, batchSize: {}, orderByColumns: {}", queryDefinition.getTable(), queryDefinition.getOffset(), queryDefinition.getBatchSize(), queryDefinition.getOrderByColumns()); + } + LOG.debug("Batch query table: {}, offset: {}, batchSize: {}, orderByColumns: {}", queryDefinition.getTable(), + queryDefinition.getOffset(), queryDefinition.getBatchSize(), queryDefinition.getOrderByColumns()); try (Connection connection = getConnection(); - PreparedStatement stmt = connection.prepareStatement(buildOffsetBatchQuery(queryDefinition, conditions))) { + PreparedStatement stmt = connection + .prepareStatement(buildOffsetBatchQuery(queryDefinition, conditions))) { stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); + int paramIdx = 0; if (time != null) { - stmt.setTimestamp(1, Timestamp.from(time)); - stmt.setLong(2, queryDefinition.getOffset()); - stmt.setLong(3, queryDefinition.getBatchSize()); - } else { - stmt.setLong(1, queryDefinition.getOffset()); - stmt.setLong(2, queryDefinition.getBatchSize()); + stmt.setTimestamp(++paramIdx, Timestamp.from(time)); + } + + if (hasParameterizedOffsetBatchQuery()) { + stmt.setLong(++paramIdx, queryDefinition.getOffset()); + stmt.setLong(++paramIdx, queryDefinition.getBatchSize()); } ResultSet resultSet = stmt.executeQuery(); - return convertToBatchDataSet(resultSet); + return convertToBatchDataSet(queryDefinition.getBatchId(), resultSet); } } @@ -545,7 +600,7 @@ public DataSet getBatchOrderedByColumn(SeekQueryDefinition queryDefinition) thro @Override public DataSet getBatchOrderedByColumn(SeekQueryDefinition queryDefinition, Instant time) throws Exception { - //get 
batches with modifiedts >= configured time for incremental migration + // get batches with modifiedts >= configured time for incremental migration List conditionsList = new ArrayList<>(2); processDefaultConditions(queryDefinition.getTable(), conditionsList); int conditionIndex = 1; @@ -556,12 +611,12 @@ public DataSet getBatchOrderedByColumn(SeekQueryDefinition queryDefinition, Inst } int lastColumnConditionIndex = 0; if (queryDefinition.getLastColumnValue() != null) { - conditionsList.add(String.format("%s >= ?", queryDefinition.getColumn())); + conditionsList.add(String.format(getLastValueCondition(), queryDefinition.getColumn())); lastColumnConditionIndex = conditionIndex++; } int nextColumnConditionIndex = 0; if (queryDefinition.getNextColumnValue() != null) { - conditionsList.add(String.format("%s < ?", queryDefinition.getColumn())); + conditionsList.add(String.format(getNextValueCondition(), queryDefinition.getColumn())); nextColumnConditionIndex = conditionIndex++; } String[] conditions = null; @@ -569,22 +624,30 @@ public DataSet getBatchOrderedByColumn(SeekQueryDefinition queryDefinition, Inst conditions = conditionsList.toArray(new String[conditionsList.size()]); } try (Connection connection = getConnection(); - PreparedStatement stmt = connection.prepareStatement(buildValueBatchQuery(queryDefinition, conditions))) { + PreparedStatement stmt = connection + .prepareStatement(buildValueBatchQuery(queryDefinition, conditions))) { stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); if (timeConditionIndex > 0) { stmt.setTimestamp(timeConditionIndex, Timestamp.from(time)); } if (lastColumnConditionIndex > 0) { - stmt.setObject(lastColumnConditionIndex, queryDefinition.getLastColumnValue()); + stmt.setObject(lastColumnConditionIndex, queryDefinition.getLastColumnValue()); } if (nextColumnConditionIndex > 0) { - stmt.setObject(nextColumnConditionIndex, queryDefinition.getNextColumnValue()); - } + stmt.setObject(nextColumnConditionIndex, queryDefinition.getNextColumnValue()); + } ResultSet resultSet = stmt.executeQuery(); - return convertToBatchDataSet(resultSet); + return convertToBatchDataSet(queryDefinition.getBatchId(), resultSet); } } + protected String getLastValueCondition() { + return "%s >= ?"; + } + + protected String getNextValueCondition() { + return "%s < ?"; + } @Override public DataSet getBatchMarkersOrderedByColumn(MarkersQueryDefinition queryDefinition) throws Exception { @@ -592,35 +655,38 @@ public DataSet getBatchMarkersOrderedByColumn(MarkersQueryDefinition queryDefini } @Override - public DataSet getBatchMarkersOrderedByColumn(MarkersQueryDefinition queryDefinition, Instant time) throws Exception { - //get batches with modifiedts >= configured time for incremental migration + public DataSet getBatchMarkersOrderedByColumn(MarkersQueryDefinition queryDefinition, Instant time) + throws Exception { + // get batches with modifiedts >= configured time for incremental migration List conditionsList = new ArrayList<>(2); processDefaultConditions(queryDefinition.getTable(), conditionsList); if (time != null) { conditionsList.add("modifiedts > ?"); } String[] conditions = null; - if (conditionsList.size() > 0) { + if (!conditionsList.isEmpty()) { conditions = conditionsList.toArray(new String[conditionsList.size()]); } try (Connection connection = getConnection(); - PreparedStatement stmt = connection.prepareStatement(buildBatchMarkersQuery(queryDefinition, conditions))) { + PreparedStatement stmt = connection + 
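As a conceptual illustration of the seek-style pagination driven by getBatchOrderedByColumn above and the batch-markers query: the markers query selects every batchSize-th value of the ordering column, and each batch is then fetched with "column >= lastMarker AND column < nextMarker". The sketch below demonstrates the idea on an in-memory list of PKs; it is not part of the extension.

import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.LongStream;

// Conceptual sketch of marker-based batching (illustration only).
public class BatchMarkerSketch {

    public static void main(String[] args) {
        int batchSize = 4;
        List<Long> orderedPks = LongStream.rangeClosed(1, 10).boxed().collect(Collectors.toList());

        // markers: every batchSize-th PK (rownum % batchSize == 0 in the markers query)
        List<Long> markers = IntStream.range(0, orderedPks.size())
                .filter(i -> i % batchSize == 0)
                .mapToObj(orderedPks::get)
                .collect(Collectors.toList()); // [1, 5, 9]

        for (int i = 0; i < markers.size(); i++) {
            Long last = markers.get(i);
            Long next = i + 1 < markers.size() ? markers.get(i + 1) : null;
            System.out.println("batch " + i + ": PK >= " + last + (next != null ? " AND PK < " + next : ""));
        }
    }
}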
.prepareStatement(buildBatchMarkersQuery(queryDefinition, conditions))) { stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); + int paramIdx = 0; if (time != null) { - stmt.setTimestamp(1, Timestamp.from(time)); - stmt.setLong(2, queryDefinition.getBatchSize()); - } else { - stmt.setLong(1, queryDefinition.getBatchSize()); + stmt.setTimestamp(++paramIdx, Timestamp.from(time)); + } + if (hasParameterizedBatchMarkersQuery()) { + stmt.setLong(++paramIdx, queryDefinition.getBatchSize()); } + ResultSet resultSet = stmt.executeQuery(); - return convertToBatchDataSet(resultSet); + return convertToBatchDataSet(0, resultSet); } } @Override public DataSet getUniqueColumns(String table) throws Exception { - try (Connection connection = getConnection(); - Statement stmt = connection.createStatement()) { + try (Connection connection = getConnection(); Statement stmt = connection.createStatement()) { ResultSet resultSet = stmt.executeQuery(createUniqueColumnsQuery(table)); return convertToDataSet(resultSet); } @@ -628,10 +694,18 @@ public DataSet getUniqueColumns(String table) throws Exception { protected abstract String buildOffsetBatchQuery(OffsetQueryDefinition queryDefinition, String... conditions); + protected boolean hasParameterizedOffsetBatchQuery() { + return false; + } + protected abstract String buildValueBatchQuery(SeekQueryDefinition queryDefinition, String... conditions); protected abstract String buildBatchMarkersQuery(MarkersQueryDefinition queryDefinition, String... conditions); + protected boolean hasParameterizedBatchMarkersQuery() { + return false; + } + protected abstract String createUniqueColumnsQuery(String tableName); protected void processDefaultConditions(String table, List conditionsList) { @@ -641,7 +715,6 @@ protected void processDefaultConditions(String table, List conditionsLis } } - private String getTsCondition(String table) { Objects.requireNonNull(table); if (table.toLowerCase().endsWith(CommercedbsyncConstants.DEPLOYMENTS_TABLE)) { @@ -654,12 +727,12 @@ protected String expandConditions(String[] conditions) { if (conditions == null || conditions.length == 0) { return "1=1"; } else { - return Joiner.on(" and ").join(conditions); + return Joiner.on(" AND ").join(conditions); } } - protected DataSet convertToBatchDataSet(ResultSet resultSet) throws Exception { - return convertToDataSet(resultSet); + protected DataSet convertToBatchDataSet(int batchId, ResultSet resultSet) throws Exception { + return convertToDataSet(batchId, resultSet); } @Override @@ -668,13 +741,13 @@ public boolean validateConnection() throws SQLException { return connection.isValid(120); } } - + @Override public Set getAllViewNames() throws SQLException { Set allViewNames = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); try (Connection connection = getConnection()) { - DatabaseMetaData meta = connection.getMetaData(); - ResultSet resultSet = meta.getTables(null, null, "%", new String[] {"VIEW"}); + DatabaseMetaData meta = connection.getMetaData(); + ResultSet resultSet = meta.getTables(null, null, "%", new String[]{"VIEW"}); while (resultSet.next()) { String tableName = resultSet.getString("TABLE_NAME"); if (!StringUtils.startsWithIgnoreCase(tableName, CommercedbsyncConstants.MIGRATION_TABLESPREFIX)) { @@ -684,4 +757,40 @@ public Set getAllViewNames() throws SQLException { } return allViewNames; } + + public JDBCQueriesStore getJdbcQueriesStore() { + // the store will have no entries if dataSourceConfiguration.isLogSql() is false + return jdbcQueriesStore; + } + + protected 
Map getLocationMap() { + final String connectionString = getDataSourceConfiguration().getConnectionString(); + int endIndex = connectionString.indexOf('?'); + String newConnectionString = connectionString.substring(endIndex + 1); + List entries = getTokensWithCollection(newConnectionString, "&"); + + final Map locationMap = entries.stream().map(s -> s.split("=")) + .collect(Collectors.toMap(s -> s[0], s -> s[1])); + return locationMap; + } + + @Override + public void clearJdbcQueriesStore() { + jdbcQueriesStore.clear(); + } + + public List getTokensWithCollection(final String str, final String delimiter) { + return Collections.list(new StringTokenizer(str, delimiter)).stream().map(token -> (String) token) + .collect(Collectors.toList()); + } + + protected String getLpTableName(String tableName) { + return StringUtils.removeEndIgnoreCase(tableName, LP_SUFFIX); + } + + protected abstract String getBulkInsertStatementParamList(List columnsToCopy, + List columnsToCopyValues); + + protected abstract String getBulkUpdateStatementParamList(List columnsToCopy, + List columnsToCopyValues, List upsertIDs); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AzureDataRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AzureDataRepository.java index abe0a2e..21fe2e8 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AzureDataRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AzureDataRepository.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -11,10 +11,14 @@ import java.sql.SQLException; import java.sql.Statement; import java.sql.Types; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import javax.sql.DataSource; -import org.apache.commons.lang.StringUtils; +import com.google.common.base.Joiner; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import org.apache.ddlutils.Platform; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -34,10 +38,9 @@ public class AzureDataRepository extends AbstractDataRepository { private static final Logger LOG = LoggerFactory.getLogger(AzureDataRepository.class); - private static final String LP_SUFFIX = "lp"; - - public AzureDataRepository(DataSourceConfiguration dataSourceConfiguration, DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { - super(dataSourceConfiguration, databaseMigrationDataTypeMapperService); + public AzureDataRepository(MigrationContext migrationContext, DataSourceConfiguration dataSourceConfiguration, + DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { + super(migrationContext, dataSourceConfiguration, databaseMigrationDataTypeMapperService); } @Override @@ -57,23 +60,32 @@ protected void addCustomPlatformTypeMapping(Platform platform) { @Override protected String buildOffsetBatchQuery(OffsetQueryDefinition queryDefinition, String... conditions) { - final String batchQuery = String.format("SELECT * FROM %s WHERE %s ORDER BY %s OFFSET ? ROWS FETCH NEXT ? ROWS ONLY", queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getOrderByColumns()); + final String batchQuery = String.format( + "SELECT * FROM %s WHERE %s ORDER BY %s OFFSET ? ROWS FETCH NEXT ? 
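For clarity, a small standalone example of what getLocationMap above extracts from the query part of a JDBC URL; the URL itself is hypothetical.

import java.util.Arrays;
import java.util.Map;
import java.util.stream.Collectors;

// Illustration of the key=value parsing performed by getLocationMap.
public class ConnectionParamsExample {

    public static void main(String[] args) {
        String url = "jdbc:mysql://source-host:3306/commerce?useSSL=false&serverTimezone=UTC";
        String query = url.substring(url.indexOf('?') + 1);
        Map<String, String> params = Arrays.stream(query.split("&"))
                .map(s -> s.split("="))
                .collect(Collectors.toMap(s -> s[0], s -> s[1]));
        System.out.println(params); // e.g. {serverTimezone=UTC, useSSL=false} (order not guaranteed)
    }
}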
ROWS ONLY", + queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getOrderByColumns()); LOG.debug("Executing batch query {}", batchQuery); return batchQuery; } + @Override + protected boolean hasParameterizedOffsetBatchQuery() { + return true; + } + @Override protected String buildValueBatchQuery(SeekQueryDefinition queryDefinition, String... conditions) { - return String.format("select top %s * from %s where %s order by %s", queryDefinition.getBatchSize(), queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getColumn()); + return String.format("SELECT TOP %s * FROM %s WHERE %s ORDER BY %s", queryDefinition.getBatchSize(), + queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getColumn()); } @Override protected String buildBatchMarkersQuery(MarkersQueryDefinition queryDefinition, String... conditions) { String column = queryDefinition.getColumn(); String tableName = queryDefinition.getTable(); - if (queryDefinition.isLpTableEnabled()){ + if (queryDefinition.isLpTableEnabled()) { tableName = getLpTableName(tableName); } + // spotless:off return String.format("SELECT t.%s, t.rownum\n" + "FROM\n" + "(\n" + @@ -81,13 +93,62 @@ protected String buildBatchMarkersQuery(MarkersQueryDefinition queryDefinition, " FROM %s\n WHERE %s" + ") AS t\n" + "WHERE t.rownum %% ? = 0\n" + - "ORDER BY t.%s", column, column, column, tableName, expandConditions(conditions), column); + "ORDER BY t.%s", + // spotless:on + column, column, column, tableName, expandConditions(conditions), column); + } + + @Override + protected boolean hasParameterizedBatchMarkersQuery() { + return true; + } + + @Override + public String buildBulkUpsertStatement(String table, List columnsToCopy, List upsertIDs) { + /* + * https://michaeljswart.com/2017/07/sql-server-upsert-patterns-and- + * antipatterns/ We are not using a stored procedure here as CCv2 does not grant + * sp exec permission to the default db user + */ + final StringBuilder sqlBuilder = new StringBuilder(); + + sqlBuilder.append(String.format("MERGE %s WITH (HOLDLOCK) AS t", table)); + sqlBuilder.append("\n"); + sqlBuilder.append(String.format("USING (SELECT %s) AS s ON ", + Joiner.on(',').join(columnsToCopy.stream().map(column -> "? " + column).collect(Collectors.toList())))); + sqlBuilder.append(String.format("( %s )", upsertIDs.stream() + .map(column -> String.format(" t.%s = s.%s", column, column)).collect(Collectors.joining(" AND ")))); + sqlBuilder.append("\n"); + sqlBuilder.append("WHEN MATCHED THEN UPDATE"); // update + sqlBuilder.append("\n"); + sqlBuilder.append(getBulkUpdateStatementParamList(columnsToCopy, + columnsToCopy.stream().map(column -> "s." + column).collect(Collectors.toList()), upsertIDs)); + sqlBuilder.append("\n"); + sqlBuilder.append("WHEN NOT MATCHED THEN INSERT"); // insert + sqlBuilder.append("\n"); + sqlBuilder.append(getBulkInsertStatementParamList(columnsToCopy, + columnsToCopy.stream().map(column -> "s." 
+ column).collect(Collectors.toList()))); + sqlBuilder.append(";"); + + return sqlBuilder.toString(); + } + + @Override + protected String getBulkInsertStatementParamList(List columnsToCopy, List columnsToCopyValues) { + return "(" + String.join(", ", columnsToCopy) + ") VALUES (" + String.join(", ", columnsToCopyValues) + ")"; + } + + @Override + protected String getBulkUpdateStatementParamList(List columnsToCopy, List columnsToCopyValues, + List upsertIDs) { + return "SET " + IntStream.range(0, columnsToCopy.size()) + .mapToObj(idx -> String.format("%s = %s", columnsToCopy.get(idx), columnsToCopyValues.get(idx))) + .collect(Collectors.joining(", ")); } @Override protected String createAllTableNamesQuery() { - return String.format( - "SELECT DISTINCT TABLE_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = '%s'", + return String.format("SELECT DISTINCT TABLE_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = '%s'", getDataSourceConfiguration().getSchema()); } @@ -100,30 +161,40 @@ protected String createAllColumnNamesQuery(String tableName) { @Override protected String getDisableIndexesScript(String table) { - return String.format("SELECT 'ALTER INDEX ' + QUOTENAME(I.name) + ' ON ' + QUOTENAME(SCHEMA_NAME(T.schema_id))+'.'+ QUOTENAME(T.name) + ' DISABLE' FROM sys.indexes I INNER JOIN sys.tables T ON I.object_id = T.object_id WHERE I.type_desc = 'NONCLUSTERED' AND I.name IS NOT NULL AND I.is_disabled = 0 AND T.name = '%s'", table); + return String.format( + "SELECT 'ALTER INDEX ' + QUOTENAME(I.name) + ' ON ' + QUOTENAME(SCHEMA_NAME(T.schema_id))+'.'+ QUOTENAME(T.name) + ' DISABLE' FROM sys.indexes I INNER JOIN sys.tables T ON I.object_id = T.object_id WHERE I.type_desc = 'NONCLUSTERED' AND I.name IS NOT NULL AND I.is_disabled = 0 AND T.name = '%s'", + table); } @Override protected String getEnableIndexesScript(String table) { - return String.format("SELECT 'ALTER INDEX ' + QUOTENAME(I.name) + ' ON ' + QUOTENAME(SCHEMA_NAME(T.schema_id))+'.'+ QUOTENAME(T.name) + ' REBUILD' FROM sys.indexes I INNER JOIN sys.tables T ON I.object_id = T.object_id WHERE I.type_desc = 'NONCLUSTERED' AND I.name IS NOT NULL AND I.is_disabled = 1 AND T.name = '%s'", table); + return String.format( + "SELECT 'ALTER INDEX ' + QUOTENAME(I.name) + ' ON ' + QUOTENAME(SCHEMA_NAME(T.schema_id))+'.'+ QUOTENAME(T.name) + ' REBUILD' FROM sys.indexes I INNER JOIN sys.tables T ON I.object_id = T.object_id WHERE I.type_desc = 'NONCLUSTERED' AND I.name IS NOT NULL AND I.is_disabled = 1 AND T.name = '%s'", + table); } @Override protected String getDropIndexesScript(String table) { - return String.format("SELECT 'DROP INDEX ' + QUOTENAME(I.name) + ' ON ' + QUOTENAME(SCHEMA_NAME(T.schema_id))+'.'+ QUOTENAME(T.name) FROM sys.indexes I INNER JOIN sys.tables T ON I.object_id = T.object_id WHERE I.type_desc = 'NONCLUSTERED' AND I.name IS NOT NULL AND T.name = '%s'", table); + return String.format( + "SELECT 'DROP INDEX ' + QUOTENAME(I.name) + ' ON ' + QUOTENAME(SCHEMA_NAME(T.schema_id))+'.'+ QUOTENAME(T.name) FROM sys.indexes I INNER JOIN sys.tables T ON I.object_id = T.object_id WHERE I.type_desc = 'NONCLUSTERED' AND I.name IS NOT NULL AND T.name = '%s'", + table); + } + + @Override + public String getDatabaseTimezone() { + return "UTC"; } @Override public float getDatabaseUtilization() throws SQLException { String query = "SELECT TOP 1 end_time, (SELECT Max(v) FROM (VALUES (avg_cpu_percent),(avg_data_io_percent),(avg_log_write_percent)) AS value(v)) AS [avg_DTU_percent] FROM sys.dm_db_resource_stats ORDER by end_time 
DESC;"; try (Connection connection = getConnection(); - Statement stmt = connection.createStatement(); - ResultSet resultSet = stmt.executeQuery(query); - ) { + Statement stmt = connection.createStatement(); + ResultSet resultSet = stmt.executeQuery(query)) { if (resultSet.next()) { return resultSet.getFloat("avg_DTU_percent"); } else { - //LOG.debug("There are no data with regard to Azure DTU"); + // LOG.debug("There are no data with regard to Azure DTU"); return -1; } } catch (Exception e) { @@ -141,6 +212,7 @@ protected Platform createPlatform(DatabaseSettings databaseSettings, DataSource @Override protected String createUniqueColumnsQuery(String tableName) { + // spotless:off return String.format("SELECT col.name FROM (\n" + "SELECT TOP (1)\n" + " SchemaName = t.schema_id,\n" + @@ -166,15 +238,13 @@ protected String createUniqueColumnsQuery(String tableName) { " sys.index_columns ic ON t1.ObjectId = ic.object_id and t1.IndexId = ic.index_id \n" + "INNER JOIN \n" + " sys.columns col ON ic.object_id = col.object_id and ic.column_id = col.column_id \n" + - "ORDER BY ic.key_ordinal", tableName, getDataSourceConfiguration().getSchema()); + "ORDER BY ic.key_ordinal", + // spotless:on + tableName, getDataSourceConfiguration().getSchema()); } @Override public DataBaseProvider getDatabaseProvider() { return DataBaseProvider.MSSQL; } - - private String getLpTableName(String tableName){ - return StringUtils.removeEndIgnoreCase(tableName,LP_SUFFIX); - } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AzureIncrementalDataRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AzureIncrementalDataRepository.java index 0b1eef6..b5e7ba8 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AzureIncrementalDataRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/AzureIncrementalDataRepository.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -7,10 +7,12 @@ package com.sap.cx.boosters.commercedbsync.repository.impl; import com.google.common.base.Joiner; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import com.sap.cx.boosters.commercedbsync.dataset.DataSet; import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfiguration; import com.sap.cx.boosters.commercedbsync.service.DatabaseMigrationDataTypeMapperService; import de.hybris.platform.util.Config; + import java.sql.Connection; import java.sql.PreparedStatement; import java.sql.ResultSet; @@ -19,395 +21,348 @@ import java.time.Instant; import java.util.ArrayList; import java.util.List; -import java.util.stream.Collectors; -import org.apache.commons.lang.StringUtils; + import com.sap.cx.boosters.commercedbsync.MarkersQueryDefinition; import com.sap.cx.boosters.commercedbsync.OffsetQueryDefinition; import com.sap.cx.boosters.commercedbsync.SeekQueryDefinition; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -public class AzureIncrementalDataRepository extends AzureDataRepository{ +public class AzureIncrementalDataRepository extends AzureDataRepository { - private static final Logger LOG = LoggerFactory.getLogger(AzureIncrementalDataRepository.class); + private static final Logger LOG = LoggerFactory.getLogger(AzureIncrementalDataRepository.class); - private static final String LP_SUFFIX = "lp"; + private static final String PK = "PK"; - private static final String PK = "PK"; + private static final String deletionTable = Config.getString("db.tableprefix", "") + "itemdeletionmarkers"; - private static String deletionTable = Config.getParameter("db.tableprefix") == null ? "" : Config.getParameter("db.tableprefix")+ "itemdeletionmarkers"; + public AzureIncrementalDataRepository(MigrationContext migrationContext, + DataSourceConfiguration dataSourceConfiguration, + DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { + super(migrationContext, dataSourceConfiguration, databaseMigrationDataTypeMapperService); + } - public AzureIncrementalDataRepository( - DataSourceConfiguration dataSourceConfiguration, - DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { - super(dataSourceConfiguration, databaseMigrationDataTypeMapperService); - } - @Override - protected String buildOffsetBatchQuery(OffsetQueryDefinition queryDefinition, String... conditions) { + @Override + protected String buildOffsetBatchQuery(OffsetQueryDefinition queryDefinition, String... conditions) { - if(queryDefinition.isDeletionEnabled()) { - return buildOffsetBatchQueryForDeletion(queryDefinition,conditions); - } else if(queryDefinition.isLpTableEnabled()) { - return buildOffsetBatchQueryForLp(queryDefinition,conditions); - } - else { - return super.buildOffsetBatchQuery(queryDefinition,conditions); - } - } - - private String buildOffsetBatchQueryForLp(OffsetQueryDefinition queryDefinition, String... conditions) { - String orderBy = PK; - return String.format("SELECT * FROM %s WHERE %s ORDER BY %s OFFSET %s ROWS FETCH NEXT %s ROWS ONLY", getLpTableName(queryDefinition.getTable()), expandConditions(conditions), orderBy, queryDefinition.getOffset(), queryDefinition.getBatchSize()); - } - - private String buildOffsetBatchQueryForDeletion(OffsetQueryDefinition queryDefinition, String... 
conditions) { - return String.format("SELECT * FROM %s WHERE %s ORDER BY %s OFFSET %s ROWS FETCH NEXT %s ROWS ONLY", deletionTable, expandConditions(conditions), queryDefinition.getOrderByColumns(), queryDefinition.getOffset(), queryDefinition.getBatchSize()); - } - - @Override - protected String buildValueBatchQuery(SeekQueryDefinition queryDefinition, String... conditions) { - if(queryDefinition.isDeletionEnabled()) { - return buildValueBatchQueryForDeletion(queryDefinition,conditions); - } else { - return super.buildValueBatchQuery(queryDefinition,conditions); - } - } - - @Override - protected String buildBatchMarkersQuery(MarkersQueryDefinition queryDefinition, String... conditions) { - if(queryDefinition.isDeletionEnabled()) { - return buildBatchMarkersQueryForDeletion(queryDefinition,conditions); - } else if(queryDefinition.isLpTableEnabled()) { - return super.buildBatchMarkersQuery(queryDefinition,conditions); - } else { - return super.buildBatchMarkersQuery(queryDefinition,conditions); + if (queryDefinition.isDeletionEnabled()) { + return buildOffsetBatchQueryForDeletion(queryDefinition, conditions); + } else if (queryDefinition.isLpTableEnabled()) { + return buildOffsetBatchQueryForLp(queryDefinition, conditions); + } else { + return super.buildOffsetBatchQuery(queryDefinition, conditions); + } } - } - - @Override - public DataSet getBatchOrderedByColumn(SeekQueryDefinition queryDefinition, Instant time) throws Exception { - if(queryDefinition.isDeletionEnabled()) { - return getBatchOrderedByColumnForDeletion(queryDefinition,time); - } else if(queryDefinition.isLpTableEnabled()){ - return getBatchOrderedByColumnForLptable(queryDefinition,time); - } else { - return super.getBatchOrderedByColumn(queryDefinition,time); + + private String buildOffsetBatchQueryForLp(OffsetQueryDefinition queryDefinition, String... conditions) { + return String.format("SELECT * FROM %s WHERE %s ORDER BY %s OFFSET %s ROWS FETCH NEXT %s ROWS ONLY", + getLpTableName(queryDefinition.getTable()), expandConditions(conditions), PK, + queryDefinition.getOffset(), queryDefinition.getBatchSize()); } - } - - private String buildValueBatchQueryForDeletion(SeekQueryDefinition queryDefinition, String... conditions) { - return String.format("select top %s * from %s where %s order by %s", queryDefinition.getBatchSize(), deletionTable, expandConditions(conditions), queryDefinition.getColumn()); - } - - private DataSet getBatchOrderedByColumnForLptable(SeekQueryDefinition queryDefinition, Instant time) throws Exception { - //get batches with modifiedts >= configured time for incremental migration - List conditionsList = new ArrayList<>(2); - processDefaultConditions(queryDefinition.getTable(), conditionsList); - if (time != null) { - conditionsList.add("modifiedts > ?"); + + private String buildOffsetBatchQueryForDeletion(OffsetQueryDefinition queryDefinition, String... conditions) { + return String.format("SELECT * FROM %s WHERE %s ORDER BY %s OFFSET %s ROWS FETCH NEXT %s ROWS ONLY", + deletionTable, expandConditions(conditions), queryDefinition.getOrderByColumns(), + queryDefinition.getOffset(), queryDefinition.getBatchSize()); } - if (queryDefinition.getLastColumnValue() != null) { - conditionsList.add(String.format("%s >= %s", queryDefinition.getColumn(), queryDefinition.getLastColumnValue())); + + @Override + protected String buildValueBatchQuery(SeekQueryDefinition queryDefinition, String... 
conditions) { + if (queryDefinition.isDeletionEnabled()) { + return buildValueBatchQueryForDeletion(queryDefinition, conditions); + } else { + return super.buildValueBatchQuery(queryDefinition, conditions); + } } - if (queryDefinition.getNextColumnValue() != null) { - conditionsList.add(String.format("%s < %s", queryDefinition.getColumn(), queryDefinition.getNextColumnValue())); + + @Override + protected String buildBatchMarkersQuery(MarkersQueryDefinition queryDefinition, String... conditions) { + if (queryDefinition.isDeletionEnabled()) { + return buildBatchMarkersQueryForDeletion(queryDefinition, conditions); + } else { + return super.buildBatchMarkersQuery(queryDefinition, conditions); + } } - String[] conditions = null; - List pkList = new ArrayList(); - if (conditionsList.size() > 0) { - conditions = conditionsList.toArray(new String[conditionsList.size()]); + + @Override + public DataSet getBatchOrderedByColumn(SeekQueryDefinition queryDefinition, Instant time) throws Exception { + if (queryDefinition.isDeletionEnabled()) { + return getBatchOrderedByColumnForDeletion(queryDefinition, time); + } else if (queryDefinition.isLpTableEnabled()) { + return getBatchOrderedByColumnForLpTable(queryDefinition, time); + } else { + return super.getBatchOrderedByColumn(queryDefinition, time); + } } - try (Connection connectionForPk = getConnection(); - PreparedStatement stmt = connectionForPk.prepareStatement(buildValueBatchQueryForLptable(queryDefinition, conditions))) { - stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); - if (time != null) { - stmt.setTimestamp(1, Timestamp.from(time)); - } - ResultSet pkResultSet = stmt.executeQuery(); - pkList = convertToPkListForLpTable(pkResultSet); + + private String buildValueBatchQueryForDeletion(SeekQueryDefinition queryDefinition, String... 
conditions) { + return String.format("SELECT TOP %s * FROM %s WHERE %s ORDER BY %s", queryDefinition.getBatchSize(), + deletionTable, expandConditions(conditions), queryDefinition.getColumn()); } - // migrating LP Table no - try (Connection connection = getConnection(); - PreparedStatement stmt = connection.prepareStatement(buildValueBatchQueryForLptableWithPK(queryDefinition,pkList, conditions))) { - // stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); - ResultSet resultSet = stmt.executeQuery(); - return convertToBatchDataSet(resultSet); + private DataSet getBatchOrderedByColumnForLpTable(SeekQueryDefinition queryDefinition, Instant time) + throws Exception { + // get batches with modifiedts >= configured time for incremental migration + List conditionsList = new ArrayList<>(2); + processDefaultConditions(queryDefinition.getTable(), conditionsList); + if (time != null) { + conditionsList.add("modifiedts > ?"); + } + if (queryDefinition.getLastColumnValue() != null) { + conditionsList + .add(String.format("%s >= %s", queryDefinition.getColumn(), queryDefinition.getLastColumnValue())); + } + if (queryDefinition.getNextColumnValue() != null) { + conditionsList + .add(String.format("%s < %s", queryDefinition.getColumn(), queryDefinition.getNextColumnValue())); + } + String[] conditions = null; + List pkList; + if (conditionsList.size() > 0) { + conditions = conditionsList.toArray(new String[conditionsList.size()]); + } + try (Connection connectionForPk = getConnection(); + PreparedStatement stmt = connectionForPk + .prepareStatement(buildValueBatchQueryForLpTable(queryDefinition, conditions))) { + stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); + if (time != null) { + stmt.setTimestamp(1, Timestamp.from(time)); + } + ResultSet pkResultSet = stmt.executeQuery(); + pkList = convertToPkListForLpTable(pkResultSet); + } + + // migrating LP Table now + try (Connection connection = getConnection(); + PreparedStatement stmt = connection + .prepareStatement(buildValueBatchQueryForLpTableWithPK(queryDefinition, pkList))) { + // stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); + ResultSet resultSet = stmt.executeQuery(); + return convertToBatchDataSet(queryDefinition.getBatchId(), resultSet); + } } - } - private List convertToPkListForLpTable(ResultSet resultSet) throws Exception { - List pkList = new ArrayList<>(); - while (resultSet.next()) { - int idx = resultSet.findColumn(PK); - pkList.add(resultSet.getString(idx)); + private List convertToPkListForLpTable(ResultSet resultSet) throws Exception { + List pkList = new ArrayList<>(); + while (resultSet.next()) { + int idx = resultSet.findColumn(PK); + pkList.add(resultSet.getString(idx)); + } + return pkList; } - return pkList; - } - - private String buildValueBatchQueryForLptableWithPK(SeekQueryDefinition queryDefinition, List pkList, String... conditions ) { - - StringBuilder sqlBuilder = new StringBuilder(); - sqlBuilder.append(String.format("select * from %s where ", queryDefinition.getTable())); - sqlBuilder.append("\n"); - sqlBuilder.append(String.format("ITEMPK in (%s) " , Joiner.on(',').join(pkList.stream().map(column -> " " + column).collect(Collectors.toList())))); - sqlBuilder.append(String.format("%s order by %s ", expandConditions(conditions), queryDefinition.getColumn())); - sqlBuilder.append(";"); - - return sqlBuilder.toString(); - } - - private String buildValueBatchQueryForLptableWithPK(OffsetQueryDefinition queryDefinition, List pkList, String... 
conditions ) { - - StringBuilder sqlBuilder = new StringBuilder(); - sqlBuilder.append(String.format("select * from %s where ", queryDefinition.getTable())); - sqlBuilder.append("\n"); - sqlBuilder.append(String.format("ITEMPK in (%s) " , Joiner.on(',').join(pkList.stream().map(column -> " " + column).collect(Collectors.toList())))); - sqlBuilder.append(";"); - - return sqlBuilder.toString(); - } - private String buildValueBatchQueryForLptable(SeekQueryDefinition queryDefinition, String... conditions) { - return String.format("select top %s PK from %s where %s order by %s", queryDefinition.getBatchSize(), getLpTableName(queryDefinition.getTable()), expandConditions(conditions), queryDefinition.getColumn()); - } - - private String buildOffsetBatchQueryForLptable(OffsetQueryDefinition queryDefinition, String... conditions) { - String orderBy = PK; - return String.format("SELECT PK FROM %s WHERE %s ORDER BY %s OFFSET %s ROWS FETCH NEXT %s ROWS ONLY", getLpTableName(queryDefinition.getTable()), expandConditions(conditions), orderBy, queryDefinition.getOffset(), queryDefinition.getBatchSize()); - } - - private DataSet getBatchOrderedByColumnForDeletion(SeekQueryDefinition queryDefinition, Instant time) throws Exception { - - //get batches with modifiedts >= configured time for incremental migration - List conditionsList = new ArrayList<>(3); - if (time != null) { - conditionsList.add("modifiedts > ?"); + + private String buildValueBatchQueryForLpTableWithPK(SeekQueryDefinition queryDefinition, List pkList) { + return String.format("SELECT * FROM %s WHERE \nITEMPK IN (%s) ORDER BY %s", queryDefinition.getTable(), + Joiner.on(", ").join(pkList), queryDefinition.getColumn()); } - conditionsList.add("p_table = ?"); - if (queryDefinition.getLastColumnValue() != null) { - conditionsList.add(String.format("%s >= %s", queryDefinition.getColumn(), queryDefinition.getLastColumnValue())); + + private String buildValueBatchQueryForLpTableWithPK(OffsetQueryDefinition queryDefinition, List pkList) { + return String.format("SELECT * FROM %s WHERE \nITEMPK IN (%s)", queryDefinition.getTable(), + Joiner.on(", ").join(pkList)); } - if (queryDefinition.getNextColumnValue() != null) { - conditionsList.add(String.format("%s < %s", queryDefinition.getColumn(), queryDefinition.getNextColumnValue())); + + private String buildValueBatchQueryForLpTable(SeekQueryDefinition queryDefinition, String... conditions) { + return String.format("SELECT TOP %s %s FROM %s WHERE %s ORDER BY %s", queryDefinition.getBatchSize(), PK, + getLpTableName(queryDefinition.getTable()), expandConditions(conditions), queryDefinition.getColumn()); } - String[] conditions = null; - if (conditionsList.size() > 0) { - conditions = conditionsList.toArray(new String[conditionsList.size()]); + + private String buildOffsetBatchQueryForLpTable(OffsetQueryDefinition queryDefinition, String... 
conditions) { + return String.format("SELECT PK FROM %s WHERE %s ORDER BY %s OFFSET %s ROWS FETCH NEXT %s ROWS ONLY", + getLpTableName(queryDefinition.getTable()), expandConditions(conditions), PK, + queryDefinition.getOffset(), queryDefinition.getBatchSize()); } - try (Connection connection = getConnection(); - PreparedStatement stmt = connection.prepareStatement(buildValueBatchQuery(queryDefinition, conditions))) { - stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); - if (time != null) { - stmt.setTimestamp(1, Timestamp.from(time)); - } - // setting table for the deletions - stmt.setString(2,queryDefinition.getTable()); - - ResultSet resultSet = stmt.executeQuery(); - return convertToBatchDataSet(resultSet); + + private DataSet getBatchOrderedByColumnForDeletion(SeekQueryDefinition queryDefinition, Instant time) + throws Exception { + List conditionsList = new ArrayList<>(3); + conditionsList.add("p_table = ?"); + if (time != null) { + conditionsList.add("modifiedts > ?"); + } + if (queryDefinition.getLastColumnValue() != null) { + conditionsList + .add(String.format("%s >= %s", queryDefinition.getColumn(), queryDefinition.getLastColumnValue())); + } + if (queryDefinition.getNextColumnValue() != null) { + conditionsList + .add(String.format("%s < %s", queryDefinition.getColumn(), queryDefinition.getNextColumnValue())); + } + String[] conditions = conditionsList.toArray(new String[conditionsList.size()]); + try (Connection connection = getConnection(); + PreparedStatement stmt = connection + .prepareStatement(buildValueBatchQuery(queryDefinition, conditions))) { + stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); + stmt.setString(1, queryDefinition.getTable()); + + if (time != null) { + stmt.setTimestamp(2, Timestamp.from(time)); + } + + ResultSet resultSet = stmt.executeQuery(); + return convertToBatchDataSet(queryDefinition.getBatchId(), resultSet); + } } - } - @Override - public DataSet getBatchWithoutIdentifier(OffsetQueryDefinition queryDefinition, Instant time) throws Exception { + @Override + public DataSet getBatchWithoutIdentifier(OffsetQueryDefinition queryDefinition, Instant time) throws Exception { - if(queryDefinition.isDeletionEnabled()) { - return getBatchWithoutIdentifierForDeletion(queryDefinition,time); - } else if(queryDefinition.isLpTableEnabled()){ - return getBatchWithoutIdentifierForLptable(queryDefinition,time); - } else { - return super.getBatchWithoutIdentifier(queryDefinition,time); + if (queryDefinition.isDeletionEnabled()) { + return getBatchWithoutIdentifierForDeletion(queryDefinition, time); + } else if (queryDefinition.isLpTableEnabled()) { + return getBatchWithoutIdentifierForLpTable(queryDefinition, time); + } else { + return super.getBatchWithoutIdentifier(queryDefinition, time); + } } - } - private DataSet getBatchWithoutIdentifierForDeletion(OffsetQueryDefinition queryDefinition, Instant time) throws Exception { - //get batches with modifiedts >= configured time for incremental migration - List conditionsList = new ArrayList<>(2); + private DataSet getBatchWithoutIdentifierForDeletion(OffsetQueryDefinition queryDefinition, Instant time) + throws Exception { + // get batches with modifiedts >= configured time for incremental migration + List conditionsList = new ArrayList<>(2); - if (time != null) { - conditionsList.add("modifiedts > ?"); - } - conditionsList.add("p_table = ?"); - String[] conditions = null; - if (conditionsList.size() > 0) { - conditions = conditionsList.toArray(new 
String[conditionsList.size()]); - } - try (Connection connection = getConnection(); - PreparedStatement stmt = connection.prepareStatement(buildOffsetBatchQuery(queryDefinition, conditions))) { - stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); - if (time != null) { - stmt.setTimestamp(1, Timestamp.from(time)); - } - // setting table for the deletions - stmt.setString(2,queryDefinition.getTable()); - - ResultSet resultSet = stmt.executeQuery(); - return convertToBatchDataSet(resultSet); - } - } - - private DataSet getBatchWithoutIdentifierForLptable(OffsetQueryDefinition queryDefinition, Instant time) throws Exception { - //get batches with modifiedts >= configured time for incremental migration - List conditionsList = new ArrayList<>(1); - processDefaultConditions(queryDefinition.getTable(), conditionsList); - if (time != null) { - conditionsList.add("modifiedts > ?"); - } - String[] conditions = null; - if (conditionsList.size() > 0) { - conditions = conditionsList.toArray(new String[conditionsList.size()]); - } - List pkList = new ArrayList(); - try (Connection connectionForPk = getConnection(); - PreparedStatement stmt = connectionForPk.prepareStatement(buildOffsetBatchQueryForLptable(queryDefinition, conditions))) { - stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); - if (time != null) { - stmt.setTimestamp(1, Timestamp.from(time)); - } - ResultSet pkResultSet = stmt.executeQuery(); - pkList = convertToPkListForLpTable(pkResultSet); + conditionsList.add("p_table = ?"); + + if (time != null) { + conditionsList.add("modifiedts > ?"); + } + String[] conditions = conditionsList.toArray(new String[conditionsList.size()]); + try (Connection connection = getConnection(); + PreparedStatement stmt = connection + .prepareStatement(buildOffsetBatchQuery(queryDefinition, conditions))) { + stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); + stmt.setString(1, queryDefinition.getTable()); + + if (time != null) { + stmt.setTimestamp(2, Timestamp.from(time)); + } + // setting table for the deletions + + ResultSet resultSet = stmt.executeQuery(); + return convertToBatchDataSet(queryDefinition.getBatchId(), resultSet); + } } - // migrating LP Table no - try (Connection connection = getConnection(); - PreparedStatement stmt = connection.prepareStatement(buildValueBatchQueryForLptableWithPK(queryDefinition,pkList, conditions))) { - // stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); - ResultSet resultSet = stmt.executeQuery(); - return convertToBatchDataSet(resultSet); + private DataSet getBatchWithoutIdentifierForLpTable(OffsetQueryDefinition queryDefinition, Instant time) + throws Exception { + List conditionsList = new ArrayList<>(1); + if (time != null) { + conditionsList.add("modifiedts > ?"); + } + String[] conditions = null; + if (conditionsList.size() > 0) { + conditions = conditionsList.toArray(new String[conditionsList.size()]); + } + List pkList; + try (Connection connectionForPk = getConnection(); + PreparedStatement stmt = connectionForPk + .prepareStatement(buildOffsetBatchQueryForLpTable(queryDefinition, conditions))) { + stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); + if (time != null) { + stmt.setTimestamp(1, Timestamp.from(time)); + } + ResultSet pkResultSet = stmt.executeQuery(); + pkList = convertToPkListForLpTable(pkResultSet); + } + + // migrating LP Table now + try (Connection connection = getConnection(); + PreparedStatement stmt = connection + 
.prepareStatement(buildValueBatchQueryForLpTableWithPK(queryDefinition, pkList))) { + // stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); + ResultSet resultSet = stmt.executeQuery(); + return convertToBatchDataSet(queryDefinition.getBatchId(), resultSet); + } } - } + @Override + public DataSet getBatchMarkersOrderedByColumn(MarkersQueryDefinition queryDefinition, Instant time) + throws Exception { + if (!queryDefinition.isDeletionEnabled()) { + return super.getBatchMarkersOrderedByColumn(queryDefinition, time); + } - @Override - public DataSet getBatchMarkersOrderedByColumn(MarkersQueryDefinition queryDefinition, Instant time) throws Exception { + List conditionsList = new ArrayList<>(2); + processDefaultConditions(queryDefinition.getTable(), conditionsList); - if(!queryDefinition.isDeletionEnabled()) { - return super.getBatchMarkersOrderedByColumn(queryDefinition,time); - } - //get batches with modifiedts >= configured time for incremental migration - List conditionsList = new ArrayList<>(2); - processDefaultConditions(queryDefinition.getTable(), conditionsList); - if (time != null) { - conditionsList.add("modifiedts > ?"); - } - // setting table for the deletions - conditionsList.add("p_table = ?"); + conditionsList.add("p_table = ?"); - String[] conditions = null; - if (conditionsList.size() > 0) { - conditions = conditionsList.toArray(new String[conditionsList.size()]); - } - try (Connection connection = getConnection(); - PreparedStatement stmt = connection.prepareStatement(buildBatchMarkersQuery(queryDefinition, conditions))) { - stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); - if (time != null) { - stmt.setTimestamp(1, Timestamp.from(time)); - } - // setting table for the deletions - stmt.setString(2,queryDefinition.getTable()); - - ResultSet resultSet = stmt.executeQuery(); - return convertToBatchDataSet(resultSet); - } - } - - @Override - public long getRowCountModifiedAfter(String table, Instant time,boolean isDeletionEnabled, boolean lpTableMigrationEnabled) throws SQLException { - if(isDeletionEnabled) { - return getRowCountModifiedAfterforDeletion(table,time); - } else if(lpTableMigrationEnabled) { - return getRowCountModifiedAfterforLpTable(table,time); - } - else{ - return super.getRowCountModifiedAfter(table,time,false,false); + if (time != null) { + conditionsList.add("modifiedts > ?"); + } - } - } - - private long getRowCountModifiedAfterforLpTable(String table, Instant time) throws SQLException { - List conditionsList = new ArrayList<>(1); - processDefaultConditions(table, conditionsList); - String[] conditions = null; - if (conditionsList.size() > 0) { - conditions = conditionsList.toArray(new String[conditionsList.size()]); - } - try (Connection connection = getConnection()) { - try (PreparedStatement stmt = connection.prepareStatement(String.format("select count(*) from %s where modifiedts > ? 
AND %s", getLpTableName(table), expandConditions(conditions)))) { - stmt.setTimestamp(1, Timestamp.from(time)); - ResultSet resultSet = stmt.executeQuery(); - long value = 0; - if (resultSet.next()) { - value = resultSet.getLong(1); + String[] conditions = conditionsList.toArray(new String[conditionsList.size()]); + + try (Connection connection = getConnection(); + PreparedStatement stmt = connection + .prepareStatement(buildBatchMarkersQuery(queryDefinition, conditions))) { + stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); + + stmt.setString(1, queryDefinition.getTable()); + + if (time != null) { + stmt.setTimestamp(2, Timestamp.from(time)); + } + + ResultSet resultSet = stmt.executeQuery(); + return convertToBatchDataSet(0, resultSet); } - return value; - } } - } - - private long getRowCountModifiedAfterforDeletion(String table, Instant time) throws SQLException { - // - List conditionsList = new ArrayList<>(2); - processDefaultConditions(table, conditionsList); - // setting table for the deletions - conditionsList.add("p_table = ?"); - String[] conditions = null; - if (conditionsList.size() > 0) { - conditions = conditionsList.toArray(new String[conditionsList.size()]); - } - try (Connection connection = getConnection()) { - try (PreparedStatement stmt = connection.prepareStatement(String.format("select count(*) from %s where modifiedts > ? AND %s", deletionTable, expandConditions(conditions)))) { - stmt.setTimestamp(1, Timestamp.from(time)); - // setting table for the deletions - stmt.setString(2,table); - ResultSet resultSet = stmt.executeQuery(); - long value = 0; - if (resultSet.next()) { - value = resultSet.getLong(1); + + @Override + public long getRowCountModifiedAfter(String table, Instant time, boolean isDeletionEnabled, + boolean lpTableMigrationEnabled) throws SQLException { + if (isDeletionEnabled) { + return getRowCountModifiedAfterforDeletion(table, time); + } else if (lpTableMigrationEnabled) { + return getRowCountModifiedAfterForLpTable(table, time); + } else { + return super.getRowCountModifiedAfter(table, time, false, false); } - return value; - } } - } - - private String buildBatchMarkersQueryForDeletion(MarkersQueryDefinition queryDefinition, String... conditions) { - String column = queryDefinition.getColumn(); - return String.format("SELECT t.%s, t.rownum\n" + - "FROM\n" + - "(\n" + - " SELECT %s, (ROW_NUMBER() OVER (ORDER BY %s))-1 AS rownum\n" + - " FROM %s\n WHERE %s" + - ") AS t\n" + - "WHERE t.rownum %% %s = 0\n" + - "ORDER BY t.%s", column, column, column, deletionTable, expandConditions(conditions), queryDefinition.getBatchSize(), column); - } - - private long getRowCountModifiedAfterForLP(String table, Instant time) throws SQLException { - List conditionsList = new ArrayList<>(1); - - if (! 
StringUtils.endsWithIgnoreCase(table,LP_SUFFIX)) { - return super.getRowCountModifiedAfter(table,time,false,false); - } - table = StringUtils.removeEndIgnoreCase(table,LP_SUFFIX); - processDefaultConditions(table, conditionsList); - String[] conditions = null; - if (conditionsList.size() > 0) { - conditions = conditionsList.toArray(new String[conditionsList.size()]); + private long getRowCountModifiedAfterForLpTable(String table, Instant time) throws SQLException { + try (Connection connection = getConnection(); + PreparedStatement stmt = connection.prepareStatement( + String.format("SELECT COUNT(*) FROM %s WHERE modifiedts > ?", getLpTableName(table)))) { + stmt.setTimestamp(1, Timestamp.from(time)); + ResultSet resultSet = stmt.executeQuery(); + + if (resultSet.next()) { + return resultSet.getLong(1); + } + return 0; + } } - try (Connection connection = getConnection()) { - try (PreparedStatement stmt = connection.prepareStatement(String.format("select count(*) from %s where modifiedts > ? AND %s", table, expandConditions(conditions)))) { - stmt.setTimestamp(1, Timestamp.from(time)); - ResultSet resultSet = stmt.executeQuery(); - long value = 0; - if (resultSet.next()) { - value = resultSet.getLong(1); + + private long getRowCountModifiedAfterforDeletion(String table, Instant time) throws SQLException { + try (Connection connection = getConnection(); + PreparedStatement stmt = connection.prepareStatement( + String.format("SELECT COUNT(*) FROM %s WHERE modifiedts > ? AND p_table = ?", deletionTable))) { + stmt.setTimestamp(1, Timestamp.from(time)); + stmt.setString(2, table); + ResultSet resultSet = stmt.executeQuery(); + + if (resultSet.next()) { + return resultSet.getLong(1); + } + + return 0; } - return value; - } } - } - private String getLpTableName(String tableName){ - return StringUtils.removeEndIgnoreCase(tableName,LP_SUFFIX); - } - } + private String buildBatchMarkersQueryForDeletion(MarkersQueryDefinition queryDefinition, String... conditions) { + String column = queryDefinition.getColumn(); + // spotless:off + return String.format("SELECT t.%s, t.rownum\n" + + "FROM\n" + + "(\n" + + " SELECT %s, (ROW_NUMBER() OVER (ORDER BY %s))-1 AS rownum\n" + + " FROM %s\n WHERE %s" + + ") AS t\n" + + "WHERE t.rownum %% %s = 0\n" + + "ORDER BY t.%s", + // spotless:on + column, column, column, deletionTable, expandConditions(conditions), queryDefinition.getBatchSize(), + column); + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/DataIncrementalRepositoryFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/DataIncrementalRepositoryFactory.java deleted file mode 100644 index a1a5391..0000000 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/DataIncrementalRepositoryFactory.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
- * License: Apache-2.0 - * - */ - -package com.sap.cx.boosters.commercedbsync.repository.impl; - -import com.google.common.base.Strings; -import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfiguration; -import com.sap.cx.boosters.commercedbsync.repository.DataRepository; -import com.sap.cx.boosters.commercedbsync.service.DatabaseMigrationDataTypeMapperService; - -public class DataIncrementalRepositoryFactory extends DataRepositoryFactory { - - public DataIncrementalRepositoryFactory(DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { - super(databaseMigrationDataTypeMapperService); - } - - public DataRepository create(DataSourceConfiguration dataSourceConfiguration) - throws Exception { - String connectionString = dataSourceConfiguration.getConnectionString(); - if (Strings.isNullOrEmpty(connectionString)) { - throw new RuntimeException("No connection string provided for data source '" + dataSourceConfiguration.getProfile() + "'"); - } else { - String connectionStringLower = connectionString.toLowerCase(); - if (connectionStringLower.startsWith("jdbc:mysql")) { - return new MySQLIncrementalDataRepository(dataSourceConfiguration, databaseMigrationDataTypeMapperService); - } else if (connectionStringLower.startsWith("jdbc:sqlserver")) { - return new AzureIncrementalDataRepository(dataSourceConfiguration, databaseMigrationDataTypeMapperService); - } else if (connectionStringLower.startsWith("jdbc:oracle")) { - return new OracleDataRepository(dataSourceConfiguration, databaseMigrationDataTypeMapperService); - } else if (connectionStringLower.startsWith("jdbc:sap")) { - return new HanaDataRepository(dataSourceConfiguration, databaseMigrationDataTypeMapperService); - } else if (connectionStringLower.startsWith("jdbc:hsqldb")) { - return new HsqlRepository(dataSourceConfiguration, databaseMigrationDataTypeMapperService); - } else if (connectionStringLower.startsWith("jdbc:postgresql")) { - return new PostGresDataRepository(dataSourceConfiguration, databaseMigrationDataTypeMapperService); - } - } - throw new RuntimeException("Cannot handle connection string for " + connectionString); - } -} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/DataRepositoryFactory.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/DataRepositoryFactory.java index 35435e8..21a1ac6 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/DataRepositoryFactory.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/DataRepositoryFactory.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -7,41 +7,99 @@ package com.sap.cx.boosters.commercedbsync.repository.impl; import com.google.common.base.Strings; +import com.sap.cx.boosters.commercedbsync.context.IncrementalMigrationContext; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfiguration; import com.sap.cx.boosters.commercedbsync.repository.DataRepository; import com.sap.cx.boosters.commercedbsync.service.DatabaseMigrationDataTypeMapperService; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.HashSet; +import java.util.NoSuchElementException; +import java.util.Objects; +import java.util.Set; public class DataRepositoryFactory { + private static final Logger LOG = LoggerFactory.getLogger(DataRepositoryFactory.class); + protected final DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService; public DataRepositoryFactory(DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { this.databaseMigrationDataTypeMapperService = databaseMigrationDataTypeMapperService; } - public DataRepository create(DataSourceConfiguration dataSourceConfiguration) - throws Exception { + public DataRepository create(MigrationContext migrationContext, + Set dataSourceConfigurations) throws Exception { + Objects.requireNonNull(dataSourceConfigurations); + if (dataSourceConfigurations.isEmpty()) { + return new NullRepository("no datasource specified", null); + } + Set repositories = new HashSet<>(); + for (DataSourceConfiguration dataSourceConfiguration : dataSourceConfigurations) { + try { + repositories.add(doCreate(dataSourceConfiguration, migrationContext)); + } catch (Exception e) { + LOG.error("Error creating data repository", e); + repositories.add(new NullRepository(e.getMessage(), dataSourceConfiguration)); + } + } + if (repositories.size() > 1) { + // TODO implement a CompositeRepository to handle multiple inputs/outputs + return new NullRepository("multiple data source profiles as input/output is currently not supported", null); + } else { + return repositories.stream().findFirst() + .orElseThrow(() -> new NoSuchElementException("The element being requested does not exist.")); + } + } + + protected DataRepository doCreate(DataSourceConfiguration dataSourceConfiguration, + MigrationContext migrationContext) throws Exception { String connectionString = dataSourceConfiguration.getConnectionString(); if (Strings.isNullOrEmpty(connectionString)) { - throw new RuntimeException("No connection string provided for data source '" + dataSourceConfiguration.getProfile() + "'"); + throw new RuntimeException( + "No connection string provided for data source '" + dataSourceConfiguration.getProfile() + "'"); } else { String connectionStringLower = connectionString.toLowerCase(); + boolean incremental = isIncremental(dataSourceConfiguration, migrationContext); + if (connectionStringLower.startsWith("jdbc:mysql")) { - return new MySQLDataRepository(dataSourceConfiguration, databaseMigrationDataTypeMapperService); + if (incremental) { + return new MySQLIncrementalDataRepository(migrationContext, dataSourceConfiguration, + databaseMigrationDataTypeMapperService); + } + + return new MySQLDataRepository(migrationContext, dataSourceConfiguration, + databaseMigrationDataTypeMapperService); } else if (connectionStringLower.startsWith("jdbc:sqlserver")) { - return new AzureDataRepository(dataSourceConfiguration, databaseMigrationDataTypeMapperService); + if (incremental) { + return 
new AzureIncrementalDataRepository(migrationContext, dataSourceConfiguration, + databaseMigrationDataTypeMapperService); + } + + return new AzureDataRepository(migrationContext, dataSourceConfiguration, + databaseMigrationDataTypeMapperService); } else if (connectionStringLower.startsWith("jdbc:oracle")) { - return new OracleDataRepository(dataSourceConfiguration, databaseMigrationDataTypeMapperService); + return new OracleDataRepository(migrationContext, dataSourceConfiguration, + databaseMigrationDataTypeMapperService); } else if (connectionStringLower.startsWith("jdbc:sap")) { - return new HanaDataRepository(dataSourceConfiguration, databaseMigrationDataTypeMapperService); + return new HanaDataRepository(migrationContext, dataSourceConfiguration, + databaseMigrationDataTypeMapperService); } else if (connectionStringLower.startsWith("jdbc:hsqldb")) { - return new HsqlRepository(dataSourceConfiguration, databaseMigrationDataTypeMapperService); + return new HsqlRepository(migrationContext, dataSourceConfiguration, + databaseMigrationDataTypeMapperService); } else if (connectionStringLower.startsWith("jdbc:postgresql")) { - return new PostGresDataRepository(dataSourceConfiguration, databaseMigrationDataTypeMapperService); - } else if (connectionStringLower.startsWith("jdbc:postgresql")) { - return new PostGresDataRepository(dataSourceConfiguration, databaseMigrationDataTypeMapperService); + return new PostGresDataRepository(migrationContext, dataSourceConfiguration, + databaseMigrationDataTypeMapperService); } } throw new RuntimeException("Cannot handle connection string for " + connectionString); } + + protected boolean isIncremental(DataSourceConfiguration dataSourceConfiguration, + MigrationContext migrationContext) { + return migrationContext instanceof IncrementalMigrationContext + && migrationContext.getInputProfiles().contains(dataSourceConfiguration.getProfile()); + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/HanaDataRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/HanaDataRepository.java index c678fa3..8cafdd4 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/HanaDataRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/HanaDataRepository.java @@ -1,16 +1,26 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ package com.sap.cx.boosters.commercedbsync.repository.impl; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; import java.sql.Types; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import javax.sql.DataSource; +import com.google.common.base.Joiner; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import org.apache.ddlutils.Platform; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.springframework.core.io.Resource; import org.springframework.jdbc.datasource.init.ResourceDatabasePopulator; @@ -26,24 +36,30 @@ import de.hybris.bootstrap.ddl.HybrisPlatform; public class HanaDataRepository extends AbstractDataRepository { + private static final Logger LOG = LoggerFactory.getLogger(HanaDataRepository.class); - public HanaDataRepository(DataSourceConfiguration dataSourceConfiguration, DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { - super(dataSourceConfiguration, databaseMigrationDataTypeMapperService); + public HanaDataRepository(MigrationContext migrationContext, DataSourceConfiguration dataSourceConfiguration, + DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { + super(migrationContext, dataSourceConfiguration, databaseMigrationDataTypeMapperService); } @Override protected String buildOffsetBatchQuery(OffsetQueryDefinition queryDefinition, String... conditions) { - return String.format("select * from %s where %s order by %s limit %s offset %s", queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getOrderByColumns(), queryDefinition.getBatchSize(), queryDefinition.getOffset()); + return String.format("select * from %s where %s order by %s limit %s offset %s", queryDefinition.getTable(), + expandConditions(conditions), queryDefinition.getOrderByColumns(), queryDefinition.getBatchSize(), + queryDefinition.getOffset()); } @Override protected String buildValueBatchQuery(SeekQueryDefinition queryDefinition, String... conditions) { - return String.format("select * from %s where %s order by %s limit %s", queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getColumn(), queryDefinition.getBatchSize()); + return String.format("select * from %s where %s order by %s limit %s", queryDefinition.getTable(), + expandConditions(conditions), queryDefinition.getColumn(), queryDefinition.getBatchSize()); } @Override protected String buildBatchMarkersQuery(MarkersQueryDefinition queryDefinition, String... 
conditions) { String column = queryDefinition.getColumn(); + // spotless:off return String.format("SELECT t.%s, t.rownr as \"rownum\" \n" + "FROM\n" + "(\n" + @@ -51,18 +67,40 @@ protected String buildBatchMarkersQuery(MarkersQueryDefinition queryDefinition, " FROM %s\n WHERE %s" + ") t\n" + "WHERE mod(t.rownr,%s) = 0\n" + - "ORDER BY t.%s", column, column, column, queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getBatchSize(), column); + "ORDER BY t.%s", + // spotless:on + column, column, column, queryDefinition.getTable(), expandConditions(conditions), + queryDefinition.getBatchSize(), column); } @Override protected String createAllTableNamesQuery() { - return String.format("select distinct table_name from table_columns where lower(schema_name) = lower('%s') order by table_name", getDataSourceConfiguration().getSchema()); + return String.format( + "select distinct table_name from table_columns where lower(schema_name) = lower('%s') order by table_name", + getDataSourceConfiguration().getSchema()); } @Override protected String createAllColumnNamesQuery(String table) { - return String.format("select distinct column_name from table_columns where lower(schema_name) = lower('%s') and lower(table_name) = lower('%s')", getDataSourceConfiguration().getSchema(), table); + return String.format( + "select distinct column_name from table_columns where lower(schema_name) = lower('%s') and lower(table_name) = lower('%s')", + getDataSourceConfiguration().getSchema(), table); } + + @Override + public String getDatabaseTimezone() { + String query = "select * from M_HOST_INFORMATION where upper(KEY) like '%TIMEZONE_NAME%'"; + try (Connection conn = super.getConnection(); PreparedStatement stmt = conn.prepareStatement(query)) { + try (ResultSet rs = stmt.executeQuery()) { + rs.next(); + return rs.getString("VALUE"); + } + } catch (Exception e) { + e.getMessage(); + } + return null; + } + @Override public void runSqlScript(Resource resource) { final ResourceDatabasePopulator databasePopulator = new ResourceDatabasePopulator(resource); @@ -73,42 +111,79 @@ public void runSqlScript(Resource resource) { @Override protected String createUniqueColumnsQuery(String tableName) { - return String.format("SELECT t2.\"COLUMN_NAME\"\n" + - "FROM\n" + - "(\n" + - " SELECT * FROM (\n" + - " SELECT i.\"SCHEMA_NAME\", i.\"TABLE_NAME\", i.\"INDEX_NAME\", count(*) as \"COL_COUNT\"\n" + - " FROM INDEXES i\n" + - " INNER JOIN INDEX_COLUMNS c\n" + - " ON i.\"INDEX_NAME\" = c.\"INDEX_NAME\" AND i.\"SCHEMA_NAME\" = c.\"SCHEMA_NAME\" AND i.\"TABLE_NAME\" = c.\"TABLE_NAME\"\n" + - " WHERE \n" + - " lower(i.\"SCHEMA_NAME\") = lower('%s')\n" + - " AND\n" + - " lower(i.\"TABLE_NAME\") = lower('%s')\n" + - " AND(\n" + - " lower(i.\"CONSTRAINT\") = lower('UNIQUE') OR \n" + - " lower(i.\"CONSTRAINT\") = lower('PRIMARY KEY'))\n" + - " GROUP BY i.\"SCHEMA_NAME\", i.\"TABLE_NAME\", i.\"INDEX_NAME\"\n" + - " ORDER BY COL_COUNT ASC \n" + - " )\n" + - " LIMIT 1\n" + - ") t1\n" + - "INNER JOIN INDEX_COLUMNS t2\n" + - "ON t1.\"INDEX_NAME\" = t2.\"INDEX_NAME\" AND t1.\"SCHEMA_NAME\" = t2.\"SCHEMA_NAME\" AND t1.\"TABLE_NAME\" = t2.\"TABLE_NAME\"", getDataSourceConfiguration().getSchema(), tableName); + // spotless:off + return String.format("SELECT t2.\"COLUMN_NAME\"\n" + "FROM\n" + "(\n" + " SELECT * FROM (\n" + + " SELECT i.\"SCHEMA_NAME\", i.\"TABLE_NAME\", i.\"INDEX_NAME\", count(*) as \"COL_COUNT\"\n" + + " FROM INDEXES i\n" + " INNER JOIN INDEX_COLUMNS c\n" + + " ON i.\"INDEX_NAME\" = c.\"INDEX_NAME\" AND 
i.\"SCHEMA_NAME\" = c.\"SCHEMA_NAME\" AND i.\"TABLE_NAME\" = c.\"TABLE_NAME\"\n" + + " WHERE \n" + " lower(i.\"SCHEMA_NAME\") = lower('%s')\n" + " AND\n" + + " lower(i.\"TABLE_NAME\") = lower('%s')\n" + " AND(\n" + + " lower(i.\"CONSTRAINT\") = lower('UNIQUE') OR \n" + + " lower(i.\"CONSTRAINT\") = lower('PRIMARY KEY'))\n" + + " GROUP BY i.\"SCHEMA_NAME\", i.\"TABLE_NAME\", i.\"INDEX_NAME\"\n" + + " ORDER BY COL_COUNT ASC \n" + " )\n" + " LIMIT 1\n" + ") t1\n" + + "INNER JOIN INDEX_COLUMNS t2\n" + + "ON t1.\"INDEX_NAME\" = t2.\"INDEX_NAME\" AND t1.\"SCHEMA_NAME\" = t2.\"SCHEMA_NAME\" AND t1.\"TABLE_NAME\" = t2.\"TABLE_NAME\"", + // spotless:on + getDataSourceConfiguration().getSchema(), tableName); } @Override protected void addCustomPlatformTypeMapping(final Platform platform) { - platform.getPlatformInfo().addNativeTypeMapping(Types.NCHAR, "NVARCHAR", Types.NVARCHAR); - platform.getPlatformInfo().addNativeTypeMapping(Types.CHAR, "VARCHAR", Types.VARCHAR); - platform.getPlatformInfo().addNativeTypeMapping(Types.DOUBLE, "DECIMAL", Types.DECIMAL); + platform.getPlatformInfo().addNativeTypeMapping(Types.NCHAR, "NVARCHAR", Types.NVARCHAR); + platform.getPlatformInfo().addNativeTypeMapping(Types.CHAR, "VARCHAR", Types.VARCHAR); + platform.getPlatformInfo().addNativeTypeMapping(Types.DOUBLE, "DECIMAL", Types.DECIMAL); // platform.getPlatformInfo().addNativeTypeMapping(-1, "NCLOB", Types.NCLOB); } + @Override public DataBaseProvider getDatabaseProvider() { return DataBaseProvider.HANA; } + @Override + public String buildBulkUpsertStatement(String table, List columnsToCopy, List upsertIDs) { + final StringBuilder sqlBuilder = new StringBuilder(); + + sqlBuilder.append(String.format("MERGE INTO %s t", table)); + sqlBuilder.append("\n"); + sqlBuilder.append(String.format("USING (SELECT %s from dummy) s ON ", + Joiner.on(',').join(columnsToCopy.stream().map(column -> "? " + column).collect(Collectors.toList())))); + sqlBuilder.append(String.format("( %s )", upsertIDs.stream() + .map(column -> String.format(" t.%s = s.%s", column, column)).collect(Collectors.joining(" AND ")))); + sqlBuilder.append("\n"); + sqlBuilder.append("WHEN MATCHED THEN UPDATE"); // update + sqlBuilder.append("\n"); + sqlBuilder.append(getBulkUpdateStatementParamList(columnsToCopy, + columnsToCopy.stream().map(column -> "s." + column).collect(Collectors.toList()), upsertIDs)); + sqlBuilder.append("\n"); + sqlBuilder.append("WHEN NOT MATCHED THEN INSERT"); // insert + sqlBuilder.append("\n"); + sqlBuilder.append(getBulkInsertStatementParamList(columnsToCopy, + columnsToCopy.stream().map(column -> "s." + column).collect(Collectors.toList()))); + + return sqlBuilder.toString(); + } + + @Override + protected String getBulkInsertStatementParamList(List columnsToCopy, List columnsToCopyValues) { + return "(" + String.join(", ", columnsToCopy) + ") VALUES (" + String.join(", ", columnsToCopyValues) + ")"; + } + + @Override + protected String getBulkUpdateStatementParamList(List columnsToCopy, List columnsToCopyValues, + List upsertIDs) { + final String upsertID = upsertIDs.get(0); // TODO handle multiple upsert IDs if needed + final List columnsToCopyMinusPK = columnsToCopy.stream().filter(s -> !s.equalsIgnoreCase(upsertID)) + .toList(); + final List columnsToCopyValuesMinusPK = columnsToCopyValues.stream() + .filter(s -> !s.equalsIgnoreCase("s." 
+ upsertID)).toList(); + LOG.debug("getBulkUpdateStatementParamList - columnsToCopyMinusPK =" + columnsToCopyMinusPK); + return "SET " + IntStream.range(0, columnsToCopyMinusPK.size()).mapToObj( + idx -> String.format("%s = %s", columnsToCopyMinusPK.get(idx), columnsToCopyValuesMinusPK.get(idx))) + .collect(Collectors.joining(", ")); + } + @Override protected Platform createPlatform(DatabaseSettings databaseSettings, DataSource dataSource) { HybrisPlatform instance = MigrationHybrisHANAPlatform.build(databaseSettings); @@ -116,4 +191,3 @@ protected Platform createPlatform(DatabaseSettings databaseSettings, DataSource return instance; } } - diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/HsqlRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/HsqlRepository.java index 3b8708a..60a13d0 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/HsqlRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/HsqlRepository.java @@ -1,11 +1,12 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ package com.sap.cx.boosters.commercedbsync.repository.impl; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfiguration; import com.sap.cx.boosters.commercedbsync.service.DatabaseMigrationDataTypeMapperService; import de.hybris.bootstrap.ddl.DataBaseProvider; @@ -13,10 +14,13 @@ import com.sap.cx.boosters.commercedbsync.OffsetQueryDefinition; import com.sap.cx.boosters.commercedbsync.SeekQueryDefinition; +import java.util.List; + public class HsqlRepository extends AbstractDataRepository { - public HsqlRepository(DataSourceConfiguration dataSourceConfiguration, DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { - super(dataSourceConfiguration, databaseMigrationDataTypeMapperService); + public HsqlRepository(MigrationContext migrationContext, DataSourceConfiguration dataSourceConfiguration, + DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { + super(migrationContext, dataSourceConfiguration, databaseMigrationDataTypeMapperService); } @Override @@ -49,8 +53,29 @@ protected String createUniqueColumnsQuery(String tableName) { throw new UnsupportedOperationException("not implemented"); } + @Override + protected String getBulkInsertStatementParamList(List columnsToCopy, List columnsToCopyValues) { + return null; + } + + @Override + protected String getBulkUpdateStatementParamList(List columnsToCopy, List columnsToCopyValues, + List upsertIDs) { + return null; + } + @Override public DataBaseProvider getDatabaseProvider() { return DataBaseProvider.HSQL; } + + @Override + public String buildBulkUpsertStatement(String table, List columnsToCopy, List upsertIDs) { + throw new UnsupportedOperationException("not implemented"); + } + + @Override + public String getDatabaseTimezone() { + return null; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/MySQLDataRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/MySQLDataRepository.java index 477cb70..34f6f0d 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/MySQLDataRepository.java +++ 
b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/MySQLDataRepository.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -9,35 +9,80 @@ import com.sap.cx.boosters.commercedbsync.MarkersQueryDefinition; import com.sap.cx.boosters.commercedbsync.OffsetQueryDefinition; import com.sap.cx.boosters.commercedbsync.SeekQueryDefinition; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfiguration; +import com.sap.cx.boosters.commercedbsync.repository.platform.MigrationHybrisMySqlPlatform; import com.sap.cx.boosters.commercedbsync.service.DatabaseMigrationDataTypeMapperService; import de.hybris.bootstrap.ddl.DataBaseProvider; +import de.hybris.bootstrap.ddl.DatabaseSettings; +import de.hybris.bootstrap.ddl.HybrisPlatform; +import org.apache.ddlutils.Platform; +import org.springframework.core.io.Resource; +import org.springframework.jdbc.datasource.init.ResourceDatabasePopulator; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.util.List; public class MySQLDataRepository extends AbstractDataRepository { - public MySQLDataRepository(DataSourceConfiguration dataSourceConfiguration, DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { - super(dataSourceConfiguration, databaseMigrationDataTypeMapperService); + public MySQLDataRepository(MigrationContext migrationContext, DataSourceConfiguration dataSourceConfiguration, + DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { + super(migrationContext, dataSourceConfiguration, databaseMigrationDataTypeMapperService); + } + + @Override + protected Platform createPlatform(DatabaseSettings databaseSettings, DataSource dataSource) { + final HybrisPlatform platform = MigrationHybrisMySqlPlatform.build(databaseSettings); + platform.setDataSource(dataSource); + return platform; } @Override protected String buildOffsetBatchQuery(OffsetQueryDefinition queryDefinition, String... conditions) { - return String.format("select * from %s where %s order by %s limit %s,%s", queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getOrderByColumns(), queryDefinition.getOffset(), queryDefinition.getBatchSize()); + return String.format("select * from %s where %s order by %s limit ?,?", queryDefinition.getTable(), + expandConditions(conditions), queryDefinition.getOrderByColumns()); + } + + @Override + public void runSqlScript(Resource resource) { + final ResourceDatabasePopulator databasePopulator = new ResourceDatabasePopulator(resource); + databasePopulator.setIgnoreFailedDrops(true); + databasePopulator.setSeparator("#"); + databasePopulator.execute(getDataSource()); + } + + @Override + protected boolean hasParameterizedOffsetBatchQuery() { + return true; } @Override protected String buildValueBatchQuery(SeekQueryDefinition queryDefinition, String... 
conditions) { - return String.format("select * from %s where %s order by %s limit %s", queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getColumn(), queryDefinition.getBatchSize()); + return String.format("select * from %s where %s order by %s limit %s", queryDefinition.getTable(), + expandConditions(conditions), queryDefinition.getColumn(), queryDefinition.getBatchSize()); } @Override protected String buildBatchMarkersQuery(MarkersQueryDefinition queryDefinition, String... conditions) { String column = queryDefinition.getColumn(); - return String.format("SELECT %s,rownum\n" + + // spotless:off + return String.format( + "SELECT %s,rownum\n" + "FROM ( \n" + " SELECT \n" + " @row := @row +1 AS rownum, %s \n" + " FROM (SELECT @row :=-1) r, %s WHERE %s ORDER BY %s) ranked \n" + - "WHERE rownum %% %s = 0 ", column, column, queryDefinition.getTable(), expandConditions(conditions), column, queryDefinition.getBatchSize()); + "WHERE rownum %% ? = 0", + // spotless:on + column, column, queryDefinition.getTable(), expandConditions(conditions), column); + } + + @Override + protected boolean hasParameterizedBatchMarkersQuery() { + return true; } @Override @@ -56,6 +101,7 @@ protected String createAllColumnNamesQuery(String tableName) { @Override protected String createUniqueColumnsQuery(String tableName) { + // spotless:off return String.format( "SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.STATISTICS t1\n" + "INNER JOIN \n" + @@ -69,11 +115,42 @@ protected String createUniqueColumnsQuery(String tableName) { ") t2\n" + "ON t1.TABLE_SCHEMA = t2.TABLE_SCHEMA AND t1.TABLE_NAME = t2.TABLE_NAME AND t1.INDEX_NAME = t2.INDEX_NAME\n" + ";\n", + // spotless:on getDataSourceConfiguration().getSchema(), tableName); } + @Override + protected String getBulkInsertStatementParamList(List columnsToCopy, List columnsToCopyValues) { + return null; + } + + @Override + protected String getBulkUpdateStatementParamList(List columnsToCopy, List columnsToCopyValues, + List upsertIDs) { + return null; + } + @Override public DataBaseProvider getDatabaseProvider() { return DataBaseProvider.MYSQL; } + + @Override + public String buildBulkUpsertStatement(String table, List columnsToCopy, List upsertIDs) { + throw new UnsupportedOperationException("not implemented"); + } + + @Override + public String getDatabaseTimezone() { + String query = "SELECT @@system_time_zone as timezone"; + try (Connection conn = super.getConnection(); PreparedStatement stmt = conn.prepareStatement(query)) { + try (ResultSet rs = stmt.executeQuery()) { + rs.next(); + return rs.getString("timezone"); + } + } catch (Exception e) { + e.getMessage(); + } + return null; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/MySQLIncrementalDataRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/MySQLIncrementalDataRepository.java index 6fdefc2..60dc994 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/MySQLIncrementalDataRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/MySQLIncrementalDataRepository.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -15,6 +15,7 @@ import java.util.ArrayList; import java.util.List; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -27,181 +28,188 @@ import de.hybris.platform.util.Config; -public class MySQLIncrementalDataRepository extends MySQLDataRepository{ +public class MySQLIncrementalDataRepository extends MySQLDataRepository { - private static final Logger LOG = LoggerFactory.getLogger(MySQLIncrementalDataRepository.class); + private static final Logger LOG = LoggerFactory.getLogger(MySQLIncrementalDataRepository.class); - private static String deletionTable = Config.getParameter("db.tableprefix") == null ? "" : Config.getParameter("db.tableprefix")+ "itemdeletionmarkers"; + private static final String deletionTable = Config.getString("db.tableprefix", "") + "itemdeletionmarkers"; - public MySQLIncrementalDataRepository( - DataSourceConfiguration dataSourceConfiguration, - DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { - super(dataSourceConfiguration, databaseMigrationDataTypeMapperService); - } - @Override - protected String buildOffsetBatchQuery(OffsetQueryDefinition queryDefinition, String... conditions) { - - if(!queryDefinition.isDeletionEnabled()) { - return super.buildOffsetBatchQuery(queryDefinition,conditions); + public MySQLIncrementalDataRepository(MigrationContext migrationContext, + DataSourceConfiguration dataSourceConfiguration, + DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { + super(migrationContext, dataSourceConfiguration, databaseMigrationDataTypeMapperService); } - return String.format("select * from %s where %s order by %s limit %s,%s", deletionTable, expandConditions(conditions), queryDefinition.getOrderByColumns(), queryDefinition.getOffset(), queryDefinition.getBatchSize()); - } - @Override - protected String buildValueBatchQuery(SeekQueryDefinition queryDefinition, String... conditions) { - if(!queryDefinition.isDeletionEnabled()) { - return super.buildValueBatchQuery(queryDefinition,conditions); - } - return String.format("select * from %s where %s order by %s limit %s", deletionTable, expandConditions(conditions), queryDefinition.getColumn(), queryDefinition.getBatchSize()); - } + @Override + protected String buildOffsetBatchQuery(OffsetQueryDefinition queryDefinition, String... conditions) { - @Override - protected String buildBatchMarkersQuery(MarkersQueryDefinition queryDefinition, String... conditions) { - if(!queryDefinition.isDeletionEnabled()) { - return super.buildBatchMarkersQuery(queryDefinition,conditions); + if (!queryDefinition.isDeletionEnabled()) { + return super.buildOffsetBatchQuery(queryDefinition, conditions); + } + return String.format("select * from %s where %s order by %s limit ?,?", deletionTable, + expandConditions(conditions), queryDefinition.getOrderByColumns()); } - String column = queryDefinition.getColumn(); - return String.format("SELECT %s,rownum\n" + - "FROM ( \n" + - " SELECT \n" + - " @row := @row +1 AS rownum, %s \n" + - " FROM (SELECT @row :=-1) r, %s WHERE %s ORDER BY %s) ranked \n" + - "WHERE rownum %% %s = 0 ", column, column, deletionTable, expandConditions(conditions), column, queryDefinition.getBatchSize()); - } - + @Override + protected String buildValueBatchQuery(SeekQueryDefinition queryDefinition, String... 
conditions) { + if (!queryDefinition.isDeletionEnabled()) { + return super.buildValueBatchQuery(queryDefinition, conditions); + } + return String.format("select * from %s where %s order by %s limit %s", deletionTable, + expandConditions(conditions), queryDefinition.getColumn(), queryDefinition.getBatchSize()); + } + @Override + protected String buildBatchMarkersQuery(MarkersQueryDefinition queryDefinition, String... conditions) { + if (!queryDefinition.isDeletionEnabled()) { + return super.buildBatchMarkersQuery(queryDefinition, conditions); + } + String column = queryDefinition.getColumn(); + return String.format( + "SELECT %s,rownum\n" + "FROM ( \n" + " SELECT \n" + " @row := @row +1 AS rownum, %s \n" + + " FROM (SELECT @row :=-1) r, %s WHERE %s ORDER BY %s) ranked \n" + + "WHERE rownum %% ? = 0 ", + column, column, deletionTable, expandConditions(conditions), column); + } + + @Override + public DataSet getBatchOrderedByColumn(SeekQueryDefinition queryDefinition, Instant time) throws Exception { + // + if (!queryDefinition.isDeletionEnabled()) { + return super.getBatchOrderedByColumn(queryDefinition, time); + } - @Override - public DataSet getBatchOrderedByColumn(SeekQueryDefinition queryDefinition, Instant time) throws Exception { - // - if(!queryDefinition.isDeletionEnabled()) { - return super.getBatchOrderedByColumn(queryDefinition,time); + // get batches with modifiedts >= configured time for incremental migration + List conditionsList = new ArrayList<>(3); + processDefaultConditions(queryDefinition.getTable(), conditionsList); + if (time != null) { + conditionsList.add("modifiedts > ?"); + } + conditionsList.add("p_table = ?"); + if (queryDefinition.getLastColumnValue() != null) { + conditionsList + .add(String.format("%s >= %s", queryDefinition.getColumn(), queryDefinition.getLastColumnValue())); + } + if (queryDefinition.getNextColumnValue() != null) { + conditionsList + .add(String.format("%s < %s", queryDefinition.getColumn(), queryDefinition.getNextColumnValue())); + } + String[] conditions = null; + if (conditionsList.size() > 0) { + conditions = conditionsList.toArray(new String[conditionsList.size()]); + } + try (Connection connection = getConnection(); + PreparedStatement stmt = connection + .prepareStatement(buildValueBatchQuery(queryDefinition, conditions))) { + stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); + if (time != null) { + stmt.setTimestamp(1, Timestamp.from(time)); + } + // setting table for the deletions + stmt.setString(2, queryDefinition.getTable()); + + ResultSet resultSet = stmt.executeQuery(); + return convertToBatchDataSet(queryDefinition.getBatchId(), resultSet); + } } - //get batches with modifiedts >= configured time for incremental migration - List conditionsList = new ArrayList<>(3); - processDefaultConditions(queryDefinition.getTable(), conditionsList); - if (time != null) { - conditionsList.add("modifiedts > ?"); - } - conditionsList.add("p_table = ?"); - if (queryDefinition.getLastColumnValue() != null) { - conditionsList.add(String.format("%s >= %s", queryDefinition.getColumn(), queryDefinition.getLastColumnValue())); - } - if (queryDefinition.getNextColumnValue() != null) { - conditionsList.add(String.format("%s < %s", queryDefinition.getColumn(), queryDefinition.getNextColumnValue())); - } - String[] conditions = null; - if (conditionsList.size() > 0) { - conditions = conditionsList.toArray(new String[conditionsList.size()]); - } - try (Connection connection = getConnection(); - PreparedStatement stmt = 
connection.prepareStatement(buildValueBatchQuery(queryDefinition, conditions))) { - stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); - if (time != null) { - stmt.setTimestamp(1, Timestamp.from(time)); - } - // setting table for the deletions - stmt.setString(2,queryDefinition.getTable()); - - ResultSet resultSet = stmt.executeQuery(); - return convertToBatchDataSet(resultSet); - } - } + @Override + public DataSet getBatchWithoutIdentifier(OffsetQueryDefinition queryDefinition, Instant time) throws Exception { - @Override - public DataSet getBatchWithoutIdentifier(OffsetQueryDefinition queryDefinition, Instant time) throws Exception { + if (!queryDefinition.isDeletionEnabled()) { + return super.getBatchWithoutIdentifier(queryDefinition, time); + } + // get batches with modifiedts >= configured time for incremental migration + List conditionsList = new ArrayList<>(2); + processDefaultConditions(queryDefinition.getTable(), conditionsList); + if (time != null) { + conditionsList.add("modifiedts > ?"); + } + conditionsList.add("p_table = ?"); + String[] conditions = null; + if (conditionsList.size() > 0) { + conditions = conditionsList.toArray(new String[conditionsList.size()]); + } + try (Connection connection = getConnection(); + PreparedStatement stmt = connection + .prepareStatement(buildOffsetBatchQuery(queryDefinition, conditions))) { + stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); + if (time != null) { + stmt.setTimestamp(1, Timestamp.from(time)); + } + // setting table for the deletions + stmt.setString(2, queryDefinition.getTable()); + ResultSet resultSet = stmt.executeQuery(); + return convertToBatchDataSet(queryDefinition.getBatchId(), resultSet); + } - if(!queryDefinition.isDeletionEnabled()) { - return super.getBatchWithoutIdentifier(queryDefinition,time); - } - //get batches with modifiedts >= configured time for incremental migration - List conditionsList = new ArrayList<>(2); - processDefaultConditions(queryDefinition.getTable(), conditionsList); - if (time != null) { - conditionsList.add("modifiedts > ?"); - } - conditionsList.add("p_table = ?"); - String[] conditions = null; - if (conditionsList.size() > 0) { - conditions = conditionsList.toArray(new String[conditionsList.size()]); - } - try (Connection connection = getConnection(); - PreparedStatement stmt = connection.prepareStatement(buildOffsetBatchQuery(queryDefinition, conditions))) { - stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); - if (time != null) { - stmt.setTimestamp(1, Timestamp.from(time)); - } - // setting table for the deletions - stmt.setString(2,queryDefinition.getTable()); - ResultSet resultSet = stmt.executeQuery(); - return convertToBatchDataSet(resultSet); } + @Override + public DataSet getBatchMarkersOrderedByColumn(MarkersQueryDefinition queryDefinition, Instant time) + throws Exception { - } - - @Override - public DataSet getBatchMarkersOrderedByColumn(MarkersQueryDefinition queryDefinition, Instant time) throws Exception { - - if(!queryDefinition.isDeletionEnabled()) { - return super.getBatchMarkersOrderedByColumn(queryDefinition,time); - } - //get batches with modifiedts >= configured time for incremental migration - List conditionsList = new ArrayList<>(2); - processDefaultConditions(queryDefinition.getTable(), conditionsList); - if (time != null) { - conditionsList.add("modifiedts > ?"); - } - // setting table for the deletions - conditionsList.add("p_table = ?"); + if (!queryDefinition.isDeletionEnabled()) { + 
return super.getBatchMarkersOrderedByColumn(queryDefinition, time); + } + // get batches with modifiedts >= configured time for incremental migration + List conditionsList = new ArrayList<>(2); + processDefaultConditions(queryDefinition.getTable(), conditionsList); + if (time != null) { + conditionsList.add("modifiedts > ?"); + } + // setting table for the deletions + conditionsList.add("p_table = ?"); - String[] conditions = null; - if (conditionsList.size() > 0) { - conditions = conditionsList.toArray(new String[conditionsList.size()]); - } - try (Connection connection = getConnection(); - PreparedStatement stmt = connection.prepareStatement(buildBatchMarkersQuery(queryDefinition, conditions))) { - stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); - if (time != null) { - stmt.setTimestamp(1, Timestamp.from(time)); - } - // setting table for the deletions - stmt.setString(2,queryDefinition.getTable()); - - ResultSet resultSet = stmt.executeQuery(); - return convertToBatchDataSet(resultSet); + String[] conditions = null; + if (conditionsList.size() > 0) { + conditions = conditionsList.toArray(new String[conditionsList.size()]); + } + try (Connection connection = getConnection(); + PreparedStatement stmt = connection + .prepareStatement(buildBatchMarkersQuery(queryDefinition, conditions))) { + stmt.setFetchSize(Long.valueOf(queryDefinition.getBatchSize()).intValue()); + if (time != null) { + stmt.setTimestamp(1, Timestamp.from(time)); + } + // setting table for the deletions + stmt.setString(2, queryDefinition.getTable()); + + ResultSet resultSet = stmt.executeQuery(); + return convertToBatchDataSet(-1, resultSet); // TODO no batch ID in this kind of query definition + } } - } - @Override - public long getRowCountModifiedAfter(String table, Instant time,boolean isDeletionEnabled,boolean lpTableMigrationEnabled) throws SQLException { - if(!isDeletionEnabled) { - return super.getRowCountModifiedAfter(table,time,false,false); - } - // - List conditionsList = new ArrayList<>(2); - processDefaultConditions(table, conditionsList); - // setting table for the deletions - conditionsList.add("p_table = ?"); - String[] conditions = null; - if (conditionsList.size() > 0) { - conditions = conditionsList.toArray(new String[conditionsList.size()]); - } - try (Connection connection = getConnection()) { - try (PreparedStatement stmt = connection.prepareStatement(String.format("select count(*) from %s where modifiedts > ? AND %s", deletionTable, expandConditions(conditions)))) { - stmt.setTimestamp(1, Timestamp.from(time)); + @Override + public long getRowCountModifiedAfter(String table, Instant time, boolean isDeletionEnabled, + boolean lpTableMigrationEnabled) throws SQLException { + if (!isDeletionEnabled) { + return super.getRowCountModifiedAfter(table, time, false, false); + } + // + List conditionsList = new ArrayList<>(2); + processDefaultConditions(table, conditionsList); // setting table for the deletions - stmt.setString(2,table); - ResultSet resultSet = stmt.executeQuery(); - long value = 0; - if (resultSet.next()) { - value = resultSet.getLong(1); - } - return value; - } + conditionsList.add("p_table = ?"); + String[] conditions = null; + if (conditionsList.size() > 0) { + conditions = conditionsList.toArray(new String[conditionsList.size()]); + } + try (Connection connection = getConnection()) { + try (PreparedStatement stmt = connection + .prepareStatement(String.format("select count(*) from %s where modifiedts > ? 
AND %s", + deletionTable, expandConditions(conditions)))) { + stmt.setTimestamp(1, Timestamp.from(time)); + // setting table for the deletions + stmt.setString(2, table); + ResultSet resultSet = stmt.executeQuery(); + long value = 0; + if (resultSet.next()) { + value = resultSet.getLong(1); + } + return value; + } + } } - } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/NullRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/NullRepository.java new file mode 100644 index 0000000..c80630d --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/NullRepository.java @@ -0,0 +1,247 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.repository.impl; + +import com.sap.cx.boosters.commercedbsync.MarkersQueryDefinition; +import com.sap.cx.boosters.commercedbsync.OffsetQueryDefinition; +import com.sap.cx.boosters.commercedbsync.SeekQueryDefinition; +import com.sap.cx.boosters.commercedbsync.TypeSystemTable; +import com.sap.cx.boosters.commercedbsync.dataset.DataSet; +import com.sap.cx.boosters.commercedbsync.logging.JDBCQueriesStore; +import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfiguration; +import com.sap.cx.boosters.commercedbsync.profile.impl.InvalidDataSourceConfigurationException; +import com.sap.cx.boosters.commercedbsync.repository.DataRepository; +import de.hybris.bootstrap.ddl.DataBaseProvider; +import org.apache.ddlutils.Platform; +import org.apache.ddlutils.model.Database; +import org.springframework.core.io.Resource; + +import javax.sql.DataSource; +import java.sql.Connection; +import java.sql.SQLException; +import java.time.Instant; +import java.util.List; +import java.util.Set; + +/** + * Represents a dummy implementation of a data repository that always throws an + * exception + */ +public class NullRepository implements DataRepository { + + private final String message; + private final DataSourceConfiguration dataSourceConfiguration; + + public NullRepository(String message, DataSourceConfiguration dataSourceConfiguration) { + this.message = message; + this.dataSourceConfiguration = dataSourceConfiguration; + } + + @Override + public Database asDatabase() { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public Database asDatabase(boolean reload) { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public Set getAllTableNames() throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public Set getAllViewNames() throws SQLException { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public Set getAllTypeSystemTables() throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public boolean isAuditTable(String table) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public Set getAllColumnNames(String table) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public DataSet getBatchWithoutIdentifier(OffsetQueryDefinition 
queryDefinition) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public DataSet getBatchWithoutIdentifier(OffsetQueryDefinition queryDefinition, Instant time) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public DataSet getBatchOrderedByColumn(SeekQueryDefinition queryDefinition) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public DataSet getBatchOrderedByColumn(SeekQueryDefinition queryDefinition, Instant time) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public DataSet getBatchMarkersOrderedByColumn(MarkersQueryDefinition queryDefinition) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public long getRowCount(String table) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public long getRowCountModifiedAfter(String table, Instant time, boolean isDeletionEnabled, + boolean lpTableMigrationEnabled) throws SQLException { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public long getRowCountModifiedAfter(String table, Instant time) throws SQLException { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public DataSet getAll(String table) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public DataSet getAllModifiedAfter(String table, Instant time) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public DataSourceConfiguration getDataSourceConfiguration() { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public int executeUpdateAndCommit(String updateStatement) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public int executeUpdateAndCommitOnPrimary(String updateStatement) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public void runSqlScript(Resource resource) { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public void runSqlScriptOnPrimary(Resource resource) { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public float getDatabaseUtilization() throws SQLException { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public int truncateTable(String table) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public void disableIndexesOfTable(String table) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public void enableIndexesOfTable(String table) 
throws SQLException { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public void dropIndexesOfTable(String table) throws SQLException { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public Platform asPlatform() { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public Platform asPlatform(boolean reload) { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public DataBaseProvider getDatabaseProvider() { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public Connection getConnection() throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public DataSource getDataSource() { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public DataSource getDataSourcePrimary() { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public DataSet getBatchMarkersOrderedByColumn(MarkersQueryDefinition queryDefinition, Instant time) + throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public DataSet getUniqueColumns(String table) throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public boolean validateConnection() throws Exception { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public JDBCQueriesStore getJdbcQueriesStore() { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public void clearJdbcQueriesStore() { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public String buildBulkUpsertStatement(String table, List columnsToCopy, List upsertIDs) { + throw new InvalidDataSourceConfigurationException(this.message, this.dataSourceConfiguration); + } + + @Override + public String getDatabaseTimezone() { + + return null; + } + +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/OracleDataRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/OracleDataRepository.java index 733aec2..f42c7d3 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/OracleDataRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/OracleDataRepository.java @@ -1,19 +1,28 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ package com.sap.cx.boosters.commercedbsync.repository.impl; +import java.sql.Connection; +import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.SQLException; import java.sql.Types; import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; +import java.util.stream.IntStream; import javax.sql.DataSource; +import com.google.common.base.Joiner; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import org.apache.ddlutils.Platform; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.springframework.core.io.Resource; import org.springframework.jdbc.datasource.init.ResourceDatabasePopulator; @@ -30,27 +39,29 @@ import de.hybris.bootstrap.ddl.HybrisPlatform; public class OracleDataRepository extends AbstractDataRepository { - public OracleDataRepository(final DataSourceConfiguration dataSourceConfiguration, - final DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { - super(dataSourceConfiguration, databaseMigrationDataTypeMapperService); - ensureJdbcCompliance(); - } - - private void ensureJdbcCompliance() { - // without this types like timestamps may not be jdbc compliant - System.getProperties().setProperty("oracle.jdbc.J2EE13Compliant", "true"); - // ORACLE_TARGET - START - System.getProperties().setProperty("oracle.jdbc.autoCommitSpecCompliant", "false"); - // ORACLE_TARGET - END - } - - @Override - protected DataSet convertToBatchDataSet(final ResultSet resultSet) throws Exception { - return convertToDataSet(resultSet, Collections.singleton("rn")); - } + private static final Logger LOG = LoggerFactory.getLogger(OracleDataRepository.class); + + public OracleDataRepository(MigrationContext migrationContext, + final DataSourceConfiguration dataSourceConfiguration, + final DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { + super(migrationContext, dataSourceConfiguration, databaseMigrationDataTypeMapperService); + ensureJdbcCompliance(); + } + + private void ensureJdbcCompliance() { + // without this types like timestamps may not be jdbc compliant + System.getProperties().setProperty("oracle.jdbc.J2EE13Compliant", "true"); + System.getProperties().setProperty("oracle.jdbc.autoCommitSpecCompliant", "false"); + } + + @Override + protected DataSet convertToBatchDataSet(int batchId, final ResultSet resultSet) throws Exception { + return convertToDataSet(batchId, resultSet, Collections.singleton("rn")); + } @Override protected String buildOffsetBatchQuery(OffsetQueryDefinition queryDefinition, String... 
conditions) { + // spotless:off return String.format( "select * " + " from ( " + @@ -60,13 +71,18 @@ protected String buildOffsetBatchQuery(OffsetQueryDefinition queryDefinition, St " over (order by %s) rn " + " from %s t where %s) " + "where rn between %s and %s " + - "order by rn", queryDefinition.getBatchSize(), queryDefinition.getOrderByColumns(), queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getOffset() + 1, queryDefinition.getOffset() + queryDefinition.getBatchSize()); + "order by rn", + // spotless:on + queryDefinition.getBatchSize(), queryDefinition.getOrderByColumns(), queryDefinition.getTable(), + expandConditions(conditions), queryDefinition.getOffset() + 1, + queryDefinition.getOffset() + queryDefinition.getBatchSize()); } // https://blogs.oracle.com/oraclemagazine/on-top-n-and-pagination-queries // "Pagination in Getting Rows N Through M" @Override protected String buildValueBatchQuery(SeekQueryDefinition queryDefinition, String... conditions) { + // spotless:off return String.format( "select * " + " from ( " + @@ -76,12 +92,16 @@ protected String buildValueBatchQuery(SeekQueryDefinition queryDefinition, Strin " over (order by t.%s) rn " + " from %s t where %s) " + "where rn <= %s " + - "order by rn", queryDefinition.getBatchSize(), queryDefinition.getColumn(), queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getBatchSize()); + "order by rn", + // spotless:on + queryDefinition.getBatchSize(), queryDefinition.getColumn(), queryDefinition.getTable(), + expandConditions(conditions), queryDefinition.getBatchSize()); } @Override protected String buildBatchMarkersQuery(MarkersQueryDefinition queryDefinition, String... conditions) { String column = queryDefinition.getColumn(); + // spotless:off return String.format("SELECT t.%s, t.rownr as \"rownum\" \n" + "FROM\n" + "(\n" + @@ -89,7 +109,9 @@ protected String buildBatchMarkersQuery(MarkersQueryDefinition queryDefinition, " FROM %s\n WHERE %s" + ") t\n" + "WHERE mod(t.rownr,%s) = 0\n" + - "ORDER BY t.%s", column, column, column, queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getBatchSize(), column); + "ORDER BY t.%s", + // spotless:on + column, column, column, queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getBatchSize(), column); } @Override @@ -108,6 +130,7 @@ protected String createAllColumnNamesQuery(String table) { @Override protected String createUniqueColumnsQuery(String tableName) { + // spotless:off return String.format("SELECT t2.\"COLUMN_NAME\"\n" + "FROM\n" + "(\n" + @@ -129,102 +152,132 @@ protected String createUniqueColumnsQuery(String tableName) { ") t1\n" + "INNER JOIN ALL_IND_COLUMNS t2\n" + "ON t1.\"INDEX_NAME\" = t2.\"INDEX_NAME\" AND t1.\"OWNER\" = t2.\"INDEX_OWNER\" AND t1.\"TABLE_NAME\" = t2.\"TABLE_NAME\"", getDataSourceConfiguration().getSchema(), tableName); + // spotless:on + } + + @Override + protected Platform createPlatform(final DatabaseSettings databaseSettings, final DataSource dataSource) { + final HybrisPlatform platform = HybrisOraclePlatform.build(databaseSettings); + /* + * ORACLE_TARGET -> if the JdbcModelReader.readTables() is invoked with a null + * schemaPattern, protected Collection readTables(String catalog, String + * schemaPattern, String[] tableTypes) throws SQLException { ..then in Oracle it + * retrieves ALL the tables ..include SYS. This causes other issues such as + * Unsupported JDBC Type Exception, therefore always set the schema pattern to + * the target Oracle's schema. 
+ */ + platform.getModelReader().setDefaultSchemaPattern(getDataSourceConfiguration().getSchema()); + platform.setDataSource(dataSource); + return platform; + } + + @Override + public void runSqlScript(final Resource resource) { + final ResourceDatabasePopulator databasePopulator = new ResourceDatabasePopulator(resource); + databasePopulator.setIgnoreFailedDrops(true); + databasePopulator.setSeparator("/"); + databasePopulator.execute(getDataSource()); + + } + + @Override + public float getDatabaseUtilization() throws SQLException { + return (float) 1.00; } - @Override - protected Platform createPlatform(final DatabaseSettings databaseSettings, final DataSource dataSource) { - final HybrisPlatform platform = HybrisOraclePlatform.build(databaseSettings); - /* - * ORACLE_TARGET -> if the JdbcModelReader.readTables() is invoked with - * a null schemaPattern, protected Collection readTables(String catalog, - * String schemaPattern, String[] tableTypes) throws SQLException { - * ..then in Oracle it retrieves ALL the tables ..include SYS. This - * causes other issues such as Unsupported JDBC Type Exception, - * therefore always set the schema pattern to the target Oracle's - * schema. - */ - platform.getModelReader().setDefaultSchemaPattern(getDataSourceConfiguration().getSchema()); - platform.setDataSource(dataSource); - return platform; - } - - // ORACLE_TARGET, the separator needs to be in place for the PL/SQL style - // blocks to run, else you get an EOF exception with ; - @Override - public void runSqlScript(final Resource resource) { - final ResourceDatabasePopulator databasePopulator = new ResourceDatabasePopulator(resource); - databasePopulator.setIgnoreFailedDrops(true); - databasePopulator.setSeparator("/"); - databasePopulator.execute(getDataSource()); - - } - - @Override - public float getDatabaseUtilization() throws SQLException { - return (float) 1.00; - } - - /* - * @Override protected void addCustomPlatformTypeMapping(Platform platform) - * { //System.out.println("$$SETTING ORACLE TYPE "); - * - * platform.getPlatformInfo().addNativeTypeMapping(2009, "SQLXML"); try { - * platform.getPlatformInfo().addNativeTypeMapping(981939, "TEST"); } catch - * (Exception e) { throw e; - * - * } - * - * } - */ - - @Override - protected void addCustomPlatformTypeMapping(final Platform platform) { - // platform.getPlatformInfo().addNativeTypeMapping(Types.NCLOB, - // "NVARCHAR(MAX)"); - // platform.getPlatformInfo().addNativeTypeMapping(Types.CLOB, - // "NVARCHAR(MAX)"); - // platform.getPlatformInfo().addNativeTypeMapping(Types.NVARCHAR, - // "VARCHAR2"); - - platform.getPlatformInfo().addNativeTypeMapping(Types.NVARCHAR, "VARCHAR2"); - platform.getPlatformInfo().setHasSize(Types.NVARCHAR, true); - platform.getPlatformInfo().addNativeTypeMapping(Types.VARBINARY, "BLOB"); - platform.getPlatformInfo().setHasSize(Types.VARBINARY, false); - - platform.getPlatformInfo().addNativeTypeMapping(Types.REAL, "NUMBER(30,8)"); - platform.getPlatformInfo().setHasPrecisionAndScale(Types.REAL, false); - - platform.getPlatformInfo().addNativeTypeMapping(Types.DOUBLE, "NUMBER(30,8)"); - platform.getPlatformInfo().setHasPrecisionAndScale(Types.DOUBLE, false); - platform.getPlatformInfo().setHasSize(Types.DOUBLE, false); - - platform.getPlatformInfo().addNativeTypeMapping(Types.BIGINT, "NUMBER(20,0)"); - platform.getPlatformInfo().setHasSize(Types.BIGINT, false); - platform.getPlatformInfo().setHasPrecisionAndScale(Types.BIGINT, false); - - platform.getPlatformInfo().addNativeTypeMapping(Types.INTEGER, 
"NUMBER(20,0)"); - platform.getPlatformInfo().setHasSize(Types.INTEGER, false); - platform.getPlatformInfo().setHasPrecisionAndScale(Types.INTEGER, false); - - platform.getPlatformInfo().addNativeTypeMapping(Types.TINYINT, "NUMBER(1,0)"); - platform.getPlatformInfo().setHasSize(Types.TINYINT, false); - platform.getPlatformInfo().setHasPrecisionAndScale(Types.TINYINT, false); - - platform.getPlatformInfo().addNativeTypeMapping(Types.CHAR, "NUMBER(10,0)"); - platform.getPlatformInfo().setHasSize(Types.CHAR, false); - platform.getPlatformInfo().setHasPrecisionAndScale(Types.CHAR, false); - // platform.getPlatformInfo().setHasNullDefault(Types.CHAR, true); - - // platform.getPlatformInfo().addNativeTypeMapping(Types.REAL, "float"); - // platform.getPlatformInfo().addNativeTypeMapping(Types.LONGVARBINARY, - // "VARBINARY(MAX)"); - - // platform.getPlatformInfo().setHasPrecisionAndScale(Types.REAL, - // false); - } - - @Override - public DataBaseProvider getDatabaseProvider() { - return DataBaseProvider.ORACLE; - } + @Override + protected void addCustomPlatformTypeMapping(final Platform platform) { + platform.getPlatformInfo().addNativeTypeMapping(Types.NVARCHAR, "VARCHAR2"); + platform.getPlatformInfo().setHasSize(Types.NVARCHAR, true); + platform.getPlatformInfo().addNativeTypeMapping(Types.VARBINARY, "BLOB"); + platform.getPlatformInfo().setHasSize(Types.VARBINARY, false); + + platform.getPlatformInfo().addNativeTypeMapping(Types.REAL, "NUMBER(30,8)"); + platform.getPlatformInfo().setHasPrecisionAndScale(Types.REAL, false); + + platform.getPlatformInfo().addNativeTypeMapping(Types.DOUBLE, "NUMBER(30,8)"); + platform.getPlatformInfo().setHasPrecisionAndScale(Types.DOUBLE, false); + platform.getPlatformInfo().setHasSize(Types.DOUBLE, false); + + platform.getPlatformInfo().addNativeTypeMapping(Types.BIGINT, "NUMBER(20,0)"); + platform.getPlatformInfo().setHasSize(Types.BIGINT, false); + platform.getPlatformInfo().setHasPrecisionAndScale(Types.BIGINT, false); + + platform.getPlatformInfo().addNativeTypeMapping(Types.INTEGER, "NUMBER(20,0)"); + platform.getPlatformInfo().setHasSize(Types.INTEGER, false); + platform.getPlatformInfo().setHasPrecisionAndScale(Types.INTEGER, false); + + platform.getPlatformInfo().addNativeTypeMapping(Types.TINYINT, "NUMBER(1,0)"); + platform.getPlatformInfo().setHasSize(Types.TINYINT, false); + platform.getPlatformInfo().setHasPrecisionAndScale(Types.TINYINT, false); + + platform.getPlatformInfo().addNativeTypeMapping(Types.CHAR, "NUMBER(10,0)"); + platform.getPlatformInfo().setHasSize(Types.CHAR, false); + platform.getPlatformInfo().setHasPrecisionAndScale(Types.CHAR, false); + + } + + @Override + public DataBaseProvider getDatabaseProvider() { + return DataBaseProvider.ORACLE; + } + + @Override + public String buildBulkUpsertStatement(String table, List columnsToCopy, List upsertIDs) { + final StringBuilder sqlBuilder = new StringBuilder(); + + sqlBuilder.append(String.format("MERGE INTO %s t", table)); + sqlBuilder.append("\n"); + sqlBuilder.append(String.format("USING (SELECT %s from dual) s ON (t.%s = s.%s)", + Joiner.on(',').join(columnsToCopy.stream().map(column -> "? " + column).collect(Collectors.toList())), + upsertIDs.get(0), upsertIDs.get(0))); + sqlBuilder.append("\n"); + sqlBuilder.append("WHEN MATCHED THEN UPDATE"); // update + sqlBuilder.append("\n"); + sqlBuilder.append(getBulkUpdateStatementParamList(columnsToCopy, + columnsToCopy.stream().map(column -> "s." 
+ column).collect(Collectors.toList()), upsertIDs)); + sqlBuilder.append("\n"); + sqlBuilder.append("WHEN NOT MATCHED THEN INSERT"); // insert + sqlBuilder.append("\n"); + sqlBuilder.append(getBulkInsertStatementParamList(columnsToCopy, + columnsToCopy.stream().map(column -> "s." + column).collect(Collectors.toList()))); + + return sqlBuilder.toString(); + } + + @Override + protected String getBulkInsertStatementParamList(List columnsToCopy, List columnsToCopyValues) { + return "(" + String.join(", ", columnsToCopy) + ") VALUES (" + String.join(", ", columnsToCopyValues) + ")"; + } + + @Override + protected String getBulkUpdateStatementParamList(List columnsToCopy, List columnsToCopyValues, + List upsertIDs) { + final String upsertID = upsertIDs.get(0); + final List columnsToCopyMinusPK = columnsToCopy.stream().filter(s -> !s.equalsIgnoreCase(upsertID)) + .toList(); + final List columnsToCopyValuesMinusPK = columnsToCopyValues.stream() + .filter(s -> !s.equalsIgnoreCase("s." + upsertID)).toList(); + LOG.debug("getBulkUpdateStatementParamList - columnsToCopyMinusPK =" + columnsToCopyMinusPK); + return "SET " + IntStream.range(0, columnsToCopyMinusPK.size()).mapToObj( + idx -> String.format("%s = %s", columnsToCopyMinusPK.get(idx), columnsToCopyValuesMinusPK.get(idx))) + .collect(Collectors.joining(", ")); + } + + @Override + public String getDatabaseTimezone() { + String query = "SELECT DBTIMEZONE FROM DUAL "; + try (Connection conn = super.getConnection(); PreparedStatement stmt = conn.prepareStatement(query)) { + try (ResultSet rs = stmt.executeQuery()) { + rs.next(); + if (rs.getString("DBTIMEZONE").equals("+00:00")) + return "UTC"; + else + return "Different timezone"; + } + } catch (Exception e) { + LOG.warn("Failed to check database timezone", e); + } + return null; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/PostGresDataRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/PostGresDataRepository.java index fa694b8..b8d7175 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/PostGresDataRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/impl/PostGresDataRepository.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -8,7 +8,10 @@ import javax.sql.DataSource; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import org.apache.ddlutils.Platform; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import org.springframework.core.io.Resource; import org.springframework.jdbc.datasource.init.ResourceDatabasePopulator; @@ -23,14 +26,32 @@ import de.hybris.bootstrap.ddl.DatabaseSettings; import de.hybris.bootstrap.ddl.HybrisPlatform; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.util.Collections; +import java.util.List; +import java.util.stream.Collectors; + public class PostGresDataRepository extends AbstractDataRepository { - public PostGresDataRepository(DataSourceConfiguration dataSourceConfiguration, DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { - super(dataSourceConfiguration, databaseMigrationDataTypeMapperService); + private static final Logger LOG = LoggerFactory.getLogger(PostGresDataRepository.class); + + public PostGresDataRepository(MigrationContext migrationContext, DataSourceConfiguration dataSourceConfiguration, + DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService) { + super(migrationContext, dataSourceConfiguration, databaseMigrationDataTypeMapperService); } @Override protected String buildOffsetBatchQuery(OffsetQueryDefinition queryDefinition, String... conditions) { - return String.format("select * from %s where %s order by %s limit %s,%s", queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getOrderByColumns(), queryDefinition.getOffset(), queryDefinition.getBatchSize()); + final String batchQuery = String.format( + "SELECT * FROM %s WHERE %s ORDER BY %s OFFSET ? ROWS FETCH NEXT ? ROWS ONLY", + queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getOrderByColumns()); + return batchQuery; + } + + @Override + protected boolean hasParameterizedOffsetBatchQuery() { + return true; } @Override @@ -43,57 +64,80 @@ public void runSqlScript(Resource resource) { @Override protected String buildValueBatchQuery(SeekQueryDefinition queryDefinition, String... conditions) { - return String.format("select * from %s where %s order by %s limit %s", queryDefinition.getTable(), expandConditions(conditions), queryDefinition.getColumn(), queryDefinition.getBatchSize()); + return String.format("select * from %s where %s order by %s limit %s", queryDefinition.getTable(), + expandConditions(conditions), queryDefinition.getColumn(), queryDefinition.getBatchSize()); } - @Override protected String buildBatchMarkersQuery(MarkersQueryDefinition queryDefinition, String... conditions) { - String column = queryDefinition.getColumn(); - return String.format("SELECT %s,rownum\n" + - "FROM ( \n" + - " SELECT \n" + - " @row := @row +1 AS rownum, %s \n" + - " FROM (SELECT @row :=-1) r, %s WHERE %s ORDER BY %s) ranked \n" + - "WHERE rownum %% %s = 0 ", column, column, queryDefinition.getTable(), expandConditions(conditions), column, queryDefinition.getBatchSize()); + final String column = queryDefinition.getColumn(); + final String tableName = queryDefinition.getTable(); + // spotless:off + return String.format("SELECT t.%s, t.rownum\n" + + "FROM\n" + + "(\n" + + " SELECT %s, (ROW_NUMBER() OVER (ORDER BY %s))-1 AS rownum\n" + + " FROM %s\n WHERE %s" + + ") AS t\n" + + "WHERE t.rownum %% ? 
= 0\n" + + "ORDER BY t.%s", + // spotless:on + column, column, column, tableName, expandConditions(conditions), column); + } + + @Override + protected boolean hasParameterizedBatchMarkersQuery() { + return true; + } + + @Override + protected String getLastValueCondition() { + /* + * In case of PostgreSQL JDBC driver was complaining about batch marker being a + * varchar: `org.postgresql.util.PSQLException: ERROR: operator does not exist: + * bigint >= character varying` + */ + return "%s >= CAST(? AS BIGINT)"; + } + + @Override + protected String getNextValueCondition() { + return "%s < CAST(? AS BIGINT)"; } @Override protected String createAllTableNamesQuery() { return String.format( - "select TABLE_NAME from information_schema.tables where table_schema = '%s' and TABLE_TYPE = 'BASE TABLE'", + "SELECT TABLE_NAME FROM INFORMATION_SCHEMA.TABLES WHERE TABLE_SCHEMA = '%s' AND TABLE_TYPE = 'BASE TABLE'", getDataSourceConfiguration().getSchema()); } @Override protected String createAllColumnNamesQuery(String tableName) { return String.format( - "SELECT DISTINCT COLUMN_NAME from information_schema.columns where table_schema = '%s' AND TABLE_NAME = '%s'", + "SELECT DISTINCT COLUMN_NAME FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = '%s' AND TABLE_NAME = '%s'", getDataSourceConfiguration().getSchema(), tableName); } @Override protected String createUniqueColumnsQuery(String tableName) { + // spotless:off return String.format( - "SELECT COLUMN_NAME FROM INFORMATION_SCHEMA.STATISTICS t1\n" + - "INNER JOIN \n" + - "(\n" + - "SELECT DISTINCT TABLE_SCHEMA, TABLE_NAME, INDEX_NAME, count(INDEX_NAME) as COL_COUNT \n" + - "FROM INFORMATION_SCHEMA.STATISTICS \n" + - "WHERE TABLE_SCHEMA = '%s' AND TABLE_NAME = '%s' AND NON_UNIQUE = 0\n" + - "GROUP BY TABLE_SCHEMA, TABLE_NAME, INDEX_NAME\n" + - "ORDER BY COL_COUNT ASC\n" + - "LIMIT 1\n" + - ") t2\n" + - "ON t1.TABLE_SCHEMA = t2.TABLE_SCHEMA AND t1.TABLE_NAME = t2.TABLE_NAME AND t1.INDEX_NAME = t2.INDEX_NAME\n" + - ";\n", + "SELECT COLUMN_NAME\n" + + " FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS C\n" + + " JOIN INFORMATION_SCHEMA.CONSTRAINT_COLUMN_USAGE AS CC\n" + + " USING (TABLE_SCHEMA, TABLE_NAME, CONSTRAINT_NAME)\n" + + " WHERE C.CONSTRAINT_TYPE IN ('UNIQUE', 'PRIMARY KEY')\n" + + " AND TABLE_SCHEMA = '%s'\n" + + " AND TABLE_NAME = '%s';", + // spotless:on getDataSourceConfiguration().getSchema(), tableName); } @Override protected void addCustomPlatformTypeMapping(final Platform platform) { - // DO nothing + // DO nothing } @Override @@ -101,10 +145,51 @@ public DataBaseProvider getDatabaseProvider() { return DataBaseProvider.POSTGRESQL; } + @Override + public String buildBulkUpsertStatement(String table, List columnsToCopy, List upsertIDs) { + // example: + // https://www.postgresqltutorial.com/postgresql-tutorial/postgresql-upsert/ + final StringBuilder sqlBuilder = new StringBuilder(); + + sqlBuilder.append("INSERT INTO ").append(table).append(" \n"); + sqlBuilder + .append(getBulkInsertStatementParamList(columnsToCopy, Collections.nCopies(columnsToCopy.size(), "?"))); + sqlBuilder.append(String.format(" ON CONFLICT (%s) DO UPDATE ", upsertIDs.get(0))).append('\n'); + sqlBuilder.append(getBulkUpdateStatementParamList(columnsToCopy, Collections.emptyList(), upsertIDs)); + + return sqlBuilder.toString(); + } + + @Override + protected String getBulkInsertStatementParamList(List columnsToCopy, List columnsToCopyValues) { + return "(" + String.join(", ", columnsToCopy) + ") VALUES (" + String.join(", ", columnsToCopyValues) + ")"; + } + + @Override + 
protected String getBulkUpdateStatementParamList(List columnsToCopy, List columnsToCopyValues, + List upsertIDs) { + return "SET " + columnsToCopy.stream().filter(s -> !s.equalsIgnoreCase(upsertIDs.get(0))) + .map(column -> String.format("%s = EXCLUDED.%s", column, column)).collect(Collectors.joining(", ")); + } + @Override protected Platform createPlatform(DatabaseSettings databaseSettings, DataSource dataSource) { HybrisPlatform instance = MigrationHybrisPostGresPlatform.build(databaseSettings); instance.setDataSource(dataSource); return instance; } + + @Override + public String getDatabaseTimezone() { + String query = "SELECT abbrev FROM pg_timezone_names WHERE name = current_setting('TIMEZONE')"; + try (Connection conn = super.getConnection(); PreparedStatement stmt = conn.prepareStatement(query)) { + try (ResultSet rs = stmt.executeQuery()) { + rs.next(); + return rs.getString("abbrev"); + } + } catch (Exception e) { + LOG.warn("Failed to check database timezone", e); + } + return null; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisHANABuilder.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisHANABuilder.java index d871175..44064e6 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisHANABuilder.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisHANABuilder.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -9,33 +9,41 @@ import de.hybris.bootstrap.ddl.DatabaseSettings; import de.hybris.bootstrap.ddl.sql.ColumnNativeTypeDecorator; import de.hybris.bootstrap.ddl.sql.HanaSqlBuilder; -import de.hybris.bootstrap.ddl.sql.HybrisMSSqlBuilder; +import org.apache.commons.lang3.StringUtils; import org.apache.ddlutils.Platform; +import org.apache.ddlutils.alteration.RemoveColumnChange; import org.apache.ddlutils.model.Column; +import org.apache.ddlutils.model.Database; +import org.apache.ddlutils.model.Table; +import java.io.IOException; import java.sql.Types; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.stream.IntStream; public class MigrationHybrisHANABuilder extends HanaSqlBuilder { public MigrationHybrisHANABuilder(Platform platform, DatabaseSettings databaseSettings, - final Iterable columnNativeTypeDecorators) { - super(platform, databaseSettings,columnNativeTypeDecorators); + final Iterable columnNativeTypeDecorators) { + super(platform, databaseSettings, columnNativeTypeDecorators); } @Override protected String getSqlType(Column column) { /* - core-advanced-deployment.xml:661 - TODO implement more generic mapper for special attrs + * core-advanced-deployment.xml:661 TODO implement more generic mapper for + * special attrs */ final String nativeType = this.getNativeType(column); final int sizePos = nativeType.indexOf(SIZE_PLACEHOLDER); final StringBuilder sqlType = new StringBuilder(); - if((column.getTypeCode() == Types.NVARCHAR) && Integer.parseInt(column.getSize()) > 5000){ + if ((column.getTypeCode() == Types.NVARCHAR) && Integer.parseInt(column.getSize()) > 5000) { return sqlType.append("NCLOB").toString(); - } + } sqlType.append(sizePos >= 0 ? 
nativeType.substring(0, sizePos) : nativeType); @@ -44,35 +52,69 @@ protected String getSqlType(Column column) { sizeSpec = this.getPlatformInfo().getDefaultSize(column.getTypeCode()); } - if (sizeSpec != null) - { - if (this.getPlatformInfo().hasSize(column.getTypeCode())) { - sqlType.append("("); - sqlType.append(detectSize(column)); - sqlType.append(")"); - } else if (this.getPlatformInfo().hasPrecisionAndScale(column.getTypeCode())) { - sqlType.append("("); - sqlType.append(column.getSizeAsInt()); - sqlType.append(","); - sqlType.append(column.getScale()); - sqlType.append(")"); - } + if (sizeSpec != null) { + if (this.getPlatformInfo().hasSize(column.getTypeCode())) { + sqlType.append("("); + sqlType.append(detectSize(column)); + sqlType.append(")"); + } else if (this.getPlatformInfo().hasPrecisionAndScale(column.getTypeCode())) { + sqlType.append("("); + sqlType.append(column.getSizeAsInt()); + sqlType.append(","); + sqlType.append(column.getScale()); + sqlType.append(")"); } + } sqlType.append(sizePos >= 0 ? nativeType.substring(sizePos + "{0}".length()) : ""); return sqlType.toString(); } - //ddlutils cannot handle "complex" sizes ootb, therefore adding support here + // ddlutils cannot handle "complex" sizes ootb, therefore adding support here private String detectSize(Column column) { if (this.getPlatformInfo().hasSize(column.getTypeCode())) { if (column.getTypeCode() == Types.NVARCHAR) { - if (column.getSizeAsInt() > 255 && column.getSizeAsInt() <=5000 ) { - return ""+ 5000; + if (column.getSizeAsInt() > 255 && column.getSizeAsInt() <= 5000) { + return "" + 5000; } } else if (column.getTypeCode() == Types.DOUBLE) { - return "30,8"; + return "30,8"; } } return column.getSize(); } + + @Override + @SuppressWarnings("rawtypes") + public void processTableStructureChanges(Database currentModel, Database desiredModel, Table sourceTable, + Table targetTable, Map parameters, List changes) throws IOException { + Iterator changeIt = changes.iterator(); + + while (changeIt.hasNext()) { + if (changeIt.next()instanceof RemoveColumnChange removeColumnChange) { + processChange(currentModel, desiredModel, removeColumnChange); + changeIt.remove(); + } + } + + super.processTableStructureChanges(currentModel, desiredModel, sourceTable, targetTable, parameters, changes); + } + + protected void processChange(Database currentModel, Database desiredModel, RemoveColumnChange change) + throws IOException { + final Table changedTable = currentModel.findTable(change.getChangedTable().getName(), false); + final Column[] allColumns = changedTable.getColumns(); + + this.print("ALTER TABLE "); + this.printlnIdentifier(this.getTableName(changedTable)); + this.printIndent(); + this.print("DROP ("); + this.printIdentifier(this.getColumnName(change.getColumn())); + this.print(")"); + this.printEndOfStatement(); + + IntStream.range(0, allColumns.length) + .filter(i -> StringUtils.equalsIgnoreCase(allColumns[i].getName(), change.getColumn().getName())) + .findAny().ifPresent(changedTable::removeColumn); + } + } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisHANAPlatform.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisHANAPlatform.java index 24197f4..69ab9ad 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisHANAPlatform.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisHANAPlatform.java @@ -1,5 +1,5 @@ /* - * 
Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -8,19 +8,28 @@ import com.google.common.collect.ImmutableList; import de.hybris.bootstrap.ddl.DatabaseSettings; +import de.hybris.bootstrap.ddl.HanaModelReader; import de.hybris.bootstrap.ddl.HybrisHanaPlatform; import de.hybris.bootstrap.ddl.HybrisPlatform; import de.hybris.bootstrap.ddl.jdbc.PlatformJDBCMappingProvider; import de.hybris.bootstrap.ddl.sql.ColumnNativeTypeDecorator; import de.hybris.bootstrap.ddl.sql.HanaBlobColumnNativeTypeDecorator; +import org.apache.commons.lang3.StringUtils; +import org.apache.ddlutils.Platform; import org.apache.ddlutils.PlatformInfo; +import org.apache.ddlutils.model.Database; import org.apache.ddlutils.model.JdbcTypeCategoryEnum; +import org.apache.ddlutils.model.Table; import org.apache.ddlutils.model.TypeMap; +import org.apache.ddlutils.platform.DatabaseMetaDataWrapper; import org.apache.ddlutils.platform.SqlBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import java.sql.Connection; +import java.sql.SQLException; import java.sql.Types; +import java.util.Map; public class MigrationHybrisHANAPlatform extends HybrisHanaPlatform implements HybrisPlatform { @@ -28,7 +37,6 @@ public class MigrationHybrisHANAPlatform extends HybrisHanaPlatform implements H private SqlBuilder sqlBuilder; - private MigrationHybrisHANAPlatform(final DatabaseSettings databaseSettings) { super(databaseSettings); } @@ -38,11 +46,13 @@ public static HybrisPlatform build(DatabaseSettings databaseSettings) { final MigrationHybrisHANAPlatform instance = new MigrationHybrisHANAPlatform(databaseSettings); HANAHybrisTypeMap.register(); instance.provideCustomMapping(); - instance.setSqlBuilder(new MigrationHybrisHANABuilder(instance, databaseSettings, getNativeTypeDecorators(databaseSettings))); + instance.setModelReader(new MigrationHanaModelReader(instance, databaseSettings.getTablePrefix())); + instance.setSqlBuilder( + new MigrationHybrisHANABuilder(instance, databaseSettings, getNativeTypeDecorators(databaseSettings))); return instance; } - private void provideCustomMapping() - { + + private void provideCustomMapping() { final PlatformInfo platformInfo = getPlatformInfo(); platformInfo.setMaxColumnNameLength(PlatformJDBCMappingProvider.MAX_COLUMN_NAME_LENGTH); @@ -50,8 +60,9 @@ private void provideCustomMapping() platformInfo.addNativeTypeMapping(PlatformJDBCMappingProvider.HYBRIS_PK, "BIGINT", Types.BIGINT); platformInfo.addNativeTypeMapping(PlatformJDBCMappingProvider.HYBRIS_LONG_STRING, "NCLOB", Types.NCLOB); platformInfo.addNativeTypeMapping(PlatformJDBCMappingProvider.HYBRIS_JSON, "NCLOB", Types.LONGVARCHAR); - platformInfo.addNativeTypeMapping(PlatformJDBCMappingProvider.HYBRIS_COMMA_SEPARATED_PKS, "NVARCHAR{0}", Types.NVARCHAR); - // platformInfo.addNativeTypeMapping(2011, "NCLOB"); + platformInfo.addNativeTypeMapping(PlatformJDBCMappingProvider.HYBRIS_COMMA_SEPARATED_PKS, "NVARCHAR{0}", + Types.NVARCHAR); + // platformInfo.addNativeTypeMapping(2011, "NCLOB"); platformInfo.setHasSize(PlatformJDBCMappingProvider.HYBRIS_LONG_STRING, true); platformInfo.setHasSize(PlatformJDBCMappingProvider.HYBRIS_COMMA_SEPARATED_PKS, true); @@ -74,10 +85,11 @@ protected void setSqlBuilder(SqlBuilder builder) { this.sqlBuilder = builder; } - private static Iterable getNativeTypeDecorators(final DatabaseSettings databaseSettings) - { + private static Iterable 
getNativeTypeDecorators( + final DatabaseSettings databaseSettings) { return ImmutableList.of(new HanaBlobColumnNativeTypeDecorator(databaseSettings)); } + static class HANAHybrisTypeMap extends TypeMap { static void register() { @@ -86,4 +98,29 @@ static void register() { } } + private static final class MigrationHanaModelReader extends HanaModelReader { + private String currentSchema; + + public MigrationHanaModelReader(Platform platform, String tablePrefix) { + super(platform, tablePrefix); + } + + @Override + public Database getDatabase(Connection connection, String name) throws SQLException { + return this.getDatabase(connection, name, null, connection.getSchema(), null); + } + + @Override + protected Table readTable(DatabaseMetaDataWrapper metaData, Map values) throws SQLException { + if (currentSchema == null) { + currentSchema = metaData.getMetaData().getConnection().getSchema(); + } + + if (!StringUtils.equalsIgnoreCase(currentSchema, (String) values.get("TABLE_SCHEM"))) { + return null; + } + + return super.readTable(metaData, values); + } + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisMSSqlBuilder.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisMSSqlBuilder.java index 03e0acd..8a3aade 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisMSSqlBuilder.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisMSSqlBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -22,10 +22,10 @@ public MigrationHybrisMSSqlBuilder(Platform platform, DatabaseSettings databaseS @Override protected String getSqlType(Column column) { /* - core-advanced-deployment.xml:661 - TODO implement more generic mapper for special attrs + * core-advanced-deployment.xml:661 TODO implement more generic mapper for + * special attrs */ - if (column.getName().equalsIgnoreCase("InheritancePathString")) { + if ("InheritancePathString".equalsIgnoreCase(column.getName())) { return "varchar(1800)"; } String nativeType = this.getNativeType(column); @@ -55,7 +55,7 @@ protected String getSqlType(Column column) { return sqlType.toString(); } - //ddlutils cannot handle "complex" sizes ootb, therefore adding support here + // ddlutils cannot handle "complex" sizes ootb, therefore adding support here private String detectSize(Column column) { if (this.getPlatformInfo().hasSize(column.getTypeCode())) { if (column.getTypeCode() == Types.NVARCHAR) { diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisMSSqlPlatform.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisMSSqlPlatform.java index bc0e138..2ff1d9a 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisMSSqlPlatform.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisMSSqlPlatform.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -23,7 +23,7 @@ import java.sql.Connection; import java.sql.SQLException; -import java.util.HashSet; +import java.sql.Types; import java.util.Map; import java.util.Set; @@ -31,7 +31,6 @@ public class MigrationHybrisMSSqlPlatform extends MSSqlPlatform implements Hybri private static final Logger LOG = LoggerFactory.getLogger(MigrationHybrisMSSqlPlatform.class); - private MigrationHybrisMSSqlPlatform() { } @@ -39,35 +38,36 @@ public static HybrisPlatform build(DatabaseSettings databaseSettings) { MigrationHybrisMSSqlPlatform instance = new MigrationHybrisMSSqlPlatform(); instance.provideCustomMapping(); instance.setSqlBuilder(new MigrationHybrisMSSqlBuilder(instance, databaseSettings)); - MigrationHybrisMSSqlPlatform.HybrisMSSqlModelReader reader = new MigrationHybrisMSSqlPlatform.HybrisMSSqlModelReader(instance); + MigrationHybrisMSSqlPlatform.HybrisMSSqlModelReader reader = new MigrationHybrisMSSqlPlatform.HybrisMSSqlModelReader( + instance); reader.setDefaultTablePattern(databaseSettings.getTablePrefix() + '%'); instance.setModelReader(reader); return instance; } public Database readModelFromDatabase(String name) throws DatabaseOperationException { - return this.readModelFromDatabase(name, (String) null, (String) null, (String[]) null); + return this.readModelFromDatabase(name, null, null, null); } private void provideCustomMapping() { PlatformInfo platformInfo = this.getPlatformInfo(); platformInfo.setMaxColumnNameLength(30); - platformInfo.addNativeTypeMapping(12002, "BIGINT", -5); - platformInfo.addNativeTypeMapping(12000, "NVARCHAR(MAX)", -1); - platformInfo.addNativeTypeMapping(12003, "NVARCHAR(MAX)", -1); - platformInfo.addNativeTypeMapping(12001, "NVARCHAR(MAX)", -1); - platformInfo.addNativeTypeMapping(-5, "BIGINT"); - platformInfo.addNativeTypeMapping(12, "NVARCHAR"); - platformInfo.addNativeTypeMapping(-7, "TINYINT"); - platformInfo.addNativeTypeMapping(4, "INTEGER"); - platformInfo.addNativeTypeMapping(5, "INTEGER"); - platformInfo.addNativeTypeMapping(-6, "TINYINT", -6); - platformInfo.addNativeTypeMapping(8, "FLOAT", 8); - platformInfo.addNativeTypeMapping(6, "FLOAT", 8); - platformInfo.addNativeTypeMapping(-9, "NVARCHAR", -9); - platformInfo.addNativeTypeMapping(92, "DATETIME2", 93); - platformInfo.addNativeTypeMapping(93, "DATETIME2"); - platformInfo.addNativeTypeMapping(2004, "VARBINARY(MAX)"); + platformInfo.addNativeTypeMapping(12002, "BIGINT", Types.BIGINT); + platformInfo.addNativeTypeMapping(12000, "NVARCHAR(MAX)", Types.LONGVARCHAR); + platformInfo.addNativeTypeMapping(12003, "NVARCHAR(MAX)", Types.LONGVARCHAR); + platformInfo.addNativeTypeMapping(12001, "NVARCHAR(MAX)", Types.LONGVARCHAR); + platformInfo.addNativeTypeMapping(Types.BIGINT, "BIGINT"); + platformInfo.addNativeTypeMapping(Types.VARCHAR, "NVARCHAR"); + platformInfo.addNativeTypeMapping(Types.BIT, "TINYINT"); + platformInfo.addNativeTypeMapping(Types.INTEGER, "INTEGER"); + platformInfo.addNativeTypeMapping(Types.SMALLINT, "INTEGER"); + platformInfo.addNativeTypeMapping(Types.TINYINT, "TINYINT", Types.TINYINT); + platformInfo.addNativeTypeMapping(Types.DOUBLE, "FLOAT", Types.DOUBLE); + platformInfo.addNativeTypeMapping(Types.FLOAT, "FLOAT", Types.DOUBLE); + platformInfo.addNativeTypeMapping(Types.NVARCHAR, "NVARCHAR", Types.NVARCHAR); + platformInfo.addNativeTypeMapping(Types.TIME, "DATETIME2", Types.TIMESTAMP); + platformInfo.addNativeTypeMapping(Types.TIMESTAMP, "DATETIME2"); + platformInfo.addNativeTypeMapping(Types.BLOB, "VARBINARY(MAX)"); } public String getTableName(Table 
table) { @@ -79,7 +79,8 @@ public String getColumnName(Column column) { } @Override - public void alterTables(Connection connection, Database desiredModel, boolean continueOnError) throws DatabaseOperationException { + public void alterTables(Connection connection, Database desiredModel, boolean continueOnError) + throws DatabaseOperationException { String sql = this.getAlterTablesSql(connection, desiredModel); LOG.info(sql); this.evaluateBatch(connection, sql, continueOnError); @@ -87,12 +88,8 @@ public void alterTables(Connection connection, Database desiredModel, boolean co private static class HybrisMSSqlModelReader extends MSSqlModelReader { private static final String TABLE_NAME_KEY = "TABLE_NAME"; - private final Set tablesToExclude = new HashSet() { - { - this.add("trace_xe_action_map"); - this.add("trace_xe_event_map"); - } - }; + + private final Set tablesToExclude = Set.of("trace_xe_action_map", "trace_xe_event_map"); public HybrisMSSqlModelReader(Platform platform) { super(platform); @@ -108,7 +105,7 @@ private boolean tableShouldBeExcluded(Map values) { } private String getTableNameFrom(Map values) { - return (String) values.get("TABLE_NAME"); + return (String) values.get(TABLE_NAME_KEY); } } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisMySqlBuilder.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisMySqlBuilder.java new file mode 100644 index 0000000..322a5a6 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisMySqlBuilder.java @@ -0,0 +1,52 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.repository.platform; + +import de.hybris.bootstrap.ddl.DatabaseSettings; +import de.hybris.bootstrap.ddl.sql.HybrisMySqlBuilder; +import org.apache.commons.lang3.StringUtils; +import org.apache.ddlutils.Platform; +import org.apache.ddlutils.model.Column; +import org.apache.ddlutils.model.TypeMap; + +import java.sql.Types; + +public class MigrationHybrisMySqlBuilder extends HybrisMySqlBuilder { + + public MigrationHybrisMySqlBuilder(Platform platform, DatabaseSettings databaseSettings) { + super(platform, databaseSettings); + } + + @Override + protected String getSqlType(Column column) { + if (column.getTypeCode() == Types.NVARCHAR && Integer.parseInt(column.getSize()) > 5000) { + return "TEXT"; + } + + if (column.getTypeCode() == Types.VARBINARY && Integer.parseInt(column.getSize()) > 65535) { + return "LONGBLOB"; + } + + if (column.getTypeCode() == Types.TIMESTAMP) { + final StringBuilder nativeType = new StringBuilder(getPlatformInfo().getNativeType(column.getTypeCode())); + + if (getPlatformInfo().hasSize(column.getTypeCode())) { + nativeType.append('(').append(getPlatformInfo().getDefaultSize(column.getTypeCode())).append(')'); + } + + return nativeType.toString(); + } + + return super.getSqlType(column); + } + + @Override + public boolean isValidDefaultValue(String defaultSpec, int typeCode) { + return StringUtils.isNumeric(defaultSpec) + && (defaultSpec.length() > 0 || !TypeMap.isNumericType(typeCode) && !TypeMap.isDateTimeType(typeCode)); + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisMySqlPlatform.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisMySqlPlatform.java new file mode 100644 index 
0000000..638514e --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisMySqlPlatform.java @@ -0,0 +1,80 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.repository.platform; + +import de.hybris.bootstrap.ddl.DatabaseSettings; +import de.hybris.bootstrap.ddl.HybrisPlatform; +import de.hybris.bootstrap.ddl.sql.HybrisMySqlBuilder; +import org.apache.ddlutils.PlatformInfo; +import org.apache.ddlutils.model.Column; +import org.apache.ddlutils.model.Table; +import org.apache.ddlutils.platform.mysql.MySql50ModelReader; +import org.apache.ddlutils.platform.mysql.MySql50Platform; + +import java.util.Objects; + +public class MigrationHybrisMySqlPlatform extends MySql50Platform implements HybrisPlatform { + private static final String MYSQL_ALLOW_FRACTIONAL_SECONDS = "mysql.allow.fractional.seconds"; + private final boolean isFractionalSecondsSupportEnabled; + + private MigrationHybrisMySqlPlatform(boolean isFractionalSecondsSupportEnabled) { + this.isFractionalSecondsSupportEnabled = isFractionalSecondsSupportEnabled; + } + + public static HybrisPlatform build(DatabaseSettings databaseSettings) { + Objects.requireNonNull(databaseSettings); + boolean allowFractionaSeconds = Boolean + .parseBoolean(databaseSettings.getProperty(MYSQL_ALLOW_FRACTIONAL_SECONDS, Boolean.TRUE.toString())); + MigrationHybrisMySqlPlatform instance = new MigrationHybrisMySqlPlatform(allowFractionaSeconds); + instance.provideCustomMapping(); + instance.setSqlBuilder(new MigrationHybrisMySqlBuilder(instance, databaseSettings)); + MySql50ModelReader reader = new MySql50ModelReader(instance); + reader.setDefaultTablePattern(databaseSettings.getTablePrefix() + "%"); + instance.setModelReader(reader); + return instance; + } + + private void provideCustomMapping() { + PlatformInfo platformInfo = this.getPlatformInfo(); + platformInfo.setMaxColumnNameLength(30); + platformInfo.addNativeTypeMapping(-1, "TEXT"); + platformInfo.addNativeTypeMapping(12002, "BIGINT", -5); + platformInfo.addNativeTypeMapping(12000, "TEXT", -1); + platformInfo.addNativeTypeMapping(12003, "LONGTEXT", -1); + platformInfo.addNativeTypeMapping(12001, "TEXT", -1); + platformInfo.addNativeTypeMapping(12, "VARCHAR", 12); + platformInfo.setDefaultSize(12, 255); + platformInfo.addNativeTypeMapping(6, "FLOAT{0}"); + platformInfo.setHasPrecisionAndScale(6, true); + + platformInfo.addNativeTypeMapping(-5, "BIGINT"); + platformInfo.addNativeTypeMapping(-7, "TINYINT"); + platformInfo.addNativeTypeMapping(4, "INTEGER"); + platformInfo.addNativeTypeMapping(5, "INTEGER"); + platformInfo.addNativeTypeMapping(-6, "TINYINT", -6); + platformInfo.addNativeTypeMapping(8, "FLOAT", 8); + platformInfo.addNativeTypeMapping(-9, "VARCHAR", -9); + platformInfo.setDefaultSize(-9, 255); + platformInfo.setHasSize(-9, true); + platformInfo.addNativeTypeMapping(92, "DATETIME", 93); + platformInfo.addNativeTypeMapping(93, "DATETIME"); + platformInfo.addNativeTypeMapping(2004, "LONGBLOB"); + + if (this.isFractionalSecondsSupportEnabled) { + platformInfo.setHasSize(93, true); + platformInfo.setDefaultSize(93, 6); + } + } + + public String getColumnName(Column column) { + return ((HybrisMySqlBuilder) this.getSqlBuilder()).getColumnName(column); + } + + public String getTableName(Table table) { + return this.getSqlBuilder().getTableName(table); + } +} diff --git 
a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisPostGresBuilder.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisPostGresBuilder.java index 2d018f6..03fb311 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisPostGresBuilder.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisPostGresBuilder.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -11,6 +11,7 @@ import org.apache.ddlutils.model.Column; import org.apache.ddlutils.model.TypeMap; import org.apache.ddlutils.platform.postgresql.PostgreSqlBuilder; + import java.sql.Types; public class MigrationHybrisPostGresBuilder extends PostgreSqlBuilder { @@ -26,7 +27,7 @@ protected String getSqlType(Column column) { int sizePos = nativeType.indexOf("{0}"); StringBuilder sqlType = new StringBuilder(); - if((column.getTypeCode() == Types.NVARCHAR) && Integer.parseInt(column.getSize()) > 5000){ + if ((column.getTypeCode() == Types.NVARCHAR) && Integer.parseInt(column.getSize()) > 5000) { return sqlType.append("text").toString(); } @@ -54,7 +55,7 @@ protected String getSqlType(Column column) { return sqlType.toString(); } - //ddlutils cannot handle "complex" sizes ootb, therefore adding support here + // ddlutils cannot handle "complex" sizes ootb, therefore adding support here private String detectSize(Column column) { if (this.getPlatformInfo().hasSize(column.getTypeCode())) { if (column.getTypeCode() == Types.NVARCHAR) { @@ -76,15 +77,14 @@ private String detectSize(Column column) { return column.getSize(); } - @Override - public boolean isValidDefaultValue(String defaultSpec, int typeCode) { - return defaultSpec != null && StringUtils.isNumeric(defaultSpec) && (defaultSpec.length() > 0 || !TypeMap.isNumericType(typeCode) && !TypeMap.isDateTimeType(typeCode)); + public boolean isValidDefaultValue(String defaultSpec, int typeCode) { + return defaultSpec != null && StringUtils.isNumeric(defaultSpec) + && (defaultSpec.length() > 0 || !TypeMap.isNumericType(typeCode) && !TypeMap.isDateTimeType(typeCode)); } @Override - public String getColumnName(final Column column) - { + public String getColumnName(final Column column) { return column.getName(); } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisPostGresPlatform.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisPostGresPlatform.java index b3ad417..283de1a 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisPostGresPlatform.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/repository/platform/MigrationHybrisPostGresPlatform.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -8,19 +8,24 @@ import de.hybris.bootstrap.ddl.DatabaseSettings; import de.hybris.bootstrap.ddl.HybrisPlatform; +import org.apache.ddlutils.Platform; import org.apache.ddlutils.PlatformInfo; import org.apache.ddlutils.model.Column; +import org.apache.ddlutils.model.Database; import org.apache.ddlutils.model.Table; +import org.apache.ddlutils.platform.postgresql.PostgreSqlModelReader; import org.apache.ddlutils.platform.postgresql.PostgreSqlPlatform; import org.slf4j.Logger; import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.SQLException; import java.sql.Types; public class MigrationHybrisPostGresPlatform extends PostgreSqlPlatform implements HybrisPlatform { private static final Logger LOG = LoggerFactory.getLogger(MigrationHybrisPostGresPlatform.class); - private MigrationHybrisPostGresPlatform() { super(); } @@ -29,16 +34,16 @@ public static HybrisPlatform build(DatabaseSettings databaseSettings) { MigrationHybrisPostGresPlatform instance = new MigrationHybrisPostGresPlatform(); instance.provideCustomMapping(); instance.setSqlBuilder(new MigrationHybrisPostGresBuilder(instance)); + instance.setModelReader(new MigrationPostgreSqlModelReader(instance)); return instance; } - private void provideCustomMapping() { PlatformInfo platformInfo = this.getPlatformInfo(); platformInfo.setMaxColumnNameLength(31); platformInfo.addNativeTypeMapping(Types.NVARCHAR, "VARCHAR", Types.VARCHAR); - platformInfo.addNativeTypeMapping(Types.NCHAR, "int2", Types.TINYINT); - platformInfo.addNativeTypeMapping(Types.CHAR, "int2", Types.TINYINT); + platformInfo.addNativeTypeMapping(Types.NCHAR, "int2", Types.TINYINT); + platformInfo.addNativeTypeMapping(Types.CHAR, "int2", Types.TINYINT); platformInfo.setHasSize(Types.CHAR, false); platformInfo.setHasSize(Types.NCHAR, false); platformInfo.setHasSize(Types.NVARCHAR, true); @@ -58,4 +63,15 @@ public String getTableName(Table table) { public String getColumnName(Column column) { return ((MigrationHybrisPostGresBuilder) this.getSqlBuilder()).getColumnName(column); } + + private static final class MigrationPostgreSqlModelReader extends PostgreSqlModelReader { + public MigrationPostgreSqlModelReader(Platform platform) { + super(platform); + } + + @Override + public Database getDatabase(Connection connection, String name) throws SQLException { + return this.getDatabase(connection, name, null, connection.getSchema(), null); + } + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/DatabaseCopyScheduler.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/DatabaseCopyScheduler.java index a4bd880..db8d914 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/DatabaseCopyScheduler.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/DatabaseCopyScheduler.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -17,6 +17,8 @@ public interface DatabaseCopyScheduler { void schedule(CopyContext context) throws Exception; + void resumeUnfinishedItems(CopyContext copyContext) throws Exception; + MigrationStatus getCurrentState(CopyContext context, OffsetDateTime since) throws Exception; boolean isAborted(CopyContext context) throws Exception; diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/DatabaseCopySchedulerAlgorithm.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/DatabaseCopySchedulerAlgorithm.java new file mode 100644 index 0000000..6286eec --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/DatabaseCopySchedulerAlgorithm.java @@ -0,0 +1,19 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.scheduler; + +import java.util.List; + +public interface DatabaseCopySchedulerAlgorithm { + int getOwnNodeId(); + + List getNodeIds(); + + int next(); + + void reset(); +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/impl/CustomClusterDatabaseCopyScheduler.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/impl/CustomClusterDatabaseCopyScheduler.java index 241d094..06e7206 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/impl/CustomClusterDatabaseCopyScheduler.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/impl/CustomClusterDatabaseCopyScheduler.java @@ -1,46 +1,45 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ package com.sap.cx.boosters.commercedbsync.scheduler.impl; +import com.sap.cx.boosters.commercedbsync.adapter.DataRepositoryAdapter; +import com.sap.cx.boosters.commercedbsync.adapter.impl.ContextualDataRepositoryAdapter; +import com.sap.cx.boosters.commercedbsync.context.CopyContext; import com.sap.cx.boosters.commercedbsync.events.CopyCompleteEvent; +import com.sap.cx.boosters.commercedbsync.events.CopyDatabaseTableEvent; +import com.sap.cx.boosters.commercedbsync.repository.DataRepository; import com.sap.cx.boosters.commercedbsync.scheduler.DatabaseCopyScheduler; -import de.hybris.platform.cluster.PingBroadcastHandler; +import com.sap.cx.boosters.commercedbsync.scheduler.DatabaseCopySchedulerAlgorithm; +import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTaskRepository; +import de.hybris.bootstrap.ddl.DataBaseProvider; import de.hybris.platform.core.Registry; import de.hybris.platform.core.Tenant; import de.hybris.platform.jalo.JaloSession; -import de.hybris.platform.servicelayer.cluster.ClusterService; import de.hybris.platform.servicelayer.event.EventService; +import de.hybris.platform.util.Config; import org.apache.commons.collections4.CollectionUtils; import org.apache.commons.lang3.time.DurationFormatUtils; import org.apache.commons.lang3.tuple.Pair; import com.sap.cx.boosters.commercedbsync.MigrationProgress; import com.sap.cx.boosters.commercedbsync.MigrationStatus; -import com.sap.cx.boosters.commercedbsync.adapter.DataRepositoryAdapter; -import com.sap.cx.boosters.commercedbsync.adapter.impl.ContextualDataRepositoryAdapter; -import com.sap.cx.boosters.commercedbsync.context.CopyContext; import com.sap.cx.boosters.commercedbsync.context.MigrationContext; -import com.sap.cx.boosters.commercedbsync.events.CopyDatabaseTableEvent; import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTask; -import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTaskRepository; -import com.sap.cx.boosters.commercedbsync.views.TableViewGenerator; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.MDC; import org.springframework.core.io.ClassPathResource; -import java.sql.SQLException; import java.time.Duration; import java.time.Instant; import java.time.OffsetDateTime; import java.time.ZoneOffset; import java.time.temporal.ChronoUnit; import java.util.ArrayList; -import java.util.Collections; import java.util.Comparator; import java.util.List; import java.util.Map; @@ -50,7 +49,6 @@ import static com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants.MDC_CLUSTERID; import static com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants.MDC_PIPELINE; -import static org.mockito.ArgumentMatchers.contains; /** * Scheduler for Cluster Based Migrations @@ -61,10 +59,10 @@ public class CustomClusterDatabaseCopyScheduler implements DatabaseCopyScheduler private EventService eventService; - private ClusterService clusterService; - private DatabaseCopyTaskRepository databaseCopyTaskRepository; + private DatabaseCopySchedulerAlgorithm databaseCopySchedulerAlgorithm; + /** * Schedules a Data Copy Task for each table across all the available nodes * @@ -73,98 +71,119 @@ public class CustomClusterDatabaseCopyScheduler implements DatabaseCopyScheduler */ @Override public void schedule(CopyContext context) throws Exception { - String sqlScript = ""; - // ORACLE_TARGET - START - if (context.getMigrationContext().getDataTargetRepository().getDatabaseProvider().isOracleUsed()) { - sqlScript = 
"/sql/createSchedulerTablesOracle.sql"; - } else if(context.getMigrationContext().getDataTargetRepository().getDatabaseProvider().isHanaUsed()){ - sqlScript = "/sql/createSchedulerTablesHana.sql"; - } else if(context.getMigrationContext().getDataTargetRepository().getDatabaseProvider().isPostgreSqlUsed()){ - sqlScript = "/sql/createSchedulerTablesPostGres.sql"; - } else { - sqlScript = "/sql/createSchedulerTables.sql"; - } + databaseCopySchedulerAlgorithm.reset(); + logMigrationContext(context.getMigrationContext()); - // ORACLE_TARGET - END - context.getMigrationContext().getDataTargetRepository().runSqlScript(new ClassPathResource(sqlScript)); - int ownNodeId = clusterService.getClusterId(); + + final DataRepository repository = !context.getMigrationContext().isDataExportEnabled() + ? context.getMigrationContext().getDataTargetRepository() + : context.getMigrationContext().getDataSourceRepository(); + final DataBaseProvider databaseProvider = repository.getDatabaseProvider(); + final ClassPathResource scriptResource = new ClassPathResource( + String.format("/sql/createSchedulerTables%s.sql", databaseProvider)); + + if (!scriptResource.exists()) { + throw new IllegalStateException( + "Scheduler tables creation script for database " + databaseProvider + " not found!"); + } + + repository.runSqlScript(scriptResource); + + int ownNodeId = databaseCopySchedulerAlgorithm.getOwnNodeId(); if (!CollectionUtils.isEmpty(context.getCopyItems())) { databaseCopyTaskRepository.createMigrationStatus(context); - final List nodeIds = getClusterNodes(context); - int nodeIndex = 0; - DataRepositoryAdapter dataRepositoryAdapter = new ContextualDataRepositoryAdapter(context.getMigrationContext().getDataSourceRepository()); - List> itemsToSchedule = generateSchedulerItemList(context, dataRepositoryAdapter); + DataRepositoryAdapter dataRepositoryAdapter = new ContextualDataRepositoryAdapter( + context.getMigrationContext().getDataSourceRepository()); + List> itemsToSchedule = generateSchedulerItemList(context, + dataRepositoryAdapter); for (final Pair itemToSchedule : itemsToSchedule) { CopyContext.DataCopyItem dataCopyItem = itemToSchedule.getLeft(); final long sourceRowCount = itemToSchedule.getRight(); if (sourceRowCount > 0) { - if (nodeIndex >= (nodeIds.size())) { - nodeIndex = 0; - } - final int destinationNodeId = nodeIds.get(nodeIndex); + final int destinationNodeId = databaseCopySchedulerAlgorithm.next(); databaseCopyTaskRepository.scheduleTask(context, dataCopyItem, sourceRowCount, destinationNodeId); - nodeIndex++; } else { databaseCopyTaskRepository.scheduleTask(context, dataCopyItem, sourceRowCount, ownNodeId); databaseCopyTaskRepository.markTaskCompleted(context, dataCopyItem, "0"); - if(!context.getMigrationContext().isIncrementalModeEnabled() && context.getMigrationContext().isTruncateEnabled()) { - context.getMigrationContext().getDataTargetRepository().truncateTable(dataCopyItem.getTargetItem()); + if (!context.getMigrationContext().isIncrementalModeEnabled() + && context.getMigrationContext().isTruncateEnabled()) { + context.getMigrationContext().getDataTargetRepository() + .truncateTable(dataCopyItem.getTargetItem()); } } } startMonitorThread(context); - final CopyDatabaseTableEvent event = new CopyDatabaseTableEvent(ownNodeId, context.getMigrationId()); + final CopyDatabaseTableEvent event = new CopyDatabaseTableEvent(ownNodeId, context.getMigrationId(), + context.getPropertyOverrideMap()); eventService.publishEvent(event); } } - private void logMigrationContext(final MigrationContext 
context) { - if (context == null) { - return; - } - LOG.info("--------MIGRATION CONTEXT- START----------"); - LOG.info("isAddMissingColumnsToSchemaEnabled=" + context.isAddMissingColumnsToSchemaEnabled()); - LOG.info("isAddMissingTablesToSchemaEnabled=" + context.isAddMissingTablesToSchemaEnabled()); - LOG.info("isAuditTableMigrationEnabled=" + context.isAuditTableMigrationEnabled()); - LOG.info("isBulkCopyEnabled=" + context.isBulkCopyEnabled()); - LOG.info("isClusterMode=" + context.isClusterMode()); - LOG.info("isDeletionEnabled=" + context.isDeletionEnabled()); - LOG.info("isDisableAllIndexesEnabled=" + context.isDisableAllIndexesEnabled()); - LOG.info("isDropAllIndexesEnabled=" + context.isDropAllIndexesEnabled()); - LOG.info("isFailOnErrorEnabled=" + context.isFailOnErrorEnabled()); - LOG.info("isIncrementalModeEnabled=" + context.isIncrementalModeEnabled()); - LOG.info("isMigrationTriggeredByUpdateProcess=" + context.isMigrationTriggeredByUpdateProcess()); - LOG.info("isRemoveMissingColumnsToSchemaEnabled=" + context.isRemoveMissingColumnsToSchemaEnabled()); - LOG.info("isRemoveMissingTablesToSchemaEnabled=" + context.isRemoveMissingTablesToSchemaEnabled()); - LOG.info("isSchemaMigrationAutoTriggerEnabled=" + context.isSchemaMigrationAutoTriggerEnabled()); - LOG.info("isSchemaMigrationEnabled=" + context.isSchemaMigrationEnabled()); - LOG.info("isTruncateEnabled=" + context.isTruncateEnabled()); - LOG.info("getIncludedTables=" + context.getIncludedTables()); - LOG.info("getExcludedTables=" + context.getExcludedTables()); - LOG.info("getIncrementalTables=" + context.getIncrementalTables()); - LOG.info("getTruncateExcludedTables=" + context.getTruncateExcludedTables()); - LOG.info("getCustomTables=" + context.getCustomTables()); - LOG.info("getIncrementalTimestamp=" + context.getIncrementalTimestamp()); - LOG.info( - "Source TS Name=" + context.getDataSourceRepository().getDataSourceConfiguration().getTypeSystemName()); - LOG.info("Source TS Suffix =" - + context.getDataSourceRepository().getDataSourceConfiguration().getTypeSystemSuffix()); - LOG.info( - "Target TS Name=" + context.getDataTargetRepository().getDataSourceConfiguration().getTypeSystemName()); - LOG.info("Target TS Suffix =" - + context.getDataTargetRepository().getDataSourceConfiguration().getTypeSystemSuffix()); - LOG.info("getItemTypeViewNamePattern=" + context.getItemTypeViewNamePattern()); - - - LOG.info("--------MIGRATION CONTEXT- END----------"); - } - private List> generateSchedulerItemList(CopyContext context, DataRepositoryAdapter dataRepositoryAdapter) throws Exception { + @Override + public void resumeUnfinishedItems(CopyContext copyContext) throws Exception { + databaseCopySchedulerAlgorithm.reset(); + int ownNodeId = databaseCopySchedulerAlgorithm.getOwnNodeId(); + Set failedTasks = databaseCopyTaskRepository.findFailedTasks(copyContext); + for (DatabaseCopyTask failedTask : failedTasks) { + databaseCopyTaskRepository.rescheduleTask(copyContext, failedTask.getPipelinename(), + databaseCopySchedulerAlgorithm.next()); + } + databaseCopyTaskRepository.resetMigration(copyContext); + startMonitorThread(copyContext); + final CopyDatabaseTableEvent event = new CopyDatabaseTableEvent(ownNodeId, copyContext.getMigrationId(), + copyContext.getPropertyOverrideMap()); + eventService.publishEvent(event); + } + + private void logMigrationContext(final MigrationContext context) { + if (!Config.getBoolean("migration.log.context.details", true)) { + return; + } + + LOG.info("--------MIGRATION CONTEXT- START----------"); + 
LOG.info("isAddMissingColumnsToSchemaEnabled=" + context.isAddMissingColumnsToSchemaEnabled()); + LOG.info("isAddMissingTablesToSchemaEnabled=" + context.isAddMissingTablesToSchemaEnabled()); + LOG.info("isAuditTableMigrationEnabled=" + context.isAuditTableMigrationEnabled()); + LOG.info("isClusterMode=" + context.isClusterMode()); + LOG.info("isDeletionEnabled=" + context.isDeletionEnabled()); + LOG.info("isDisableAllIndexesEnabled=" + context.isDisableAllIndexesEnabled()); + LOG.info("isDropAllIndexesEnabled=" + context.isDropAllIndexesEnabled()); + LOG.info("isFailOnErrorEnabled=" + context.isFailOnErrorEnabled()); + LOG.info("isIncrementalModeEnabled=" + context.isIncrementalModeEnabled()); + LOG.info("isMigrationTriggeredByUpdateProcess=" + context.isMigrationTriggeredByUpdateProcess()); + LOG.info("isRemoveMissingColumnsToSchemaEnabled=" + context.isRemoveMissingColumnsToSchemaEnabled()); + LOG.info("isRemoveMissingTablesToSchemaEnabled=" + context.isRemoveMissingTablesToSchemaEnabled()); + LOG.info("isSchemaMigrationAutoTriggerEnabled=" + context.isSchemaMigrationAutoTriggerEnabled()); + LOG.info("isSchemaMigrationEnabled=" + context.isSchemaMigrationEnabled()); + LOG.info("isTruncateEnabled=" + context.isTruncateEnabled()); + LOG.info("getIncludedTables=" + context.getIncludedTables()); + LOG.info("getExcludedTables=" + context.getExcludedTables()); + LOG.info("getIncrementalTables=" + context.getIncrementalTables()); + LOG.info("getTruncateExcludedTables=" + context.getTruncateExcludedTables()); + LOG.info("getCustomTables=" + context.getCustomTables()); + LOG.info("getIncrementalTimestamp=" + context.getIncrementalTimestamp()); + LOG.info( + "Source TS Name=" + context.getDataSourceRepository().getDataSourceConfiguration().getTypeSystemName()); + LOG.info("Source TS Suffix=" + + context.getDataSourceRepository().getDataSourceConfiguration().getTypeSystemSuffix()); + LOG.info( + "Target TS Name=" + context.getDataTargetRepository().getDataSourceConfiguration().getTypeSystemName()); + LOG.info("Target TS Suffix=" + + context.getDataTargetRepository().getDataSourceConfiguration().getTypeSystemSuffix()); + LOG.info("getItemTypeViewNamePattern=" + context.getItemTypeViewNamePattern()); + + LOG.info("--------MIGRATION CONTEXT- END----------"); + } + + private List> generateSchedulerItemList(CopyContext context, + DataRepositoryAdapter dataRepositoryAdapter) throws Exception { List> pairs = new ArrayList<>(); for (CopyContext.DataCopyItem copyItem : context.getCopyItems()) { - pairs.add(Pair.of(copyItem, dataRepositoryAdapter.getRowCount(context.getMigrationContext(), copyItem.getSourceItem()))); + pairs.add(Pair.of(copyItem, + dataRepositoryAdapter.getRowCount(context.getMigrationContext(), copyItem.getSourceItem()))); } - //we sort the items to make sure big tables are assigned to nodes in a fair way - return pairs.stream().sorted((p1, p2) -> Long.compare(p1.getRight(), p2.getRight())).collect(Collectors.toList()); + // we sort the items to make sure big tables are assigned to nodes in a fair way + return pairs.stream().sorted(Comparator.comparingLong(Pair::getRight)).collect(Collectors.toList()); } /** @@ -188,7 +207,8 @@ public MigrationStatus getCurrentState(CopyContext context, OffsetDateTime since if (!since.equals(OffsetDateTime.MAX)) { Set updated = databaseCopyTaskRepository.getUpdatedTasks(context, since); List statusUpdates = new ArrayList<>(updated); - statusUpdates.sort(Comparator.comparing(DatabaseCopyTask::getLastUpdate).thenComparing(DatabaseCopyTask::getPipelinename)); 
+ statusUpdates.sort(Comparator.comparing(DatabaseCopyTask::getLastUpdate) + .thenComparing(DatabaseCopyTask::getPipelinename)); status.setStatusUpdates(statusUpdates); } return status; @@ -197,47 +217,23 @@ public MigrationStatus getCurrentState(CopyContext context, OffsetDateTime since @Override public boolean isAborted(CopyContext context) throws Exception { MigrationStatus current = this.databaseCopyTaskRepository.getMigrationStatus(context); - return MigrationProgress.ABORTED.equals(current.getStatus()); + return MigrationProgress.ABORTED == current.getStatus(); } @Override public void abort(CopyContext context) throws Exception { - this.databaseCopyTaskRepository.setMigrationStatus(context, MigrationProgress.ABORTED); - stopPerformanceProfiling(context); - } - - private void stopPerformanceProfiling(CopyContext context) { - if (context.getPerformanceProfiler() != null) { - context.getPerformanceProfiler().reset(); - } - } - - private List getClusterNodes(CopyContext context) { - if (!context.getMigrationContext().isClusterMode()) { - return Collections.singletonList(clusterService.getClusterId()); - } - final List nodeIds = new ArrayList<>(); - try { - // Same code as the hac cluster overview page - PingBroadcastHandler pingBroadcastHandler = PingBroadcastHandler.getInstance(); - pingBroadcastHandler.getNodes().forEach(i -> nodeIds.add(i.getNodeID())); - } catch (final Exception e) { - LOG.warn("Using single cluster node because an error was encountered while fetching cluster nodes information: {{}}", e.getMessage(), e); - } - if (CollectionUtils.isEmpty(nodeIds)) { - nodeIds.add(clusterService.getClusterId()); - } - return nodeIds; - } - - public void setClusterService(ClusterService clusterService) { - this.clusterService = clusterService; + this.databaseCopyTaskRepository.setMigrationStatus(context, MigrationProgress.RUNNING, + MigrationProgress.ABORTED); } public void setDatabaseCopyTaskRepository(DatabaseCopyTaskRepository databaseCopyTaskRepository) { this.databaseCopyTaskRepository = databaseCopyTaskRepository; } + public void setDatabaseCopySchedulerAlgorithm(DatabaseCopySchedulerAlgorithm databaseCopySchedulerAlgorithm) { + this.databaseCopySchedulerAlgorithm = databaseCopySchedulerAlgorithm; + } + public void setEventService(EventService eventService) { this.eventService = eventService; } @@ -285,12 +281,13 @@ private void pollState() throws Exception { lastUpdate = OffsetDateTime.now(ZoneOffset.UTC); // setting deletion - if(context.getMigrationContext().isDeletionEnabled()){ + if (context.getMigrationContext().isDeletionEnabled()) { currentState.setDeletionEnabled(true); } logState(currentState); - Duration elapsedTillLastUpdate = Duration.between(currentState.getLastUpdate().toInstant(ZoneOffset.UTC), Instant.now()); + Duration elapsedTillLastUpdate = Duration + .between(currentState.getLastUpdate().toInstant(ZoneOffset.UTC), Instant.now()); int stalledTimeout = context.getMigrationContext().getStalledTimeout(); if (elapsedTillLastUpdate.compareTo(Duration.of(stalledTimeout, ChronoUnit.SECONDS)) >= 0) { LOG.error("Migration stalled!"); @@ -304,7 +301,8 @@ private void pollState() throws Exception { * Notifies nodes about termination */ private void notifyFinished() { - final CopyCompleteEvent completeEvent = new CopyCompleteEvent(clusterService.getClusterId(), context.getMigrationId()); + final CopyCompleteEvent completeEvent = new CopyCompleteEvent(databaseCopySchedulerAlgorithm.getOwnNodeId(), + context.getMigrationId()); eventService.publishEvent(completeEvent); } @@ 
-316,27 +314,33 @@ private void notifyFinished() { private void logState(MigrationStatus status) { for (final DatabaseCopyTask copyTask : status.getStatusUpdates()) { try (MDC.MDCCloseable ignore = MDC.putCloseable(MDC_PIPELINE, copyTask.getPipelinename()); - MDC.MDCCloseable ignore2 = MDC.putCloseable(MDC_CLUSTERID, String.valueOf(copyTask.getTargetnodeId()))) { + MDC.MDCCloseable ignore2 = MDC.putCloseable(MDC_CLUSTERID, + String.valueOf(copyTask.getTargetnodeId()))) { if (copyTask.isFailure()) { - LOG.error("{}/{} processed. FAILED in {{}}. Cause: {{}} Last Update: {{}}", copyTask.getTargetrowcount(), copyTask.getSourcerowcount(), copyTask.getDuration(), copyTask.getError(), copyTask.getLastUpdate()); + LOG.error("{}/{} processed. FAILED in {{}}. Cause: {{}} Last Update: {{}}", + copyTask.getTargetrowcount(), copyTask.getSourcerowcount(), copyTask.getDuration(), + copyTask.getError(), copyTask.getLastUpdate()); } else if (copyTask.isCompleted()) { - LOG.info("{}/{} processed. Completed in {{}}. Last Update: {{}}", copyTask.getTargetrowcount(), copyTask.getSourcerowcount(), copyTask.getDuration(), copyTask.getLastUpdate()); + LOG.info("{}/{} processed. Completed in {{}}. Last Update: {{}}", copyTask.getTargetrowcount(), + copyTask.getSourcerowcount(), copyTask.getDuration(), copyTask.getLastUpdate()); } else { - LOG.debug("{}/{} processed. Last Update: {{}}", copyTask.getTargetrowcount(), copyTask.getSourcerowcount(), copyTask.getLastUpdate()); + LOG.debug("{}/{} processed. Last Update: {{}}", copyTask.getTargetrowcount(), + copyTask.getSourcerowcount(), copyTask.getLastUpdate()); } } } - LOG.info("{}/{} tables migrated. {} failed. State: {}", status.getCompletedTasks(), status.getTotalTasks(), status.getFailedTasks(), status.getStatus()); + LOG.info("{}/{} tables migrated. {} failed. State: {}", status.getCompletedTasks(), status.getTotalTasks(), + status.getFailedTasks(), status.getStatus()); if (status.isCompleted()) { String endState = "finished"; if (status.isFailed()) { endState = "FAILED"; } - LOG.info("Migration {} ({}) in {}", endState, status.getStatus(), DurationFormatUtils.formatDurationHMS(Duration.between(status.getStart(), status.getEnd()).toMillis())); + LOG.info("Migration {} ({}) in {}", endState, status.getStatus(), DurationFormatUtils + .formatDurationHMS(Duration.between(status.getStart(), status.getEnd()).toMillis())); } } - protected void prepareThread() { MDC.setContextMap(contextMap); diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/impl/RoundRobinClusterSchedulerAlgorithm.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/impl/RoundRobinClusterSchedulerAlgorithm.java new file mode 100644 index 0000000..f4915ad --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/scheduler/impl/RoundRobinClusterSchedulerAlgorithm.java @@ -0,0 +1,84 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.scheduler.impl; + +import com.google.common.collect.ImmutableList; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; +import com.sap.cx.boosters.commercedbsync.scheduler.DatabaseCopySchedulerAlgorithm; +import de.hybris.platform.cluster.PingBroadcastHandler; +import de.hybris.platform.servicelayer.cluster.ClusterService; +import org.apache.commons.collections4.CollectionUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +public class RoundRobinClusterSchedulerAlgorithm implements DatabaseCopySchedulerAlgorithm { + + private static final Logger LOG = LoggerFactory.getLogger(RoundRobinClusterSchedulerAlgorithm.class); + + private final MigrationContext migrationContext; + + private final ClusterService clusterService; + + private List nodeIds = null; + + private int nodeIndex = 0; + + public RoundRobinClusterSchedulerAlgorithm(MigrationContext migrationContext, ClusterService clusterService) { + this.migrationContext = migrationContext; + this.clusterService = clusterService; + } + + @Override + public int getOwnNodeId() { + return clusterService.getClusterId(); + } + + @Override + public List getNodeIds() { + if (nodeIds == null) { + nodeIds = ImmutableList.copyOf(detectClusterNodes()); + } + return nodeIds; + } + + @Override + public int next() { + if (nodeIndex >= (getNodeIds().size())) { + nodeIndex = 0; + } + return getNodeIds().get(nodeIndex++); + } + + public void reset() { + nodeIds = null; + nodeIndex = 0; + } + + private List detectClusterNodes() { + if (!migrationContext.isClusterMode()) { + return Collections.singletonList(clusterService.getClusterId()); + } + final List nodeIdList = new ArrayList<>(); + try { + // Same code as the hac cluster overview page + PingBroadcastHandler pingBroadcastHandler = PingBroadcastHandler.getInstance(); + pingBroadcastHandler.getNodes().forEach(i -> nodeIdList.add(i.getNodeID())); + } catch (final Exception e) { + LOG.warn( + "Using single cluster node because an error was encountered while fetching cluster nodes information: {{}}", + e.getMessage(), e); + } + if (CollectionUtils.isEmpty(nodeIdList)) { + nodeIdList.add(clusterService.getClusterId()); + } + return nodeIdList; + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseCopyTaskRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseCopyTaskRepository.java index a4d0131..f5bbd87 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseCopyTaskRepository.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseCopyTaskRepository.java @@ -1,23 +1,33 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync.service; import com.sap.cx.boosters.commercedbsync.MigrationProgress; import com.sap.cx.boosters.commercedbsync.MigrationStatus; import com.sap.cx.boosters.commercedbsync.context.CopyContext; import com.sap.cx.boosters.commercedbsync.context.CopyContext.DataCopyItem; -import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTask; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import java.time.OffsetDateTime; +import java.util.Collection; +import java.util.Optional; import java.util.Set; /** * Repository to manage Migration Status and Tasks */ public interface DatabaseCopyTaskRepository { + /** + * Get migration ID of most recent migration found + * + * @param context + * @return migration ID + */ + String getMostRecentMigrationID(MigrationContext context); /** * Creates a new DB Migration status record @@ -27,6 +37,14 @@ public interface DatabaseCopyTaskRepository { */ void createMigrationStatus(CopyContext context) throws Exception; + /** + * Resets the values of the current migration to start it again + * + * @param context + * @throws Exception + */ + void resetMigration(CopyContext context) throws Exception; + /** * Updates the Migration status record * @@ -36,7 +54,6 @@ public interface DatabaseCopyTaskRepository { */ void setMigrationStatus(CopyContext context, MigrationProgress progress) throws Exception; - /** * Updates the Migration status record from one status to another * @@ -57,16 +74,43 @@ public interface DatabaseCopyTaskRepository { */ MigrationStatus getMigrationStatus(CopyContext context) throws Exception; + /** + * Retrieves the currently running migration status + * + * @return status in case of running migration, null otherwise + * @throws Exception + */ + MigrationStatus getRunningMigrationStatus(MigrationContext context); + /** * Schedules a copy Task * - * @param context the migration context - * @param copyItem the item to copy + * @param context + * the migration context + * @param copyItem + * the item to copy * @param sourceRowCount - * @param targetNode the nodeId to perform the copy + * @param targetNode + * the nodeId to perform the copy * @throws Exception */ - void scheduleTask(CopyContext context, CopyContext.DataCopyItem copyItem, long sourceRowCount, int targetNode) throws Exception; + void scheduleTask(CopyContext context, CopyContext.DataCopyItem copyItem, long sourceRowCount, int targetNode) + throws Exception; + + void rescheduleTask(CopyContext context, String pipelineName, int targetNodeId) throws Exception; + + void scheduleBatch(CopyContext context, CopyContext.DataCopyItem copyItem, int batchId, Object lowerBoundary, + Object upperBoundary) throws Exception; + + void markBatchCompleted(CopyContext context, CopyContext.DataCopyItem copyItem, int batchId) throws Exception; + + void resetPipelineBatches(CopyContext context, CopyContext.DataCopyItem copyItem) throws Exception; + + Set findPendingBatchesForPipeline(CopyContext context, CopyContext.DataCopyItem item) + throws Exception; + + Optional findPipeline(CopyContext context, CopyContext.DataCopyItem dataCopyItem) + throws Exception; /** * Retrieves all pending tasks @@ -77,6 +121,8 @@ public interface DatabaseCopyTaskRepository { */ Set findPendingTasks(CopyContext context) throws Exception; + Set findFailedTasks(CopyContext context) throws Exception; + /** * Updates progress on a Task * @@ -97,6 +143,8 @@ public interface DatabaseCopyTaskRepository { */ void markTaskCompleted(CopyContext 
context, CopyContext.DataCopyItem copyItem, String duration) throws Exception; + void markTaskTruncated(CopyContext context, CopyContext.DataCopyItem copyItem) throws Exception; + /** * Marks the Task as Failed * @@ -107,25 +155,33 @@ public interface DatabaseCopyTaskRepository { */ void markTaskFailed(CopyContext context, CopyContext.DataCopyItem copyItem, Exception error) throws Exception; + void updateTaskCopyMethod(CopyContext context, CopyContext.DataCopyItem copyItem, String copyMethod) + throws Exception; + + void updateTaskKeyColumns(CopyContext context, CopyContext.DataCopyItem copyItem, Collection keyColumns) + throws Exception; + /** * Gets all updated Tasks * * @param context - * @param since offset + * @param since + * offset * @return * @throws Exception */ Set getUpdatedTasks(CopyContext context, OffsetDateTime since) throws Exception; Set getAllTasks(CopyContext context) throws Exception; - /** - * ORACLE_TARGET -- added duration ins econds Marks the Task as Completed - * - * @param context - * @param copyItem - * @param duration - * @throws Exception - */ -void markTaskCompleted(CopyContext context, DataCopyItem copyItem, String duration, float durationseconds) - throws Exception; + + /** + * ORACLE_TARGET -- added duration ins econds Marks the Task as Completed + * + * @param context + * @param copyItem + * @param duration + * @throws Exception + */ + void markTaskCompleted(CopyContext context, DataCopyItem copyItem, String duration, float durationseconds) + throws Exception; } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationCopyService.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationCopyService.java index 1694a53..d2c81c2 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationCopyService.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationCopyService.java @@ -1,13 +1,13 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync.service; import com.sap.cx.boosters.commercedbsync.context.CopyContext; - /** * Actual Service to perform the Migration */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationDataTypeMapperService.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationDataTypeMapperService.java index a1205b6..bfd1627 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationDataTypeMapperService.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationDataTypeMapperService.java @@ -1,14 +1,14 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync.service; import java.io.IOException; import java.sql.SQLException; - /** * Service to deal with Mapping different types between Databases */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationReportService.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationReportService.java index b17a958..dae8cfe 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationReportService.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationReportService.java @@ -1,8 +1,9 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync.service; import com.sap.cx.boosters.commercedbsync.MigrationReport; diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationReportStorageService.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationReportStorageService.java index 677b3fa..341c40c 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationReportStorageService.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationReportStorageService.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationService.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationService.java index c2a9e44..39da28a 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationService.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationService.java @@ -1,34 +1,59 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync.service; import com.sap.cx.boosters.commercedbsync.MigrationReport; import com.sap.cx.boosters.commercedbsync.MigrationStatus; +import com.sap.cx.boosters.commercedbsync.context.LaunchOptions; import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import java.time.OffsetDateTime; +/** + * Entry point to trigger a migration process and track the status + */ public interface DatabaseMigrationService { /** * Asynchronously start a new database migration * - * @param context Migration configuration + * @param context + * Migration configuration + * @param launchOptions * @return migrationID of the started migration - * @throws Exception if anything goes wrong during start + * @throws Exception + * if anything goes wrong during start + */ + String startMigration(MigrationContext context, LaunchOptions launchOptions) throws Exception; + + /** + * Asynchronously resumes the migration and continues with unfinished items.
+ * + * @param context + * Migration configuration + * @param launchOptions + * @param migrationID + * The id of the migration to resume + * @throws Exception + * if anything goes wrong during start */ - String startMigration(MigrationContext context) throws Exception; + void resumeUnfinishedMigration(MigrationContext context, LaunchOptions launchOptions, String migrationID) + throws Exception; // TODO use this from hac/job /** - * Stops the the database migration process. - * The process is stopped on all nodes, in case clustering is used. + * Stops the database migration process. The process is stopped on all + * nodes, in case clustering is used. * - * @param context Migration configuration - * @param migrationID ID of the migration process that should be stopped - * @throws Exception if anything goes wrong + * @param context + * Migration configuration + * @param migrationID + * ID of the migration process that should be stopped + * @throws Exception + * if anything goes wrong */ void stopMigration(MigrationContext context, String migrationID) throws Exception; @@ -47,23 +72,26 @@ public interface DatabaseMigrationService { * * @param context * @param migrationID - * @param since Get all updates since this timestamp. Must be in UTC! + * @param since + * Get all updates since this timestamp. Must be in UTC! * @return * @throws Exception */ - MigrationStatus getMigrationState(MigrationContext context, String migrationID, OffsetDateTime since) throws Exception; + MigrationStatus getMigrationState(MigrationContext context, String migrationID, OffsetDateTime since) + throws Exception; MigrationReport getMigrationReport(MigrationContext context, String migrationID) throws Exception; String getMigrationID(MigrationContext migrationContext); - + /** * Busy wait until migration is done. Use only for tests! * * @param context * @param migrationID * @return - * @throws Exception when migration was not successful + * @throws Exception + * when migration was not successful */ MigrationStatus waitForFinish(MigrationContext context, String migrationID) throws Exception; } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationSynonymService.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationSynonymService.java deleted file mode 100644 index 50f97a2..0000000 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseMigrationSynonymService.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. - * License: Apache-2.0 - * - */ - -package com.sap.cx.boosters.commercedbsync.service; - -import com.sap.cx.boosters.commercedbsync.repository.DataRepository; - -public interface DatabaseMigrationSynonymService { - - /** - * CCv2 Workaround: ccv2 builder does not support prefixes yet. - * creating synonym on ydeployments -> prefix_yeployments - * creating synonym on attributedescriptors -> prefix_attributedescriptors.
- * - * @param repository - * @param prefix - * @throws Exception - */ - void recreateSynonyms(DataRepository repository, String prefix) throws Exception; -} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseSchemaDifferenceService.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseSchemaDifferenceService.java index daecf92..487572b 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseSchemaDifferenceService.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/DatabaseSchemaDifferenceService.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -26,5 +26,6 @@ public interface DatabaseSchemaDifferenceService { * @param migrationContext * @return */ - DefaultDatabaseSchemaDifferenceService.SchemaDifferenceResult getDifference(MigrationContext migrationContext) throws Exception; + DefaultDatabaseSchemaDifferenceService.SchemaDifferenceResult getDifference(MigrationContext migrationContext) + throws Exception; } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/BlobDatabaseMigrationReportStorageService.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/BlobDatabaseMigrationReportStorageService.java index ed59da6..5bd05bb 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/BlobDatabaseMigrationReportStorageService.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/BlobDatabaseMigrationReportStorageService.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -27,31 +27,32 @@ public class BlobDatabaseMigrationReportStorageService implements DatabaseMigrationReportStorageService { - private static final Logger LOG = LoggerFactory.getLogger(BlobDatabaseMigrationReportStorageService.class.getName()); - - private static final String ROOT_CONTAINER = "migration"; + private static final Logger LOG = LoggerFactory + .getLogger(BlobDatabaseMigrationReportStorageService.class.getName()); private CloudBlobClient cloudBlobClient; private MigrationContext migrationContext; protected void init() throws Exception { - CloudStorageAccount account = CloudStorageAccount.parse(migrationContext.getMigrationReportConnectionString()); + LOG.info("Connecting to blob storage {}", migrationContext.getFileStorageConnectionString()); + CloudStorageAccount account = CloudStorageAccount.parse(migrationContext.getFileStorageConnectionString()); this.cloudBlobClient = account.createCloudBlobClient(); } @Override public void store(String fileName, InputStream inputStream) throws Exception { - String path = fileName; + final String containerName = migrationContext.getFileStorageContainerName(); if (inputStream != null) { - CloudBlockBlob blob = getContainer(ROOT_CONTAINER, true).getBlockBlobReference(path); + CloudBlockBlob blob = getContainer(containerName, true).getBlockBlobReference(fileName); byte[] bytes = IOUtils.toByteArray(inputStream); ByteArrayInputStream bis = new ByteArrayInputStream(bytes); blob.upload(bis, bytes.length); bis.close(); - LOG.info("File {} written to blob storage at {}/{}", path, ROOT_CONTAINER, path); + LOG.info("File {} written to blob storage at {}/{}", fileName, containerName, fileName); } else { - throw new IllegalArgumentException(String.format("Input Stream is null for root '%s' and path '%s'", ROOT_CONTAINER, path)); + throw new IllegalArgumentException( + String.format("Input Stream is null for root '%s' and path '%s'", containerName, fileName)); } } @@ -65,17 +66,21 @@ protected CloudBlobContainer getContainer(String name, boolean createIfNotExists public List listAllReports() throws Exception { getCloudBlobClient(); - Iterable migrationBlobs = cloudBlobClient.getContainerReference(ROOT_CONTAINER).listBlobs(); + final String containerName = migrationContext.getFileStorageContainerName(); + Iterable migrationBlobs = cloudBlobClient.getContainerReference(containerName).listBlobs(); List result = new ArrayList<>(); migrationBlobs.forEach(blob -> { - result.add((CloudBlockBlob) blob); + if (blob instanceof CloudBlockBlob && ((CloudBlockBlob) blob).getName().endsWith(".json")) { + result.add((CloudBlockBlob) blob); + } }); return result; } public byte[] getReport(String reportId) throws Exception { checkReportIdValid(reportId); - CloudBlob blob = cloudBlobClient.getContainerReference(ROOT_CONTAINER).getBlobReferenceFromServer(reportId); + final String containerName = migrationContext.getFileStorageContainerName(); + CloudBlob blob = cloudBlobClient.getContainerReference(containerName).getBlobReferenceFromServer(reportId); byte[] output = new byte[blob.getStreamWriteSizeInBytes()]; blob.downloadToByteArray(output, 0); return output; diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseCopyTaskRepository.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseCopyTaskRepository.java index 143441e..b7d62f8 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseCopyTaskRepository.java +++ 
b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseCopyTaskRepository.java @@ -1,22 +1,31 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync.service.impl; +import com.google.common.base.Joiner; +import com.google.common.base.Splitter; import com.google.gson.Gson; import com.google.gson.reflect.TypeToken; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import com.sap.cx.boosters.commercedbsync.performance.PerformanceCategory; import com.sap.cx.boosters.commercedbsync.performance.PerformanceRecorder; import com.sap.cx.boosters.commercedbsync.performance.PerformanceUnit; +import com.sap.cx.boosters.commercedbsync.repository.DataRepository; import de.hybris.platform.servicelayer.cluster.ClusterService; import org.apache.commons.lang3.StringUtils; import com.sap.cx.boosters.commercedbsync.MigrationProgress; import com.sap.cx.boosters.commercedbsync.MigrationStatus; import com.sap.cx.boosters.commercedbsync.context.CopyContext; +import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyBatch; import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTask; import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTaskRepository; +import org.apache.commons.lang3.exception.ExceptionUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; import java.sql.Connection; import java.sql.PreparedStatement; @@ -27,33 +36,67 @@ import java.time.LocalDateTime; import java.time.OffsetDateTime; import java.util.Calendar; +import java.util.Collection; import java.util.Collections; import java.util.HashSet; +import java.util.LinkedHashSet; import java.util.Map; import java.util.Objects; +import java.util.Optional; import java.util.Set; import java.util.TimeZone; -import javax.sql.DataSource; - +import static com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants.MIGRATION_TABLESPREFIX; /** - * Repository to manage the status on of the migration copy tasks across the cluster + * Repository to manage the status of the migration copy tasks across the + * cluster */ public class DefaultDatabaseCopyTaskRepository implements DatabaseCopyTaskRepository { + private static final Logger LOG = LoggerFactory.getLogger(DefaultDatabaseCopyTaskRepository.class); + private ClusterService clusterService; + private static final String TABLECOPYSTATUS = MIGRATION_TABLESPREFIX + "TABLECOPYSTATUS"; + private static final String TABLECOPYTASKS = MIGRATION_TABLESPREFIX + "TABLECOPYTASKS"; + private static final String TABLECOPYBATCHES = MIGRATION_TABLESPREFIX + "TABLECOPYBATCHES"; + @Override - public void createMigrationStatus(CopyContext context) throws Exception { - String insert = "INSERT INTO MIGRATIONTOOLKIT_TABLECOPYSTATUS (migrationId, total) VALUES (?, ?)"; + public String getMostRecentMigrationID(MigrationContext context) { + String query = "SELECT migrationId FROM " + TABLECOPYSTATUS; try (Connection conn = getConnection(context); - PreparedStatement stmt = conn.prepareStatement(insert) - ) { + PreparedStatement stmt = conn.prepareStatement(query); + ResultSet rs = stmt.executeQuery()) { + if (rs.next()) { + return rs.getString("migrationId"); + } + } catch (final Exception e) { + LOG.error("Couldn't fetch `migrationId` due to: {}", ExceptionUtils.getRootCauseMessage(e)); + } + return null; + } + + @Override + public void
createMigrationStatus(CopyContext context) throws Exception { + String insert = "INSERT INTO " + TABLECOPYSTATUS + " (migrationId, total) VALUES (?, ?)"; + try (Connection conn = getConnection(context); PreparedStatement stmt = conn.prepareStatement(insert)) { stmt.setObject(1, context.getMigrationId()); stmt.setObject(2, context.getCopyItems().size()); stmt.executeUpdate(); - conn.commit(); + } + } + + @Override + public void resetMigration(CopyContext context) throws Exception { + String update = "UPDATE " + TABLECOPYSTATUS + + " SET completed = total - failed, status = ?, failed=?, lastUpdate=? WHERE migrationId = ?"; + try (Connection conn = getConnection(context); PreparedStatement stmt = conn.prepareStatement(update)) { + stmt.setObject(1, MigrationProgress.RUNNING.name()); + stmt.setObject(2, 0); + stmt.setObject(3, now()); + stmt.setObject(4, context.getMigrationId()); + stmt.executeUpdate(); } } @@ -62,30 +105,22 @@ public void setMigrationStatus(CopyContext context, MigrationProgress progress) setMigrationStatus(context, MigrationProgress.RUNNING, progress); } - @Override - public boolean setMigrationStatus(CopyContext context, MigrationProgress from, MigrationProgress to) throws Exception - { - boolean updated = false; - final String update = "UPDATE MIGRATIONTOOLKIT_TABLECOPYSTATUS SET status = ? WHERE status = ? AND migrationId = ?"; - final DataSource dataSource = context.getMigrationContext().getDataTargetRepository().getDataSource(); - - try (Connection conn = dataSource.getConnection(); PreparedStatement stmt = conn.prepareStatement(update)) - { - stmt.setObject(1, to.name()); - stmt.setObject(2, from.name()); - stmt.setObject(3, context.getMigrationId()); - updated = stmt.executeUpdate() > 0; - } - - return updated; - } + @Override + public boolean setMigrationStatus(CopyContext context, MigrationProgress from, MigrationProgress to) + throws Exception { + final String update = "UPDATE " + TABLECOPYSTATUS + " SET status = ? WHERE status = ? 
AND migrationId = ?"; + try (Connection conn = getConnection(context); PreparedStatement stmt = conn.prepareStatement(update)) { + stmt.setObject(1, to.name()); + stmt.setObject(2, from.name()); + stmt.setObject(3, context.getMigrationId()); + return stmt.executeUpdate() > 0; + } + } @Override public MigrationStatus getMigrationStatus(CopyContext context) throws Exception { - String query = "SELECT * FROM MIGRATIONTOOLKIT_TABLECOPYSTATUS WHERE migrationId = ?"; - try (Connection conn = getConnection(context); - PreparedStatement stmt = conn.prepareStatement(query) - ) { + String query = "SELECT * FROM " + TABLECOPYSTATUS + " WHERE migrationId = ?"; + try (Connection conn = getConnection(context); PreparedStatement stmt = conn.prepareStatement(query)) { stmt.setObject(1, context.getMigrationId()); try (ResultSet rs = stmt.executeQuery()) { rs.next(); @@ -94,12 +129,29 @@ public MigrationStatus getMigrationStatus(CopyContext context) throws Exception } } + @Override + public MigrationStatus getRunningMigrationStatus(MigrationContext context) { + String query = "SELECT * FROM " + TABLECOPYSTATUS + " WHERE status = 'RUNNING'"; + try (Connection conn = getConnection(context); + PreparedStatement stmt = conn.prepareStatement(query); + ResultSet rs = stmt.executeQuery()) { + if (rs.next()) { + return convertToStatus(rs); + } + } catch (Exception e) { + LOG.debug("Failed to check current migration status due to: {}", e.getMessage()); + } + + return null; + } + /** - * @param rs result set to covert + * @param rs + * result set to convert * @return the equivalent Migration Status * @throws Exception */ - private MigrationStatus convertToStatus(ResultSet rs) throws Exception { + public MigrationStatus convertToStatus(ResultSet rs) throws Exception { MigrationStatus status = new MigrationStatus(); status.setMigrationID(rs.getString("migrationId")); status.setStart(getDateTime(rs, "startAt")); @@ -110,8 +162,10 @@ private MigrationStatus convertToStatus(ResultSet rs) throws Exception { status.setFailedTasks(rs.getInt("failed")); status.setStatus(MigrationProgress.valueOf(rs.getString("status"))); - status.setCompleted(status.getTotalTasks() == status.getCompletedTasks() || MigrationProgress.STALLED.equals(status.getStatus())); - status.setFailed(status.getFailedTasks() > 0 || MigrationProgress.STALLED.equals(status.getStatus())); + status.setCompleted(status.getTotalTasks() == status.getCompletedTasks() + || MigrationProgress.STALLED == status.getStatus()); + status.setFailed(status.getFailedTasks() > 0 || MigrationProgress.STALLED == status.getStatus()); + status.setAborted(MigrationProgress.ABORTED == status.getStatus()); status.setStatusUpdates(Collections.emptyList()); return status; @@ -122,13 +176,12 @@ private LocalDateTime getDateTime(ResultSet rs, String column) throws Exception return ts == null ?
null : ts.toLocalDateTime(); } - @Override - public void scheduleTask(CopyContext context, CopyContext.DataCopyItem copyItem, long sourceRowCount, int targetNode) throws Exception { - String insert = "INSERT INTO MIGRATIONTOOLKIT_TABLECOPYTASKS (targetnodeid, pipelinename, sourcetablename, targettablename, columnmap, migrationid, sourcerowcount, lastupdate) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"; - try (Connection conn = getConnection(context); - PreparedStatement stmt = conn.prepareStatement(insert) - ) { + public void scheduleTask(CopyContext context, CopyContext.DataCopyItem copyItem, long sourceRowCount, + int targetNode) throws Exception { + String insert = "INSERT INTO " + TABLECOPYTASKS + + " (targetnodeid, pipelinename, sourcetablename, targettablename, columnmap, migrationid, sourcerowcount, lastupdate) VALUES (?, ?, ?, ?, ?, ?, ?, ?)"; + try (Connection conn = getConnection(context); PreparedStatement stmt = conn.prepareStatement(insert)) { stmt.setObject(1, targetNode); stmt.setObject(2, copyItem.getPipelineName()); stmt.setObject(3, copyItem.getSourceItem()); @@ -138,7 +191,77 @@ public void scheduleTask(CopyContext context, CopyContext.DataCopyItem copyItem, stmt.setObject(7, sourceRowCount); setTimestamp(stmt, 8, now()); stmt.executeUpdate(); - conn.commit(); + } + } + + @Override + public void rescheduleTask(CopyContext context, String pipelineName, int targetNode) throws Exception { + String sql = "UPDATE " + TABLECOPYTASKS + + " SET failure='0', duration=NULL, error='', targetnodeid=?, lastupdate=? WHERE migrationId=? AND pipelinename=? "; + try (Connection connection = getConnection(context); + PreparedStatement stmt = connection.prepareStatement(sql)) { + stmt.setObject(1, targetNode); + setTimestamp(stmt, 2, now()); + stmt.setObject(3, context.getMigrationId()); + stmt.setObject(4, pipelineName); + stmt.executeUpdate(); + } + } + + @Override + public void scheduleBatch(CopyContext context, CopyContext.DataCopyItem copyItem, int batchId, Object lowerBoundary, + Object upperBoundary) throws Exception { + LOG.debug("Schedule Batch for {} with ID {}", copyItem.getPipelineName(), batchId); + String insert = "INSERT INTO " + TABLECOPYBATCHES + + " (migrationId, batchId, pipelinename, lowerBoundary, upperBoundary) VALUES (?, ?, ?, ?, ?)"; + try (Connection conn = getConnection(context); PreparedStatement stmt = conn.prepareStatement(insert)) { + stmt.setObject(1, context.getMigrationId()); + stmt.setObject(2, batchId); + stmt.setObject(3, copyItem.getPipelineName()); + stmt.setObject(4, lowerBoundary); + stmt.setObject(5, upperBoundary); + stmt.executeUpdate(); + } + } + + @Override + public void markBatchCompleted(CopyContext context, CopyContext.DataCopyItem copyItem, int batchId) + throws Exception { + LOG.debug("Mark batch completed for {} with ID {}", copyItem.getPipelineName(), batchId); + String insert = "DELETE FROM " + TABLECOPYBATCHES + " WHERE migrationId=? AND batchId=? 
AND pipelinename=?"; + try (Connection conn = getConnection(context); PreparedStatement stmt = conn.prepareStatement(insert)) { + stmt.setObject(1, context.getMigrationId()); + stmt.setObject(2, batchId); + stmt.setObject(3, copyItem.getPipelineName()); + // exactly one batch record should be affected + if (stmt.executeUpdate() != 1) { + throw new IllegalStateException("No (exact) match for batch with id '" + batchId + "' found."); + } + } + } + + @Override + public void resetPipelineBatches(CopyContext context, CopyContext.DataCopyItem copyItem) throws Exception { + String insert = "DELETE FROM " + TABLECOPYBATCHES + " WHERE migrationId=? AND pipelinename=?"; + try (Connection conn = getConnection(context); PreparedStatement stmt = conn.prepareStatement(insert)) { + stmt.setObject(1, context.getMigrationId()); + stmt.setObject(2, copyItem.getPipelineName()); + stmt.executeUpdate(); + } + } + + @Override + public Set findPendingBatchesForPipeline(CopyContext context, CopyContext.DataCopyItem item) + throws Exception { + String sql = "SELECT * FROM " + TABLECOPYBATCHES + + " WHERE migrationid=? AND pipelinename=? ORDER BY batchId ASC"; + try (Connection connection = getConnection(context); + PreparedStatement stmt = connection.prepareStatement(sql)) { + stmt.setObject(1, context.getMigrationId()); + stmt.setObject(2, item.getPipelineName()); + try (ResultSet resultSet = stmt.executeQuery()) { + return convertToBatch(resultSet); + } } } @@ -149,16 +272,60 @@ private Timestamp now() { } private Connection getConnection(CopyContext context) throws Exception { - return context.getMigrationContext().getDataTargetRepository().getConnection(); + return getConnection(context.getMigrationContext()); } + private Connection getConnection(MigrationContext context) throws Exception { + final DataRepository repository = !context.isDataExportEnabled() + ? context.getDataTargetRepository() + : context.getDataSourceRepository(); + /* + * if (!repository.getDatabaseProvider().isMssqlUsed()) { throw new + * IllegalStateException("Scheduler tables requires MSSQL database"); } + */ + return repository.getConnection(); + } + + @Override + public Optional findPipeline(CopyContext context, CopyContext.DataCopyItem dataCopyItem) + throws Exception { + String sql = "SELECT * FROM " + TABLECOPYTASKS + " WHERE targetnodeid=? AND migrationid=? AND pipelinename=?"; + try (Connection connection = getConnection(context); + PreparedStatement stmt = connection.prepareStatement(sql)) { + stmt.setObject(1, getTargetNodeId()); + stmt.setObject(2, context.getMigrationId()); + stmt.setObject(3, dataCopyItem.getPipelineName()); + try (ResultSet resultSet = stmt.executeQuery()) { + Set databaseCopyTasks = convertToTask(resultSet); + if (databaseCopyTasks.size() > 1) { + throw new IllegalStateException( + "Invalid scheduler table, cannot have same pipeline multiple times."); + } + return databaseCopyTasks.stream().findFirst(); + } + } + } @Override public Set findPendingTasks(CopyContext context) throws Exception { - String sql = "SELECT * from MIGRATIONTOOLKIT_TABLECOPYTASKS WHERE targetnodeid=? AND migrationid=? AND duration IS NULL ORDER BY sourcerowcount"; + String sql = "SELECT * FROM " + TABLECOPYTASKS + + " WHERE targetnodeid=? AND migrationid=? 
AND duration IS NULL ORDER BY sourcerowcount"; + try (Connection connection = getConnection(context); + PreparedStatement stmt = connection.prepareStatement(sql)) { + stmt.setObject(1, getTargetNodeId()); + stmt.setObject(2, context.getMigrationId()); + try (ResultSet resultSet = stmt.executeQuery()) { + return convertToTask(resultSet); + } + } + } + + @Override + public Set findFailedTasks(CopyContext context) throws Exception { + String sql = "SELECT * FROM " + TABLECOPYTASKS + + " WHERE targetnodeid=? AND migrationid=? AND (duration = '-1' AND failure = '1') ORDER BY sourcerowcount"; try (Connection connection = getConnection(context); - PreparedStatement stmt = connection.prepareStatement(sql) - ) { + PreparedStatement stmt = connection.prepareStatement(sql)) { stmt.setObject(1, getTargetNodeId()); stmt.setObject(2, context.getMigrationId()); try (ResultSet resultSet = stmt.executeQuery()) { @@ -168,17 +335,12 @@ public Set findPendingTasks(CopyContext context) throws Except } @Override - public void updateTaskProgress(CopyContext context, CopyContext.DataCopyItem copyItem, long itemCount) throws Exception { - String sql = "UPDATE MIGRATIONTOOLKIT_TABLECOPYTASKS " + - "SET targetrowcount=?, " + - "lastupdate=?, " + - "avgwriterrowthroughput=?, " + - "avgreaderrowthroughput=? " + - "WHERE targetnodeid=? " + - "AND migrationid=? " + - "AND pipelinename=?"; + public void updateTaskProgress(CopyContext context, CopyContext.DataCopyItem copyItem, long itemCount) + throws Exception { + String sql = "UPDATE " + TABLECOPYTASKS + + " SET targetrowcount=?, lastupdate=?, avgwriterrowthroughput=?, avgreaderrowthroughput=? WHERE targetnodeid=? AND migrationid=? AND pipelinename=?"; try (Connection connection = getConnection(context); - PreparedStatement stmt = connection.prepareStatement(sql)) { + PreparedStatement stmt = connection.prepareStatement(sql)) { stmt.setObject(1, itemCount); setTimestamp(stmt, 2, now()); stmt.setObject(3, getAvgPerformanceValue(context, PerformanceCategory.DB_WRITE, copyItem.getTargetItem())); @@ -187,7 +349,6 @@ public void updateTaskProgress(CopyContext context, CopyContext.DataCopyItem cop stmt.setObject(6, context.getMigrationId()); stmt.setObject(7, copyItem.getPipelineName()); stmt.executeUpdate(); - connection.commit(); } } @@ -196,53 +357,40 @@ protected void setTimestamp(PreparedStatement stmt, int i, Timestamp ts) throws } public void markTaskCompleted(final CopyContext context, final CopyContext.DataCopyItem copyItem, - final String duration) throws Exception { + final String duration) throws Exception { markTaskCompleted(context, copyItem, duration, 0); } + @Override - // ORACLE_TARGET - added durationInseconds public void markTaskCompleted(final CopyContext context, final CopyContext.DataCopyItem copyItem, - final String duration, final float durationseconds) throws Exception { + final String duration, final float durationseconds) throws Exception { Objects.requireNonNull(duration, "duration must not be null"); - String sql = "UPDATE MIGRATIONTOOLKIT_TABLECOPYTASKS " + - "SET duration=?, " + - "lastupdate=?, " + - "avgwriterrowthroughput=?, " + - "avgreaderrowthroughput=?, " + - "durationinseconds=? " + - "WHERE targetnodeid=? " + - "AND migrationid=? " + - "AND pipelinename=? " + - "AND duration IS NULL"; + // spotless:off + String sql = "UPDATE " + TABLECOPYTASKS + " SET duration=?, lastupdate=?, avgwriterrowthroughput=?, avgreaderrowthroughput=?, durationinseconds=? WHERE targetnodeid=? AND migrationid=? AND pipelinename=? 
AND duration IS NULL"; + // spotless:on try (Connection connection = getConnection(context); - PreparedStatement stmt = connection.prepareStatement(sql)) { + PreparedStatement stmt = connection.prepareStatement(sql)) { stmt.setObject(1, duration); setTimestamp(stmt, 2, now()); stmt.setObject(3, getAvgPerformanceValue(context, PerformanceCategory.DB_WRITE, copyItem.getTargetItem())); stmt.setObject(4, getAvgPerformanceValue(context, PerformanceCategory.DB_READ, copyItem.getSourceItem())); - // ORACLE_TARGET - added durationInseconds stmt.setFloat(5, durationseconds); stmt.setObject(6, getTargetNodeId()); stmt.setObject(7, context.getMigrationId()); stmt.setObject(8, copyItem.getPipelineName()); stmt.executeUpdate(); - connection.commit(); } mutePerformanceRecorder(context, copyItem); } @Override - public void markTaskFailed(CopyContext context, CopyContext.DataCopyItem copyItem, Exception error) throws Exception { - String sql = "UPDATE MIGRATIONTOOLKIT_TABLECOPYTASKS " + - "SET failure='1', duration='-1', " + - "error=?, " + - "lastupdate=? " + - "WHERE targetnodeid=? " + - "AND migrationId=? " + - "AND pipelinename=? " + - "AND failure = '0'"; + public void markTaskFailed(CopyContext context, CopyContext.DataCopyItem copyItem, Exception error) + throws Exception { + // spotless:off + String sql = "UPDATE " + TABLECOPYTASKS + " SET failure='1', duration='-1', error=?, lastupdate=? WHERE targetnodeid=? AND migrationId=? AND pipelinename=? AND failure = '0'"; + // spotless:on try (Connection connection = getConnection(context); - PreparedStatement stmt = connection.prepareStatement(sql)) { + PreparedStatement stmt = connection.prepareStatement(sql)) { String errorMsg = error.getMessage(); if (StringUtils.isBlank(errorMsg)) { errorMsg = error.getClass().getName(); @@ -253,17 +401,58 @@ public void markTaskFailed(CopyContext context, CopyContext.DataCopyItem copyIte stmt.setObject(4, context.getMigrationId()); stmt.setObject(5, copyItem.getPipelineName()); stmt.executeUpdate(); - connection.commit(); } mutePerformanceRecorder(context, copyItem); } + @Override + public void markTaskTruncated(CopyContext context, CopyContext.DataCopyItem copyItem) throws Exception { + String sql = "UPDATE " + TABLECOPYTASKS + + " SET truncated = '1' WHERE targetnodeid=? AND migrationId=? AND pipelinename=? "; + try (Connection connection = getConnection(context); + PreparedStatement stmt = connection.prepareStatement(sql)) { + stmt.setObject(1, getTargetNodeId()); + stmt.setObject(2, context.getMigrationId()); + stmt.setObject(3, copyItem.getPipelineName()); + stmt.executeUpdate(); + } + } + + @Override + public void updateTaskCopyMethod(CopyContext context, CopyContext.DataCopyItem copyItem, String copyMethod) + throws Exception { + String sql = "UPDATE " + TABLECOPYTASKS + + " SET copymethod=? WHERE targetnodeid=? AND migrationId=? AND pipelinename=? "; + try (Connection connection = getConnection(context); + PreparedStatement stmt = connection.prepareStatement(sql)) { + stmt.setObject(1, copyMethod); + stmt.setObject(2, getTargetNodeId()); + stmt.setObject(3, context.getMigrationId()); + stmt.setObject(4, copyItem.getPipelineName()); + stmt.executeUpdate(); + } + } + + @Override + public void updateTaskKeyColumns(CopyContext context, CopyContext.DataCopyItem copyItem, + Collection keyColumns) throws Exception { + String sql = "UPDATE " + TABLECOPYTASKS + + " SET keycolumns=? WHERE targetnodeid=? AND migrationId=? AND pipelinename=? 
"; + try (Connection connection = getConnection(context); + PreparedStatement stmt = connection.prepareStatement(sql)) { + stmt.setObject(1, Joiner.on(',').join(keyColumns)); + stmt.setObject(2, getTargetNodeId()); + stmt.setObject(3, context.getMigrationId()); + stmt.setObject(4, copyItem.getPipelineName()); + stmt.executeUpdate(); + } + } + @Override public Set getUpdatedTasks(CopyContext context, OffsetDateTime since) throws Exception { - String sql = "select * from MIGRATIONTOOLKIT_TABLECOPYTASKS WHERE migrationid=? AND lastupdate >= ?"; + String sql = "SELECT * FROM " + TABLECOPYTASKS + " WHERE migrationid=? AND lastupdate >= ?"; try (Connection connection = getConnection(context); - PreparedStatement stmt = connection.prepareStatement(sql); - ) { + PreparedStatement stmt = connection.prepareStatement(sql)) { stmt.setObject(1, context.getMigrationId()); setTimestamp(stmt, 2, toTimestamp(since)); try (ResultSet resultSet = stmt.executeQuery()) { @@ -278,10 +467,9 @@ private Timestamp toTimestamp(OffsetDateTime ts) { @Override public Set getAllTasks(CopyContext context) throws Exception { - String sql = "select * from MIGRATIONTOOLKIT_TABLECOPYTASKS WHERE migrationid=?"; + String sql = "SELECT * FROM " + TABLECOPYTASKS + " WHERE migrationid=?"; try (Connection connection = getConnection(context); - PreparedStatement stmt = connection.prepareStatement(sql); - ) { + PreparedStatement stmt = connection.prepareStatement(sql)) { stmt.setObject(1, context.getMigrationId()); try (ResultSet resultSet = stmt.executeQuery()) { return convertToTask(resultSet); @@ -297,7 +485,6 @@ public void setClusterService(ClusterService clusterService) { this.clusterService = clusterService; } - private Set convertToTask(ResultSet rs) throws Exception { Set copyTasks = new HashSet<>(); while (rs.next()) { @@ -315,20 +502,38 @@ private Set convertToTask(ResultSet rs) throws Exception { copyTask.setTargetrowcount(rs.getLong("targetrowcount")); copyTask.setFailure(rs.getBoolean("failure")); copyTask.setError(rs.getString("error")); + copyTask.setTruncated(rs.getBoolean("truncated")); copyTask.setLastUpdate(getDateTime(rs, "lastupdate")); copyTask.setAvgReaderRowThroughput(rs.getDouble("avgreaderrowthroughput")); copyTask.setAvgWriterRowThroughput(rs.getDouble("avgwriterrowthroughput")); - // ORACLE_TARGET - copyTask.setDurationinseconds(rs.getDouble("durationinseconds")); + copyTask.setDurationinseconds(rs.getDouble("durationinseconds")); + copyTask.setCopyMethod(rs.getString("copymethod")); + copyTask.setKeyColumns(Splitter.on(",") + .splitToList(StringUtils.defaultIfEmpty(rs.getString("keycolumns"), StringUtils.EMPTY))); copyTasks.add(copyTask); } return copyTasks; } + private Set convertToBatch(ResultSet rs) throws Exception { + Set copyBatches = new LinkedHashSet<>(); + while (rs.next()) { + DatabaseCopyBatch copyBatch = new DatabaseCopyBatch(); + copyBatch.setMigrationId(rs.getString("migrationId")); + copyBatch.setBatchId(rs.getString("batchId")); + copyBatch.setPipelinename(rs.getString("pipelinename")); + copyBatch.setLowerBoundary(rs.getString("lowerBoundary")); + copyBatch.setUpperBoundary(rs.getString("upperBoundary")); + copyBatches.add(copyBatch); + } + return copyBatches; + } + private double getAvgPerformanceValue(CopyContext context, PerformanceCategory category, String tableName) { PerformanceRecorder recorder = context.getPerformanceProfiler().getRecorder(category, tableName); if (recorder != null) { - PerformanceRecorder.PerformanceAggregation performanceAggregation = 
recorder.getRecords().get(PerformanceUnit.ROWS); + PerformanceRecorder.PerformanceAggregation performanceAggregation = recorder.getRecords() + .get(PerformanceUnit.ROWS); if (performanceAggregation != null) { return performanceAggregation.getAvgThroughput().get(); } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationDataTypeMapperService.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationDataTypeMapperService.java index cdee576..d1162e6 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationDataTypeMapperService.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationDataTypeMapperService.java @@ -1,8 +1,9 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync.service.impl; import com.google.common.io.ByteStreams; @@ -29,13 +30,13 @@ public class DefaultDatabaseMigrationDataTypeMapperService implements DatabaseMi private static final Logger LOG = LoggerFactory.getLogger(DefaultDatabaseMigrationDataTypeMapperService.class); @Override - public Object dataTypeMapper(final Object sourceColumnValue, final int jdbcType) - throws IOException, SQLException { + public Object dataTypeMapper(final Object sourceColumnValue, final int jdbcType) throws IOException, SQLException { Object targetColumnValue = sourceColumnValue; if (sourceColumnValue == null) { // do nothing } else if (jdbcType == Types.BLOB) { - targetColumnValue = new ByteArrayInputStream(ByteStreams.toByteArray(((Blob) sourceColumnValue).getBinaryStream())); + targetColumnValue = new ByteArrayInputStream( + ByteStreams.toByteArray(((Blob) sourceColumnValue).getBinaryStream())); } else if (jdbcType == Types.NCLOB) { targetColumnValue = getValue((NClob) sourceColumnValue); } else if (jdbcType == Types.CLOB) { diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationReportService.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationReportService.java index 993cddf..d70fd4b 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationReportService.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationReportService.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -44,8 +44,9 @@ private void populateConfiguration(MigrationReport migrationReport) { final SortedMap configuration = new TreeMap<>(); final Configuration config = configurationService.getConfiguration(); final Configuration subset = config.subset(CommercedbsyncConstants.PROPERTIES_PREFIX); - final Set maskedProperties = Arrays.stream(config.getString(CommercedbsyncConstants.MIGRATION_REPORT_MASKED_PROPERTIES) - .split(",")).collect(Collectors.toSet()); + final Set maskedProperties = Arrays + .stream(config.getString(CommercedbsyncConstants.MIGRATION_REPORT_MASKED_PROPERTIES).split(",")) + .collect(Collectors.toSet()); final Iterator keys = subset.getKeys(); @@ -57,7 +58,10 @@ private void populateConfiguration(MigrationReport migrationReport) { continue; } - configuration.put(prefixedKey, maskedProperties.contains(prefixedKey) ? CommercedbsyncConstants.MASKED_VALUE : MaskUtil.stripJdbcPassword(subset.getString(key))); + configuration.put(prefixedKey, + maskedProperties.contains(prefixedKey) + ? CommercedbsyncConstants.MASKED_VALUE + : MaskUtil.stripJdbcPassword(subset.getString(key))); } migrationReport.setConfiguration(configuration); diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationService.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationService.java index 8d640ef..dd14e4f 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationService.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationService.java @@ -1,26 +1,29 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsync.service.impl; -import java.sql.Connection; -import java.sql.PreparedStatement; -import java.sql.ResultSet; import java.time.OffsetDateTime; import java.util.ArrayList; import java.util.Set; import java.util.UUID; +import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTaskRepository; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.MDC; +import de.hybris.platform.task.TaskEngine; +import de.hybris.platform.task.TaskService; +import com.sap.cx.boosters.commercedbsync.MigrationProgress; import com.sap.cx.boosters.commercedbsync.MigrationReport; import com.sap.cx.boosters.commercedbsync.MigrationStatus; import com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants; import com.sap.cx.boosters.commercedbsync.context.CopyContext; +import com.sap.cx.boosters.commercedbsync.context.LaunchOptions; import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import com.sap.cx.boosters.commercedbsync.context.validation.MigrationContextValidator; import com.sap.cx.boosters.commercedbsync.performance.PerformanceProfiler; @@ -34,20 +37,45 @@ public class DefaultDatabaseMigrationService implements DatabaseMigrationService { private static final Logger LOG = LoggerFactory.getLogger(DefaultDatabaseMigrationService.class); - + private DatabaseCopyScheduler databaseCopyScheduler; private CopyItemProvider copyItemProvider; private PerformanceProfiler performanceProfiler; private DatabaseMigrationReportService databaseMigrationReportService; private DatabaseSchemaDifferenceService schemaDifferenceService; private MigrationContextValidator migrationContextValidator; + private TaskService taskService; + private DatabaseCopyTaskRepository databaseCopyTaskRepository; private ArrayList preProcessors; @Override - public String startMigration(final MigrationContext context) throws Exception { + public String startMigration(final MigrationContext context, LaunchOptions launchOptions) throws Exception { migrationContextValidator.validateContext(context); + + final MigrationStatus runningMigrationStatus = databaseCopyTaskRepository.getRunningMigrationStatus(context); + + if (runningMigrationStatus != null && runningMigrationStatus.getStatus() == MigrationProgress.RUNNING) { + LOG.debug("Found already running migration with ID: {}", runningMigrationStatus.getMigrationID()); + + return runningMigrationStatus.getMigrationID(); + } + + if (!context.isDataExportEnabled()) { + TaskEngine engine = taskService.getEngine(); + boolean running = engine.isRunning(); + + if (running) { + throw new Exception("Task engine is activated - migration is blocked"); + } + } + performanceProfiler.reset(); + if (context.isLogSql()) { + context.getDataSourceRepository().clearJdbcQueriesStore(); + context.getDataTargetRepository().clearJdbcQueriesStore(); + } + final String migrationId = UUID.randomUUID().toString(); MDC.put(CommercedbsyncConstants.MDC_MIGRATIONID, migrationId); @@ -57,15 +85,25 @@ public String startMigration(final MigrationContext context) throws Exception { } CopyContext copyContext = buildCopyContext(context, migrationId); - - preProcessors.forEach(p -> p.process(copyContext)); - + + copyContext.getPropertyOverrideMap().putAll(launchOptions.getPropertyOverrideMap()); + + preProcessors.stream().filter(p -> p.shouldExecute(copyContext)).forEach(p -> p.process(copyContext)); + databaseCopyScheduler.schedule(copyContext); return migrationId; } - @Override + @Override + public void 
resumeUnfinishedMigration(MigrationContext context, LaunchOptions launchOptions, String migrationID) + throws Exception { + CopyContext copyContext = buildIdContext(context, migrationID); + copyContext.getPropertyOverrideMap().putAll(launchOptions.getPropertyOverrideMap()); + databaseCopyScheduler.resumeUnfinishedItems(copyContext); + } + + @Override public void stopMigration(MigrationContext context, String migrationID) throws Exception { CopyContext copyContext = buildIdContext(context, migrationID); databaseCopyScheduler.abort(copyContext); @@ -77,7 +115,8 @@ private CopyContext buildCopyContext(MigrationContext context, String migrationI } private CopyContext buildIdContext(MigrationContext context, String migrationID) throws Exception { - //we use a lean implementation of the copy context to avoid calling the provider which is not required for task management. + // we use a lean implementation of the copy context to avoid calling the + // provider which is not required for task management. return new CopyContext.IdCopyContext(migrationID, context, performanceProfiler); } @@ -87,7 +126,8 @@ public MigrationStatus getMigrationState(MigrationContext context, String migrat } @Override - public MigrationStatus getMigrationState(MigrationContext context, String migrationID, OffsetDateTime since) throws Exception { + public MigrationStatus getMigrationState(MigrationContext context, String migrationID, OffsetDateTime since) + throws Exception { CopyContext copyContext = buildIdContext(context, migrationID); return databaseCopyScheduler.getCurrentState(copyContext, since); } @@ -98,26 +138,11 @@ public MigrationReport getMigrationReport(MigrationContext context, String migra return databaseMigrationReportService.getMigrationReport(copyContext); } - @Override - public String getMigrationID(final MigrationContext migrationContext) - { - String migrationId = null; - try (Connection conn = migrationContext.getDataTargetRepository().getConnection(); - PreparedStatement stmt = conn.prepareStatement("SELECT migrationId FROM MIGRATIONTOOLKIT_TABLECOPYSTATUS")) - { - final ResultSet rs = stmt.executeQuery(); - if (rs.next()) - { - migrationId = rs.getString("migrationId"); - } - } - catch (final Exception e) - { - LOG.error("Couldn't fetch migrationId", e); - } - return migrationId; - } - + @Override + public String getMigrationID(MigrationContext context) { + return databaseCopyTaskRepository.getMostRecentMigrationID(context); + } + @Override public MigrationStatus waitForFinish(MigrationContext context, String migrationID) throws Exception { MigrationStatus status; @@ -156,8 +181,16 @@ public void setSchemaDifferenceService(DatabaseSchemaDifferenceService schemaDif public void setMigrationContextValidator(MigrationContextValidator migrationContextValidator) { this.migrationContextValidator = migrationContextValidator; } - - public void setPreProcessors(final ArrayList preProcessors) { - this.preProcessors = preProcessors; - } + + public void setTaskService(TaskService taskService) { + this.taskService = taskService; + } + + public void setDatabaseCopyTaskRepository(DatabaseCopyTaskRepository databaseCopyTaskRepository) { + this.databaseCopyTaskRepository = databaseCopyTaskRepository; + } + + public void setPreProcessors(final ArrayList preProcessors) { + this.preProcessors = preProcessors; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationSynonymService.java 
b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationSynonymService.java deleted file mode 100644 index 990dc1a..0000000 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseMigrationSynonymService.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. - * License: Apache-2.0 - * - */ - -package com.sap.cx.boosters.commercedbsync.service.impl; - -import com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants; -import com.sap.cx.boosters.commercedbsync.repository.DataRepository; -import com.sap.cx.boosters.commercedbsync.service.DatabaseMigrationSynonymService; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -public class DefaultDatabaseMigrationSynonymService implements DatabaseMigrationSynonymService { - - private static final Logger LOG = LoggerFactory.getLogger(DefaultDatabaseMigrationSynonymService.class); - - private static final String YDEPLOYMENTS = CommercedbsyncConstants.DEPLOYMENTS_TABLE; - private static final String ATTRDESCRIPTORS = "attributedescriptors"; - - - @Override - public void recreateSynonyms(DataRepository repository, String prefix) throws Exception { - recreateSynonym(repository, YDEPLOYMENTS, prefix); - recreateSynonym(repository, ATTRDESCRIPTORS, prefix); - } - - private void recreateSynonym(DataRepository repository, String table, String actualPrefix) throws Exception { - LOG.info("Creating Synonym for {} on {}{}", table, actualPrefix, table); - repository.executeUpdateAndCommit(String.format("DROP SYNONYM IF EXISTS %s", table)); - repository.executeUpdateAndCommit(String.format("CREATE SYNONYM %s FOR %s%s", table, actualPrefix, table)); - } -} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseSchemaDifferenceService.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseSchemaDifferenceService.java index 7dd6f38..d86d8b4 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseSchemaDifferenceService.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/DefaultDatabaseSchemaDifferenceService.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -13,6 +13,7 @@ import com.google.gson.GsonBuilder; import com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants; import de.hybris.platform.servicelayer.config.ConfigurationService; +import de.hybris.platform.util.Config; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang3.ObjectUtils; import org.apache.ddlutils.Platform; @@ -42,527 +43,529 @@ import java.util.stream.Stream; public class DefaultDatabaseSchemaDifferenceService implements DatabaseSchemaDifferenceService { - private static final Logger LOG = LoggerFactory.getLogger(DefaultDatabaseSchemaDifferenceService.class); - - private DataCopyTableFilter dataCopyTableFilter; - private DatabaseMigrationReportStorageService databaseMigrationReportStorageService; - private CopyItemProvider copyItemProvider; - private ConfigurationService configurationService; - - @Override - public String generateSchemaDifferencesSql(MigrationContext context) throws Exception { - final int maxStageMigrations = context.getMaxTargetStagedMigrations(); - final Set stagingPrefixes = findStagingPrefixes(context); - String schemaSql = ""; - if (stagingPrefixes.size() > maxStageMigrations) { - final Database databaseModelWithChanges = getDatabaseModelWithChanges4TableDrop(context); - LOG.info("generateSchemaDifferencesSql..getDatabaseModelWithChanges4TableDrop.. - calibrating changes "); - schemaSql = context.getDataTargetRepository().asPlatform().getDropTablesSql(databaseModelWithChanges, true); - LOG.info("generateSchemaDifferencesSql - generated DDL SQLs for DROP. "); - } else { - LOG.info( - "generateSchemaDifferencesSql..getDatabaseModelWithChanges4TableCreation - calibrating Schema changes "); - final DatabaseStatus databaseModelWithChanges = getDatabaseModelWithChanges4TableCreation(context); - if (databaseModelWithChanges.isHasSchemaDiff()) { - LOG.info("generateSchemaDifferencesSql..Schema Diff found - now to generate the SQLs "); - if (context.getDataTargetRepository().getDatabaseProvider().isHanaUsed()){ - schemaSql = context.getDataTargetRepository().asPlatform() - .getAlterTablesSql(null ,context.getDataTargetRepository().getDataSourceConfiguration().getSchema(),null,databaseModelWithChanges.getDatabase()); - } else { - schemaSql = context.getDataTargetRepository().asPlatform() - .getAlterTablesSql(databaseModelWithChanges.getDatabase()); - } - - schemaSql = postProcess(schemaSql, context); - LOG.info("generateSchemaDifferencesSql - generated DDL ALTER SQLs. "); - } - - } - - return schemaSql; - } - - /* - * ORACLE_TARGET - START This a TEMP fix, it is difficlt to get from from - * Sql Server NVARCHAR(255), NVARCHAR(MAX) to convert properly into to - * Orcale's VARCHAR2(255) and CLOB respectively. Therefore when the schema - * script output has VARCHAR2(2147483647) which is from SqlServer's - * NVARCHAR(max), then we just make it CLOB. Alternatively check if - * something can be done via the mappings in OracleDataRepository. 
- */ - private String postProcess(String schemaSql, final MigrationContext context) { - if (context.getDataTargetRepository().getDatabaseProvider().isOracleUsed()) { - schemaSql = schemaSql.replaceAll(CommercedbsyncConstants.MIGRATION_ORACLE_MAX, - CommercedbsyncConstants.MIGRATION_ORACLE_VARCHAR24k); - // another odd character that comes un in the SQL - LOG.info("Changing the NVARCHAR2 " + schemaSql); - schemaSql = schemaSql.replaceAll("NUMBER\\(10,0\\) DEFAULT \'\'\'\'\'\'", "NUMBER(10,0) DEFAULT 0"); - } - return schemaSql; - } - // ORACLE_TARGET - END - - @Override - public void executeSchemaDifferencesSql(final MigrationContext context, final String sql) throws Exception { - - if (!context.isSchemaMigrationEnabled()) { - throw new RuntimeException("Schema migration is disabled. Check property:" - + CommercedbsyncConstants.MIGRATION_SCHEMA_ENABLED); - } - - final Platform platform = context.getDataTargetRepository().asPlatform(); - final boolean continueOnError = false; - final Connection connection = platform.borrowConnection(); - try { - platform.evaluateBatch(connection, sql, continueOnError); - LOG.info("Executed the following sql to change the schema:\n" + sql); - writeReport(context, sql); - } catch (final Exception e) { - throw new RuntimeException("Could not execute Schema Diff Script", e); - } finally { - platform.returnConnection(connection); - } - } - - @Override - public void executeSchemaDifferences(final MigrationContext context) throws Exception { - executeSchemaDifferencesSql(context, generateSchemaDifferencesSql(context)); - } - - private Set findDuplicateTables(final MigrationContext migrationContext) { - try { - final Set stagingPrefixes = findStagingPrefixes(migrationContext); - final Set targetSet = migrationContext.getDataTargetRepository().getAllTableNames(); - return targetSet.stream() - .filter(t -> stagingPrefixes.stream().anyMatch(p -> StringUtils.startsWithIgnoreCase(t, p))) - .collect(Collectors.toSet()); - } catch (final Exception e) { - LOG.error("Error occurred while trying to find duplicate tables", e); - } - return Collections.EMPTY_SET; - } - - private Set findStagingPrefixes(final MigrationContext context) throws Exception { - final String currentSystemPrefix = configurationService.getConfiguration().getString("db.tableprefix"); - final String currentMigrationPrefix = context.getDataTargetRepository().getDataSourceConfiguration() - .getTablePrefix(); - final Set targetSet = context.getDataTargetRepository().getAllTableNames(); - final String deploymentsTable = CommercedbsyncConstants.DEPLOYMENTS_TABLE; - final Set detectedPrefixes = targetSet.stream().filter(t -> t.toLowerCase().endsWith(deploymentsTable)) - .filter(t -> !StringUtils.equalsIgnoreCase(t, currentSystemPrefix + deploymentsTable)) - .filter(t -> !StringUtils.equalsIgnoreCase(t, currentMigrationPrefix + deploymentsTable)) - .map(t -> StringUtils.removeEndIgnoreCase(t, deploymentsTable)).collect(Collectors.toSet()); - return detectedPrefixes; - - } - - private Database getDatabaseModelWithChanges4TableDrop(final MigrationContext context) { - final Set duplicateTables = findDuplicateTables(context); - final Database database = context.getDataTargetRepository().asDatabase(true); - // clear tables and add only the ones to be removed - final Table[] tables = database.getTables(); - Stream.of(tables).forEach(t -> { - database.removeTable(t); - }); - duplicateTables.forEach(t -> { - final Table table = ObjectUtils.defaultIfNull(database.findTable(t), new Table()); - table.setName(t); - 
database.addTable(table); - }); - return database; - } - - protected DatabaseStatus getDatabaseModelWithChanges4TableCreation(final MigrationContext migrationContext) - throws Exception { - final DatabaseStatus dbStatus = new DatabaseStatus(); - - final SchemaDifferenceResult differenceResult = getDifference(migrationContext); - if (!differenceResult.hasDifferences()) { - LOG.info("getDatabaseModelWithChanges4TableCreation - No Difference found in schema "); - dbStatus.setDatabase(migrationContext.getDataTargetRepository().asDatabase()); - dbStatus.setHasSchemaDiff(false); - return dbStatus; - } - final SchemaDifference targetDiff = differenceResult.getTargetSchema(); - final Database database = targetDiff.getDatabase(); - - // add missing tables in target - if (migrationContext.isAddMissingTablesToSchemaEnabled()) { - final List missingTables = targetDiff.getMissingTables(); - for (final TableKeyPair missingTable : missingTables) { - final Table tableClone = (Table) differenceResult.getSourceSchema().getDatabase() - .findTable(missingTable.getLeftName(), false).clone(); - tableClone.setName(missingTable.getRightName()); - tableClone.setCatalog( - migrationContext.getDataTargetRepository().getDataSourceConfiguration().getCatalog()); - tableClone - .setSchema(migrationContext.getDataTargetRepository().getDataSourceConfiguration().getSchema()); - database.addTable(tableClone); - LOG.info("getDatabaseModelWithChanges4TableCreation - missingTable.getRightName() =" - + missingTable.getRightName() + ", missingTable.getLeftName() = " + missingTable.getLeftName()); - } - } - - // add missing columns in target - if (migrationContext.isAddMissingColumnsToSchemaEnabled()) { - final ListMultimap missingColumnsInTable = targetDiff.getMissingColumnsInTable(); - for (final TableKeyPair missingColumnsTable : missingColumnsInTable.keySet()) { - final List columns = missingColumnsInTable.get(missingColumnsTable); - for (final String missingColumn : columns) { - final Table missingColumnsTableModel = differenceResult.getSourceSchema().getDatabase() - .findTable(missingColumnsTable.getLeftName(), false); - final Column columnClone = (Column) missingColumnsTableModel.findColumn(missingColumn, false) - .clone(); - LOG.info(" Column " + columnClone.getName() + ", Type = " + columnClone.getType() + ", Type Code " - + columnClone.getTypeCode() + ",size " + columnClone.getSize() + ", size as int " - + columnClone.getSizeAsInt()); - // columnClone.set - final Table table = database.findTable(missingColumnsTable.getRightName(), false); - Preconditions.checkState(table != null, "Data inconsistency: Table must exist."); - table.addColumn(columnClone); - } - } - } - - //remove superfluous tables in target - if (migrationContext.isRemoveMissingTablesToSchemaEnabled()) { - throw new UnsupportedOperationException("not yet implemented"); - } - - // remove superfluous columns in target - if (migrationContext.isRemoveMissingColumnsToSchemaEnabled()) { - final ListMultimap superfluousColumnsInTable = differenceResult.getSourceSchema() - .getMissingColumnsInTable(); - for (final TableKeyPair superfluousColumnsTable : superfluousColumnsInTable.keySet()) { - final List columns = superfluousColumnsInTable.get(superfluousColumnsTable); - for (final String superfluousColumn : columns) { - final Table table = database.findTable(superfluousColumnsTable.getLeftName(), false); - Preconditions.checkState(table != null, "Data inconsistency: Table must exist."); - final Column columnToBeRemoved = table.findColumn(superfluousColumn, false); 
- // remove indices in case column is part of one - Stream.of(table.getIndices()).filter(i -> i.hasColumn(columnToBeRemoved)) - .forEach(i -> table.removeIndex(i)); - table.removeColumn(columnToBeRemoved); - } - } - } - dbStatus.setDatabase(database); - dbStatus.setHasSchemaDiff(true); - LOG.info("getDatabaseModelWithChanges4TableCreation Schema Diff found - done "); - return dbStatus; - } - - protected void writeReport(final MigrationContext migrationContext, final String differenceSql) { - try { - final String fileName = String.format("schemaChanges-%s.sql", LocalDateTime.now().getNano()); - databaseMigrationReportStorageService.store(fileName, - new ByteArrayInputStream(differenceSql.getBytes(StandardCharsets.UTF_8))); - } catch (final Exception e) { - LOG.error("Error executing writing diff report", e); - } - } - - @Override - public SchemaDifferenceResult getDifference(final MigrationContext migrationContext) throws Exception { - try { - LOG.info("reading source database model ..."); - migrationContext.getDataSourceRepository().asDatabase(true); - LOG.info("reading target database model ..."); - migrationContext.getDataTargetRepository().asDatabase(true); - - LOG.info("computing SCHEMA diff, REF DB = " - + migrationContext.getDataTargetRepository().getDatabaseProvider().getDbName() - + "vs Checking in DB = " - + migrationContext.getDataSourceRepository().getDatabaseProvider().getDbName()); - final Set targetTableCandidates = copyItemProvider - .getTargetTableCandidates(migrationContext); - final SchemaDifference sourceSchemaDifference = computeDiff(migrationContext, - migrationContext.getDataTargetRepository(), migrationContext.getDataSourceRepository(), - targetTableCandidates); - LOG.info("compute SCHEMA diff, REF DB =" - + migrationContext.getDataSourceRepository().getDatabaseProvider().getDbName() - + "vs Checking in DB = " - + migrationContext.getDataTargetRepository().getDatabaseProvider().getDbName()); - final Set sourceTableCandidates = copyItemProvider - .getSourceTableCandidates(migrationContext); - final SchemaDifference targetSchemaDifference = computeDiff(migrationContext, - migrationContext.getDataSourceRepository(), migrationContext.getDataTargetRepository(), - sourceTableCandidates); - final SchemaDifferenceResult schemaDifferenceResult = new SchemaDifferenceResult(sourceSchemaDifference, - targetSchemaDifference); - LOG.info("Diff finished. 
Differences detected: " + schemaDifferenceResult.hasDifferences()); - - return schemaDifferenceResult; - } catch (final Exception e) { - throw new RuntimeException("Error computing schema diff", e); - } - } - - protected String getSchemaDifferencesAsJson(final SchemaDifferenceResult schemaDifferenceResult) { - final Gson gson = new GsonBuilder().setPrettyPrinting().create(); - return gson.toJson(schemaDifferenceResult); - } - - private void logMigrationContext(final MigrationContext context) { - if (context == null) { - return; - } - LOG.info("--------MIGRATION CONTEXT- START----------"); - LOG.info("isAddMissingColumnsToSchemaEnabled=" + context.isAddMissingColumnsToSchemaEnabled()); - LOG.info("isAddMissingTablesToSchemaEnabled=" + context.isAddMissingTablesToSchemaEnabled()); - LOG.info("isAuditTableMigrationEnabled=" + context.isAuditTableMigrationEnabled()); - LOG.info("isBulkCopyEnabled=" + context.isBulkCopyEnabled()); - LOG.info("isClusterMode=" + context.isClusterMode()); - LOG.info("isDeletionEnabled=" + context.isDeletionEnabled()); - LOG.info("isDisableAllIndexesEnabled=" + context.isDisableAllIndexesEnabled()); - LOG.info("isDropAllIndexesEnabled=" + context.isDropAllIndexesEnabled()); - LOG.info("isFailOnErrorEnabled=" + context.isFailOnErrorEnabled()); - LOG.info("isIncrementalModeEnabled=" + context.isIncrementalModeEnabled()); - LOG.info("isMigrationTriggeredByUpdateProcess=" + context.isMigrationTriggeredByUpdateProcess()); - LOG.info("isRemoveMissingColumnsToSchemaEnabled=" + context.isRemoveMissingColumnsToSchemaEnabled()); - LOG.info("isRemoveMissingTablesToSchemaEnabled=" + context.isRemoveMissingTablesToSchemaEnabled()); - LOG.info("isSchemaMigrationAutoTriggerEnabled=" + context.isSchemaMigrationAutoTriggerEnabled()); - LOG.info("isSchemaMigrationEnabled=" + context.isSchemaMigrationEnabled()); - LOG.info("isTruncateEnabled=" + context.isTruncateEnabled()); - LOG.info("getIncludedTables=" + context.getIncludedTables()); - LOG.info("getExcludedTables=" + context.getExcludedTables()); - LOG.info("getIncrementalTables=" + context.getIncrementalTables()); - LOG.info("getTruncateExcludedTables=" + context.getTruncateExcludedTables()); - LOG.info("getCustomTables=" + context.getCustomTables()); - LOG.info("getIncrementalTimestamp=" + context.getIncrementalTimestamp()); - LOG.info( - "Source TS Name=" + context.getDataSourceRepository().getDataSourceConfiguration().getTypeSystemName()); - LOG.info("Source TS Suffix =" - + context.getDataSourceRepository().getDataSourceConfiguration().getTypeSystemSuffix()); - LOG.info( - "Target TS Name=" + context.getDataTargetRepository().getDataSourceConfiguration().getTypeSystemName()); - LOG.info("Target TS Suffix =" - + context.getDataTargetRepository().getDataSourceConfiguration().getTypeSystemSuffix()); - LOG.info("getItemTypeViewNamePattern=" + context.getItemTypeViewNamePattern()); - - LOG.info("--------MIGRATION CONTEXT- END----------"); - } - - protected SchemaDifference computeDiff(final MigrationContext context, final DataRepository leftRepository, - final DataRepository rightRepository, final Set leftCandidates) { - logMigrationContext(context); - final SchemaDifference schemaDifference = new SchemaDifference(rightRepository.asDatabase(), - rightRepository.getDataSourceConfiguration().getTablePrefix()); - final Set leftDatabaseTables = getTables(context, leftRepository, leftCandidates); - LOG.info("LEFT Repo = " + leftRepository.getDatabaseProvider().getDbName()); - LOG.info("RIGHT Repo = " + 
rightRepository.getDatabaseProvider().getDbName()); - - try { - LOG.debug(" All tables in LEFT Repo " + leftRepository.getAllTableNames()); - LOG.debug(" All tables in RIGHT Repo " + rightRepository.getAllTableNames()); - } catch (final Exception e) { - LOG.error("Cannot fetch all Table Names" + e); - } - - // LOG.info(" -------------------------------"); - for (final TableCandidate leftCandidate : leftDatabaseTables) { - LOG.info(" Checking if Left Table exists --> " + leftCandidate.getFullTableName()); - final Table leftTable = leftRepository.asDatabase().findTable(leftCandidate.getFullTableName(), false); - if (leftTable == null) { - LOG.error(String.format("Table %s in DB %s cannot be found, but should exist", - leftCandidate.getFullTableName(), - leftRepository.getDataSourceConfiguration().getConnectionString())); - continue; - - // throw new RuntimeException(String.format("Table %s in DB %s - // cannot be found, but should exists", - // leftCandidate.getFullTableName(), - // leftRepository.getDataSourceConfiguration().getConnectionString())); - } - final String rightTableName = translateTableName(leftRepository, rightRepository, leftCandidate); - final Table rightTable = rightRepository.asDatabase().findTable(rightTableName, false); - if (rightTable == null) { - schemaDifference.getMissingTables().add(new TableKeyPair(leftTable.getName(), rightTableName)); - LOG.info("MISSING Table !! --> " + leftTable.getName() + " searched for " + rightTableName); - } else { - // LOG.info(" FOUND Table --> " + rightTable.getName()); - final Column[] leftTableColumns = leftTable.getColumns(); - for (final Column leftTableColumn : leftTableColumns) { - if (rightTable.findColumn(leftTableColumn.getName(), false) == null) { - LOG.info("Missing column --> " + leftTableColumn.getName() + " -->" + leftTable.getName()); - schemaDifference.getMissingColumnsInTable().put( - new TableKeyPair(leftTable.getName(), rightTable.getName()), leftTableColumn.getName()); - } - } - } - } - return schemaDifference; - } - - private String translateTableName(final DataRepository leftRepository, final DataRepository rightRepository, - final TableCandidate leftCandidate) { - String translatedTableName = rightRepository.getDataSourceConfiguration().getTablePrefix() - + leftCandidate.getBaseTableName(); - if (leftCandidate.isTypeSystemRelatedTable()) { - translatedTableName += rightRepository.getDataSourceConfiguration().getTypeSystemSuffix(); - } - // ORCALE_TEMP - START - /* - * if (!leftCandidate.getAdditionalSuffix().isEmpty() && - * translatedTableName.toLowerCase().endsWith(leftCandidate. 
- * getAdditionalSuffix())) { - * //System.out.println("$$Translated name ends with LP " + - * translatedTableName); return translatedTableName; } - */ - // ORCALE_TEMP - END - return translatedTableName + leftCandidate.getAdditionalSuffix(); - } - - private Set getTables(final MigrationContext context, final DataRepository repository, - final Set candidates) { - return candidates.stream().filter(c -> dataCopyTableFilter.filter(context).test(c.getCommonTableName())) - .collect(Collectors.toSet()); - } - - public void setDataCopyTableFilter(final DataCopyTableFilter dataCopyTableFilter) { - this.dataCopyTableFilter = dataCopyTableFilter; - } - - public void setDatabaseMigrationReportStorageService( - final DatabaseMigrationReportStorageService databaseMigrationReportStorageService) { - this.databaseMigrationReportStorageService = databaseMigrationReportStorageService; - } - - public void setConfigurationService(final ConfigurationService configurationService) { - this.configurationService = configurationService; - } - - public void setCopyItemProvider(final CopyItemProvider copyItemProvider) { - this.copyItemProvider = copyItemProvider; - } - - public static class SchemaDifferenceResult { - private final SchemaDifference sourceSchema; - private final SchemaDifference targetSchema; - - public SchemaDifferenceResult(final SchemaDifference sourceSchema, final SchemaDifference targetSchema) { - this.sourceSchema = sourceSchema; - this.targetSchema = targetSchema; - } - - public SchemaDifference getSourceSchema() { - return sourceSchema; - } - - public SchemaDifference getTargetSchema() { - return targetSchema; - } - - public boolean hasDifferences() { - final boolean hasMissingTargetTables = getTargetSchema().getMissingTables().size() > 0; - final boolean hasMissingColumnsInTargetTable = getTargetSchema().getMissingColumnsInTable().size() > 0; - final boolean hasMissingSourceTables = getSourceSchema().getMissingTables().size() > 0; - final boolean hasMissingColumnsInSourceTable = getSourceSchema().getMissingColumnsInTable().size() > 0; - return hasMissingTargetTables || hasMissingColumnsInTargetTable || hasMissingSourceTables - || hasMissingColumnsInSourceTable; - } - } - - class DatabaseStatus { - private Database database; - - /** - * @return the database - */ - public Database getDatabase() { - return database; - } - - /** - * @param database - * the database to set - */ - public void setDatabase(final Database database) { - this.database = database; - } - - /** - * @return the hasSchemaDiff - */ - public boolean isHasSchemaDiff() { - return hasSchemaDiff; - } - - /** - * @param hasSchemaDiff - * the hasSchemaDiff to set - */ - public void setHasSchemaDiff(final boolean hasSchemaDiff) { - this.hasSchemaDiff = hasSchemaDiff; - } - - private boolean hasSchemaDiff; - } - - public static class SchemaDifference { - - private final Database database; - private final String prefix; - - private final List missingTables = new ArrayList<>(); - private final ListMultimap missingColumnsInTable = ArrayListMultimap.create(); - - public SchemaDifference(final Database database, final String prefix) { - this.database = database; - this.prefix = prefix; - - } - - public Database getDatabase() { - return database; - } - - public String getPrefix() { - return prefix; - } - - public List getMissingTables() { - return missingTables; - } - - public ListMultimap getMissingColumnsInTable() { - return missingColumnsInTable; - } - } - - public static class TableKeyPair { - private final String leftName; - private 
final String rightName; - - public TableKeyPair(final String leftName, final String rightName) { - this.leftName = leftName; - this.rightName = rightName; - } - - public String getLeftName() { - return leftName; - } - - public String getRightName() { - return rightName; - } - - @Override - public boolean equals(final Object o) { - if (this == o) { - return true; - } - if (o == null || getClass() != o.getClass()) { - return false; - } - final TableKeyPair that = (TableKeyPair) o; - return leftName.equals(that.leftName) && rightName.equals(that.rightName); - } - - @Override - public int hashCode() { - return Objects.hash(leftName, rightName); - } - } - -} \ No newline at end of file + private static final Logger LOG = LoggerFactory.getLogger(DefaultDatabaseSchemaDifferenceService.class); + + private DataCopyTableFilter dataCopyTableFilter; + private DatabaseMigrationReportStorageService databaseMigrationReportStorageService; + private CopyItemProvider copyItemProvider; + private ConfigurationService configurationService; + + @Override + public String generateSchemaDifferencesSql(MigrationContext context) throws Exception { + final int maxStageMigrations = context.getMaxTargetStagedMigrations(); + final Set stagingPrefixes = findStagingPrefixes(context); + String schemaSql = ""; + if (stagingPrefixes.size() > maxStageMigrations) { + final Database databaseModelWithChanges = getDatabaseModelWithChanges4TableDrop(context); + LOG.info("generateSchemaDifferencesSql..getDatabaseModelWithChanges4TableDrop.. - calibrating changes "); + schemaSql = context.getDataTargetRepository().asPlatform().getDropTablesSql(databaseModelWithChanges, true); + LOG.info("generateSchemaDifferencesSql - generated DDL SQLs for DROP. "); + } else { + LOG.info( + "generateSchemaDifferencesSql..getDatabaseModelWithChanges4TableCreation - calibrating Schema changes "); + final DatabaseStatus databaseModelWithChanges = getDatabaseModelWithChanges4TableCreation(context); + if (databaseModelWithChanges.isHasSchemaDiff()) { + LOG.info("generateSchemaDifferencesSql..Schema Diff found - now to generate the SQLs "); + if (context.getDataTargetRepository().getDatabaseProvider().isHanaUsed()) { + schemaSql = context.getDataTargetRepository().asPlatform().getAlterTablesSql(null, + context.getDataTargetRepository().getDataSourceConfiguration().getSchema(), null, + databaseModelWithChanges.getDatabase()); + } else { + schemaSql = context.getDataTargetRepository().asPlatform() + .getAlterTablesSql(databaseModelWithChanges.getDatabase()); + } + + schemaSql = postProcess(schemaSql, context); + LOG.info("generateSchemaDifferencesSql - generated DDL ALTER SQLs. "); + } + + } + + return schemaSql; + } + + /* + * ORACLE_TARGET - START This a TEMP fix, it is difficlt to get from from Sql + * Server NVARCHAR(255), NVARCHAR(MAX) to convert properly into to Orcale's + * VARCHAR2(255) and CLOB respectively. Therefore when the schema script output + * has VARCHAR2(2147483647) which is from SqlServer's NVARCHAR(max), then we + * just make it CLOB. Alternatively check if something can be done via the + * mappings in OracleDataRepository. 
+ */ + private String postProcess(String schemaSql, final MigrationContext context) { + if (context.getDataTargetRepository().getDatabaseProvider().isOracleUsed()) { + schemaSql = schemaSql.replaceAll(CommercedbsyncConstants.MIGRATION_ORACLE_MAX, + CommercedbsyncConstants.MIGRATION_ORACLE_VARCHAR24k); + // another odd character that comes un in the SQL + LOG.info("Changing the NVARCHAR2 " + schemaSql); + schemaSql = schemaSql.replaceAll("NUMBER\\(10,0\\) DEFAULT ''''''", "NUMBER(10,0) DEFAULT 0"); + } + return schemaSql; + } + + @Override + public void executeSchemaDifferencesSql(final MigrationContext context, final String sql) throws Exception { + + if (!context.isSchemaMigrationEnabled()) { + throw new RuntimeException( + "Schema migration is disabled. Check property:" + CommercedbsyncConstants.MIGRATION_SCHEMA_ENABLED); + } + + final Platform platform = context.getDataTargetRepository().asPlatform(); + final boolean continueOnError = false; + final Connection connection = platform.borrowConnection(); + try { + platform.evaluateBatch(connection, sql, continueOnError); + LOG.info("Executed the following sql to change the schema:\n" + sql); + writeReport(context, sql); + } catch (final Exception e) { + throw new RuntimeException("Could not execute Schema Diff Script", e); + } finally { + platform.returnConnection(connection); + } + } + + @Override + public void executeSchemaDifferences(final MigrationContext context) throws Exception { + executeSchemaDifferencesSql(context, generateSchemaDifferencesSql(context)); + } + + private Set findDuplicateTables(final MigrationContext migrationContext) { + try { + final Set stagingPrefixes = findStagingPrefixes(migrationContext); + final Set targetSet = migrationContext.getDataTargetRepository().getAllTableNames(); + return targetSet.stream() + .filter(t -> stagingPrefixes.stream().anyMatch(p -> StringUtils.startsWithIgnoreCase(t, p))) + .collect(Collectors.toSet()); + } catch (final Exception e) { + LOG.error("Error occurred while trying to find duplicate tables", e); + } + return Collections.emptySet(); + } + + private Set findStagingPrefixes(final MigrationContext context) throws Exception { + final String currentSystemPrefix = configurationService.getConfiguration().getString("db.tableprefix"); + final String currentMigrationPrefix = context.getDataTargetRepository().getDataSourceConfiguration() + .getTablePrefix(); + final Set targetSet = context.getDataTargetRepository().getAllTableNames(); + final String deploymentsTable = CommercedbsyncConstants.DEPLOYMENTS_TABLE; + final Set detectedPrefixes = targetSet.stream().filter(t -> t.toLowerCase().endsWith(deploymentsTable)) + .filter(t -> !StringUtils.equalsIgnoreCase(t, currentSystemPrefix + deploymentsTable)) + .filter(t -> !StringUtils.equalsIgnoreCase(t, currentMigrationPrefix + deploymentsTable)) + .map(t -> StringUtils.removeEndIgnoreCase(t, deploymentsTable)).collect(Collectors.toSet()); + return detectedPrefixes; + + } + + private Database getDatabaseModelWithChanges4TableDrop(final MigrationContext context) { + final Set duplicateTables = findDuplicateTables(context); + final Database database = context.getDataTargetRepository().asDatabase(true); + // clear tables and add only the ones to be removed + final Table[] tables = database.getTables(); + Stream.of(tables).forEach(t -> { + database.removeTable(t); + }); + duplicateTables.forEach(t -> { + final Table table = ObjectUtils.defaultIfNull(database.findTable(t), new Table()); + table.setName(t); + database.addTable(table); + }); + 
return database; + } + + protected DatabaseStatus getDatabaseModelWithChanges4TableCreation(final MigrationContext migrationContext) + throws Exception { + final DatabaseStatus dbStatus = new DatabaseStatus(); + + final SchemaDifferenceResult differenceResult = getDifference(migrationContext); + if (!differenceResult.hasDifferences()) { + LOG.info("getDatabaseModelWithChanges4TableCreation - No Difference found in schema "); + dbStatus.setDatabase(migrationContext.getDataTargetRepository().asDatabase()); + dbStatus.setHasSchemaDiff(false); + return dbStatus; + } + final SchemaDifference targetDiff = differenceResult.getTargetSchema(); + final Database database = targetDiff.getDatabase(); + + // add missing tables in target + if (migrationContext.isAddMissingTablesToSchemaEnabled()) { + final List missingTables = targetDiff.getMissingTables(); + for (final TableKeyPair missingTable : missingTables) { + final Table tableClone = (Table) differenceResult.getSourceSchema().getDatabase() + .findTable(missingTable.getLeftName(), false).clone(); + tableClone.setName(missingTable.getRightName()); + tableClone.setCatalog( + migrationContext.getDataTargetRepository().getDataSourceConfiguration().getCatalog()); + tableClone + .setSchema(migrationContext.getDataTargetRepository().getDataSourceConfiguration().getSchema()); + database.addTable(tableClone); + LOG.info("getDatabaseModelWithChanges4TableCreation - missingTable.getRightName() =" + + missingTable.getRightName() + ", missingTable.getLeftName() = " + missingTable.getLeftName()); + } + } + + // add missing columns in target + if (migrationContext.isAddMissingColumnsToSchemaEnabled()) { + final ListMultimap missingColumnsInTable = targetDiff.getMissingColumnsInTable(); + for (final TableKeyPair missingColumnsTable : missingColumnsInTable.keySet()) { + final List columns = missingColumnsInTable.get(missingColumnsTable); + for (final String missingColumn : columns) { + final Table missingColumnsTableModel = differenceResult.getSourceSchema().getDatabase() + .findTable(missingColumnsTable.getLeftName(), false); + final Column columnClone = (Column) missingColumnsTableModel.findColumn(missingColumn, false) + .clone(); + LOG.info(" Column " + columnClone.getName() + ", Type = " + columnClone.getType() + ", Type Code " + + columnClone.getTypeCode() + ",size " + columnClone.getSize() + ", size as int " + + columnClone.getSizeAsInt()); + // columnClone.set + final Table table = database.findTable(missingColumnsTable.getRightName(), false); + Preconditions.checkState(table != null, "Data inconsistency: Table must exist."); + table.addColumn(columnClone); + } + } + } + + // remove superfluous tables in target + if (migrationContext.isRemoveMissingTablesToSchemaEnabled()) { + throw new UnsupportedOperationException("not yet implemented"); + } + + // remove superfluous columns in target + if (migrationContext.isRemoveMissingColumnsToSchemaEnabled()) { + final ListMultimap superfluousColumnsInTable = differenceResult.getSourceSchema() + .getMissingColumnsInTable(); + for (final TableKeyPair superfluousColumnsTable : superfluousColumnsInTable.keySet()) { + final List columns = superfluousColumnsInTable.get(superfluousColumnsTable); + for (final String superfluousColumn : columns) { + final Table table = database.findTable(superfluousColumnsTable.getLeftName(), false); + Preconditions.checkState(table != null, "Data inconsistency: Table must exist."); + final Column columnToBeRemoved = table.findColumn(superfluousColumn, false); + // remove indices in case 
column is part of one + Stream.of(table.getIndices()).filter(i -> i.hasColumn(columnToBeRemoved)) + .forEach(i -> table.removeIndex(i)); + table.removeColumn(columnToBeRemoved); + } + } + } + dbStatus.setDatabase(database); + dbStatus.setHasSchemaDiff(true); + LOG.info("getDatabaseModelWithChanges4TableCreation Schema Diff found - done "); + return dbStatus; + } + + protected void writeReport(final MigrationContext migrationContext, final String differenceSql) { + try { + final String fileName = String.format("schemaChanges-%s.sql", LocalDateTime.now().getNano()); + databaseMigrationReportStorageService.store(fileName, + new ByteArrayInputStream(differenceSql.getBytes(StandardCharsets.UTF_8))); + } catch (final Exception e) { + LOG.error("Error executing writing diff report", e); + } + } + + @Override + public SchemaDifferenceResult getDifference(final MigrationContext migrationContext) throws Exception { + try { + LOG.info("reading source database model ..."); + migrationContext.getDataSourceRepository().asDatabase(true); + LOG.info("reading target database model ..."); + migrationContext.getDataTargetRepository().asDatabase(true); + + LOG.info("computing SCHEMA diff, REF DB = " + + migrationContext.getDataTargetRepository().getDatabaseProvider().getDbName() + + " vs Checking in DB = " + + migrationContext.getDataSourceRepository().getDatabaseProvider().getDbName()); + final Set targetTableCandidates = copyItemProvider + .getTargetTableCandidates(migrationContext); + final SchemaDifference sourceSchemaDifference = computeDiff(migrationContext, + migrationContext.getDataTargetRepository(), migrationContext.getDataSourceRepository(), + targetTableCandidates); + LOG.info("compute SCHEMA diff, REF DB =" + + migrationContext.getDataSourceRepository().getDatabaseProvider().getDbName() + + "vs Checking in DB = " + + migrationContext.getDataTargetRepository().getDatabaseProvider().getDbName()); + final Set sourceTableCandidates = copyItemProvider + .getSourceTableCandidates(migrationContext); + final SchemaDifference targetSchemaDifference = computeDiff(migrationContext, + migrationContext.getDataSourceRepository(), migrationContext.getDataTargetRepository(), + sourceTableCandidates); + final SchemaDifferenceResult schemaDifferenceResult = new SchemaDifferenceResult(sourceSchemaDifference, + targetSchemaDifference); + LOG.info("Diff finished. 
Differences detected: " + schemaDifferenceResult.hasDifferences()); + + return schemaDifferenceResult; + } catch (final Exception e) { + throw new RuntimeException("Error computing schema diff", e); + } + } + + protected String getSchemaDifferencesAsJson(final SchemaDifferenceResult schemaDifferenceResult) { + final Gson gson = new GsonBuilder().setPrettyPrinting().create(); + return gson.toJson(schemaDifferenceResult); + } + + private void logMigrationContext(final MigrationContext context) { + if (!Config.getBoolean("migration.log.context.details", true) || context == null) { + return; + } + + LOG.info("--------MIGRATION CONTEXT- START----------"); + LOG.info("isAddMissingColumnsToSchemaEnabled=" + context.isAddMissingColumnsToSchemaEnabled()); + LOG.info("isAddMissingTablesToSchemaEnabled=" + context.isAddMissingTablesToSchemaEnabled()); + LOG.info("isAuditTableMigrationEnabled=" + context.isAuditTableMigrationEnabled()); + LOG.info("isClusterMode=" + context.isClusterMode()); + LOG.info("isDeletionEnabled=" + context.isDeletionEnabled()); + LOG.info("isDisableAllIndexesEnabled=" + context.isDisableAllIndexesEnabled()); + LOG.info("isDropAllIndexesEnabled=" + context.isDropAllIndexesEnabled()); + LOG.info("isFailOnErrorEnabled=" + context.isFailOnErrorEnabled()); + LOG.info("isIncrementalModeEnabled=" + context.isIncrementalModeEnabled()); + LOG.info("isMigrationTriggeredByUpdateProcess=" + context.isMigrationTriggeredByUpdateProcess()); + LOG.info("isRemoveMissingColumnsToSchemaEnabled=" + context.isRemoveMissingColumnsToSchemaEnabled()); + LOG.info("isRemoveMissingTablesToSchemaEnabled=" + context.isRemoveMissingTablesToSchemaEnabled()); + LOG.info("isSchemaMigrationAutoTriggerEnabled=" + context.isSchemaMigrationAutoTriggerEnabled()); + LOG.info("isSchemaMigrationEnabled=" + context.isSchemaMigrationEnabled()); + LOG.info("isTruncateEnabled=" + context.isTruncateEnabled()); + LOG.info("getIncludedTables=" + context.getIncludedTables()); + LOG.info("getExcludedTables=" + context.getExcludedTables()); + LOG.info("getIncrementalTables=" + context.getIncrementalTables()); + LOG.info("getTruncateExcludedTables=" + context.getTruncateExcludedTables()); + LOG.info("getCustomTables=" + context.getCustomTables()); + LOG.info("getIncrementalTimestamp=" + context.getIncrementalTimestamp()); + LOG.info( + "Source TS Name=" + context.getDataSourceRepository().getDataSourceConfiguration().getTypeSystemName()); + LOG.info("Source TS Suffix=" + + context.getDataSourceRepository().getDataSourceConfiguration().getTypeSystemSuffix()); + LOG.info( + "Target TS Name=" + context.getDataTargetRepository().getDataSourceConfiguration().getTypeSystemName()); + LOG.info("Target TS Suffix=" + + context.getDataTargetRepository().getDataSourceConfiguration().getTypeSystemSuffix()); + LOG.info("getItemTypeViewNamePattern=" + context.getItemTypeViewNamePattern()); + + LOG.info("--------MIGRATION CONTEXT- END----------"); + } + + protected SchemaDifference computeDiff(final MigrationContext context, final DataRepository leftRepository, + final DataRepository rightRepository, final Set leftCandidates) { + logMigrationContext(context); + final SchemaDifference schemaDifference = new SchemaDifference(rightRepository.asDatabase(), + rightRepository.getDataSourceConfiguration().getTablePrefix()); + final Set leftDatabaseTables = getTables(context, leftRepository, leftCandidates); + LOG.info("LEFT Repo = " + leftRepository.getDatabaseProvider().getDbName()); + LOG.info("RIGHT Repo = " + 
rightRepository.getDatabaseProvider().getDbName()); + + if (LOG.isDebugEnabled()) { + try { + LOG.debug(" All tables in LEFT Repo " + leftRepository.getAllTableNames()); + LOG.debug(" All tables in RIGHT Repo " + rightRepository.getAllTableNames()); + } catch (final Exception e) { + LOG.error("Cannot fetch all Table Names" + e); + } + } + + // LOG.info(" -------------------------------"); + for (final TableCandidate leftCandidate : leftDatabaseTables) { + LOG.info(" Checking if Left Table exists --> " + leftCandidate.getFullTableName()); + final Table leftTable = leftRepository.asDatabase().findTable(leftCandidate.getFullTableName(), false); + if (leftTable == null) { + LOG.error(String.format("Table %s in DB %s cannot be found, but should exist", + leftCandidate.getFullTableName(), + leftRepository.getDataSourceConfiguration().getConnectionString())); + continue; + + // throw new RuntimeException(String.format("Table %s in DB %s + // cannot be found, but should exists", + // leftCandidate.getFullTableName(), + // leftRepository.getDataSourceConfiguration().getConnectionString())); + } + final String rightTableName = translateTableName(leftRepository, rightRepository, leftCandidate); + final Table rightTable = rightRepository.asDatabase().findTable(rightTableName, false); + if (rightTable == null) { + schemaDifference.getMissingTables().add(new TableKeyPair(leftTable.getName(), rightTableName)); + LOG.info("MISSING Table !! --> " + leftTable.getName() + " searched for " + rightTableName); + } else { + // LOG.info(" FOUND Table --> " + rightTable.getName()); + final Column[] leftTableColumns = leftTable.getColumns(); + for (final Column leftTableColumn : leftTableColumns) { + if (rightTable.findColumn(leftTableColumn.getName(), false) == null) { + LOG.info("Missing column --> " + leftTableColumn.getName() + " -->" + leftTable.getName()); + schemaDifference.getMissingColumnsInTable().put( + new TableKeyPair(leftTable.getName(), rightTable.getName()), leftTableColumn.getName()); + } + } + } + } + return schemaDifference; + } + + private String translateTableName(final DataRepository leftRepository, final DataRepository rightRepository, + final TableCandidate leftCandidate) { + String translatedTableName = rightRepository.getDataSourceConfiguration().getTablePrefix() + + leftCandidate.getBaseTableName(); + if (leftCandidate.isTypeSystemRelatedTable()) { + translatedTableName += rightRepository.getDataSourceConfiguration().getTypeSystemSuffix(); + } + // ORCALE_TEMP - START + /* + * if (!leftCandidate.getAdditionalSuffix().isEmpty() && + * translatedTableName.toLowerCase().endsWith(leftCandidate. 
+ * getAdditionalSuffix())) { + * //System.out.println("$$Translated name ends with LP " + + * translatedTableName); return translatedTableName; } + */ + // ORCALE_TEMP - END + return translatedTableName + leftCandidate.getAdditionalSuffix(); + } + + private Set getTables(final MigrationContext context, final DataRepository repository, + final Set candidates) { + return candidates.stream().filter(c -> dataCopyTableFilter.filter(context).test(c.getCommonTableName())) + .collect(Collectors.toSet()); + } + + public void setDataCopyTableFilter(final DataCopyTableFilter dataCopyTableFilter) { + this.dataCopyTableFilter = dataCopyTableFilter; + } + + public void setDatabaseMigrationReportStorageService( + final DatabaseMigrationReportStorageService databaseMigrationReportStorageService) { + this.databaseMigrationReportStorageService = databaseMigrationReportStorageService; + } + + public void setConfigurationService(final ConfigurationService configurationService) { + this.configurationService = configurationService; + } + + public void setCopyItemProvider(final CopyItemProvider copyItemProvider) { + this.copyItemProvider = copyItemProvider; + } + + public static class SchemaDifferenceResult { + private final SchemaDifference sourceSchema; + private final SchemaDifference targetSchema; + + public SchemaDifferenceResult(final SchemaDifference sourceSchema, final SchemaDifference targetSchema) { + this.sourceSchema = sourceSchema; + this.targetSchema = targetSchema; + } + + public SchemaDifference getSourceSchema() { + return sourceSchema; + } + + public SchemaDifference getTargetSchema() { + return targetSchema; + } + + public boolean hasDifferences() { + final boolean hasMissingTargetTables = getTargetSchema().getMissingTables().size() > 0; + final boolean hasMissingColumnsInTargetTable = getTargetSchema().getMissingColumnsInTable().size() > 0; + final boolean hasMissingSourceTables = getSourceSchema().getMissingTables().size() > 0; + final boolean hasMissingColumnsInSourceTable = getSourceSchema().getMissingColumnsInTable().size() > 0; + return hasMissingTargetTables || hasMissingColumnsInTargetTable || hasMissingSourceTables + || hasMissingColumnsInSourceTable; + } + } + + class DatabaseStatus { + private Database database; + + /** + * @return the database + */ + public Database getDatabase() { + return database; + } + + /** + * @param database + * the database to set + */ + public void setDatabase(final Database database) { + this.database = database; + } + + /** + * @return the hasSchemaDiff + */ + public boolean isHasSchemaDiff() { + return hasSchemaDiff; + } + + /** + * @param hasSchemaDiff + * the hasSchemaDiff to set + */ + public void setHasSchemaDiff(final boolean hasSchemaDiff) { + this.hasSchemaDiff = hasSchemaDiff; + } + + private boolean hasSchemaDiff; + } + + public static class SchemaDifference { + + private final Database database; + private final String prefix; + + private final List missingTables = new ArrayList<>(); + private final ListMultimap missingColumnsInTable = ArrayListMultimap.create(); + + public SchemaDifference(final Database database, final String prefix) { + this.database = database; + this.prefix = prefix; + + } + + public Database getDatabase() { + return database; + } + + public String getPrefix() { + return prefix; + } + + public List getMissingTables() { + return missingTables; + } + + public ListMultimap getMissingColumnsInTable() { + return missingColumnsInTable; + } + } + + public static class TableKeyPair { + private final String leftName; + private 
final String rightName; + + public TableKeyPair(final String leftName, final String rightName) { + this.leftName = leftName; + this.rightName = rightName; + } + + public String getLeftName() { + return leftName; + } + + public String getRightName() { + return rightName; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + final TableKeyPair that = (TableKeyPair) o; + return leftName.equals(that.leftName) && rightName.equals(that.rightName); + } + + @Override + public int hashCode() { + return Objects.hash(leftName, rightName); + } + } + +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/PipeDatabaseMigrationCopyService.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/PipeDatabaseMigrationCopyService.java index 88b3e60..862294a 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/PipeDatabaseMigrationCopyService.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/service/impl/PipeDatabaseMigrationCopyService.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -9,19 +9,22 @@ import com.google.common.base.Stopwatch; import com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants; import com.sap.cx.boosters.commercedbsync.scheduler.DatabaseCopyScheduler; +import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTaskRepository; +import com.sap.cx.boosters.commercedbsync.service.DatabaseMigrationCopyService; import com.sap.cx.boosters.commercedbsync.strategy.PipeWriterStrategy; import org.apache.commons.lang3.tuple.Pair; +import com.sap.cx.boosters.commercedbsync.DataThreadPoolConfig; import com.sap.cx.boosters.commercedbsync.concurrent.DataPipe; import com.sap.cx.boosters.commercedbsync.concurrent.DataPipeFactory; +import com.sap.cx.boosters.commercedbsync.concurrent.DataThreadPoolConfigBuilder; +import com.sap.cx.boosters.commercedbsync.concurrent.DataThreadPoolFactory; import com.sap.cx.boosters.commercedbsync.context.CopyContext; import com.sap.cx.boosters.commercedbsync.dataset.DataSet; -import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTaskRepository; -import com.sap.cx.boosters.commercedbsync.service.DatabaseMigrationCopyService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.slf4j.MDC; -import org.springframework.core.task.AsyncTaskExecutor; import org.springframework.core.task.TaskRejectedException; +import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; import org.springframework.util.backoff.BackOffExecution; import org.springframework.util.backoff.ExponentialBackOff; @@ -43,15 +46,16 @@ public class PipeDatabaseMigrationCopyService implements DatabaseMigrationCopySe private final DataPipeFactory pipeFactory; private final PipeWriterStrategy writerStrategy; - private final AsyncTaskExecutor executor; + private final DataThreadPoolFactory dataWriteTaskFactory; private final DatabaseCopyTaskRepository databaseCopyTaskRepository; private final DatabaseCopyScheduler scheduler; - - public PipeDatabaseMigrationCopyService(DataPipeFactory pipeFactory, PipeWriterStrategy writerStrategy, AsyncTaskExecutor executor, DatabaseCopyTaskRepository databaseCopyTaskRepository, DatabaseCopyScheduler scheduler) { + public 
PipeDatabaseMigrationCopyService(DataPipeFactory pipeFactory, + PipeWriterStrategy writerStrategy, DataThreadPoolFactory dataWriteTaskFactory, + DatabaseCopyTaskRepository databaseCopyTaskRepository, DatabaseCopyScheduler scheduler) { this.pipeFactory = pipeFactory; this.writerStrategy = writerStrategy; - this.executor = executor; + this.dataWriteTaskFactory = dataWriteTaskFactory; this.databaseCopyTaskRepository = databaseCopyTaskRepository; this.scheduler = scheduler; } @@ -59,7 +63,8 @@ public PipeDatabaseMigrationCopyService(DataPipeFactory pipeFactory, Pi @Override public void copyAllAsync(CopyContext context) { Set copyItems = context.getCopyItems(); - Deque>> tasksToSchedule = generateCopyTasks(context, copyItems); + Deque>> tasksToSchedule = generateCopyTasks(context, + copyItems); scheduleTasks(context, tasksToSchedule); } @@ -70,24 +75,24 @@ public void copyAllAsync(CopyContext context) { * @param copyItems * @return */ - private Deque>> generateCopyTasks(CopyContext context, Set copyItems) { - return copyItems.stream() - .map(item -> Pair.of(item, (Callable) () -> { - final Stopwatch timer = Stopwatch.createStarted(); - try (MDC.MDCCloseable ignored = MDC.putCloseable(CommercedbsyncConstants.MDC_PIPELINE, item.getPipelineName())) { - try { - copy(context, item); - } catch (Exception e) { - LOG.error("Failed to copy item", e); - return Boolean.FALSE; - } finally { - // ORACLE_TARGET ADDED duration in seconds - final Stopwatch endStop = timer.stop(); + private Deque>> generateCopyTasks(CopyContext context, + Set copyItems) { + return copyItems.stream().map(item -> Pair.of(item, (Callable) () -> { + final Stopwatch timer = Stopwatch.createStarted(); + try (MDC.MDCCloseable ignored = MDC.putCloseable(CommercedbsyncConstants.MDC_PIPELINE, + item.getPipelineName())) { + try { + copy(context, item); + } catch (Exception e) { + LOG.error("Failed to copy item", e); + return Boolean.FALSE; + } finally { + final Stopwatch endStop = timer.stop(); silentlyUpdateCompletedState(context, item, endStop.toString(), endStop.elapsed().getSeconds()); - } - } - return Boolean.TRUE; - })).collect(Collectors.toCollection(LinkedList::new)); + } + } + return Boolean.TRUE; + })).collect(Collectors.toCollection(LinkedList::new)); } /** @@ -116,10 +121,14 @@ private void copy(CopyContext copyContext, CopyContext.DataCopyItem item) throws * @param context * @param tasksToSchedule */ - private void scheduleTasks(CopyContext context, Deque>> tasksToSchedule) { + private void scheduleTasks(CopyContext context, + Deque>> tasksToSchedule) { List>> runningTasks = new ArrayList<>(); BackOffExecution backoff = null; CopyContext.DataCopyItem previousReject = null; + DataThreadPoolConfig poolConfig = new DataThreadPoolConfigBuilder(context.getMigrationContext()) + .withPoolSize(context.getMigrationContext().getMaxParallelTableCopy()).build(); + ThreadPoolTaskExecutor executor = dataWriteTaskFactory.create(context, poolConfig); try { while (tasksToSchedule.peekFirst() != null) { Pair> task = tasksToSchedule.removeFirst(); @@ -132,12 +141,16 @@ private void scheduleTasks(CopyContext context, Deque should we? + // note: further update activities not stopped here -> should we? 
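The refactored copy service above no longer receives a Spring AsyncTaskExecutor; it obtains its executor from a DataThreadPoolFactory, builds a DataThreadPoolConfig per run, and (as the later CopyPipeWriterStrategy change shows) destroys the executor when the run finishes. A minimal sketch of that create/use/destroy lifecycle, using only the builder and factory signatures visible in this change set; the wrapper method name is hypothetical and not part of the repository:

    import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
    import com.sap.cx.boosters.commercedbsync.DataThreadPoolConfig;
    import com.sap.cx.boosters.commercedbsync.concurrent.DataThreadPoolConfigBuilder;
    import com.sap.cx.boosters.commercedbsync.concurrent.DataThreadPoolFactory;
    import com.sap.cx.boosters.commercedbsync.context.CopyContext;

    class PoolLifecycleSketch {
        // Sketch: pool sized from the migration context, executor created and destroyed per run.
        void runWithManagedPool(DataThreadPoolFactory factory, CopyContext context) throws Exception {
            DataThreadPoolConfig poolConfig = new DataThreadPoolConfigBuilder(context.getMigrationContext())
                    .withPoolSize(context.getMigrationContext().getMaxParallelTableCopy()).build();
            ThreadPoolTaskExecutor executor = factory.create(context, poolConfig);
            try {
                // submit copy tasks to the executor here
            } finally {
                factory.destroy(executor); // mirrors the finally block added in CopyPipeWriterStrategy
            }
        }
    }

The same pattern appears twice in this change (table-level copies sized by getMaxParallelTableCopy, writer workers sized by getMaxParallelWriterWorkers), so the executor lifetime is now scoped to a single copy run rather than to the Spring context.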
} } catch (Exception e) { failOnError = migrationContext.isFailOnErrorEnabled(); @@ -52,4 +54,4 @@ public boolean failOnInitUpdateError() { return failOnError; } -} \ No newline at end of file +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/setup/MigrationSystemSetup.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/setup/MigrationSystemSetup.java deleted file mode 100644 index 869c1c7..0000000 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/setup/MigrationSystemSetup.java +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. - * License: Apache-2.0 - * - */ - -package com.sap.cx.boosters.commercedbsync.setup; - -import com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants; -import de.hybris.platform.core.initialization.SystemSetup; -import de.hybris.platform.core.initialization.SystemSetupContext; -import de.hybris.platform.servicelayer.config.ConfigurationService; -import org.apache.commons.lang.StringUtils; -import com.sap.cx.boosters.commercedbsync.context.MigrationContext; -import com.sap.cx.boosters.commercedbsync.service.DatabaseMigrationSynonymService; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * This class provides hooks into the system's initialization and update processes. - */ -@SystemSetup(extension = CommercedbsyncConstants.EXTENSIONNAME) -public class MigrationSystemSetup { - - private static final Logger LOG = LoggerFactory.getLogger(MigrationSystemSetup.class); - - private MigrationContext migrationContext; - private ConfigurationService configurationService; - private DatabaseMigrationSynonymService databaseMigrationSynonymService; - - public MigrationSystemSetup(MigrationContext migrationContext, ConfigurationService configurationService, DatabaseMigrationSynonymService databaseMigrationSynonymService) { - this.migrationContext = migrationContext; - this.configurationService = configurationService; - this.databaseMigrationSynonymService = databaseMigrationSynonymService; - } - - /** - * CCv2 Workaround: ccv2 builder does not support prefixes yet. - * creating synonym on ydeployments -> prefix_yeployments - * creating synonym on attributedescriptors -> prefix_attributedescriptors. - * - * @param context - * @throws Exception - */ - @SystemSetup(type = SystemSetup.Type.ESSENTIAL, process = SystemSetup.Process.ALL) - public void createEssentialData(final SystemSetupContext context) throws Exception { - String actualPrefix = configurationService.getConfiguration().getString("db.tableprefix"); - if (StringUtils.isNotEmpty(actualPrefix)) { - databaseMigrationSynonymService.recreateSynonyms(migrationContext.getDataTargetRepository(), actualPrefix); - } - } - -} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/PipeWriterStrategy.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/PipeWriterStrategy.java index 8b96053..780025b 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/PipeWriterStrategy.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/PipeWriterStrategy.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterContext.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterContext.java new file mode 100644 index 0000000..6f07be4 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterContext.java @@ -0,0 +1,78 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.strategy.impl; + +import com.sap.cx.boosters.commercedbsync.context.CopyContext; +import com.sap.cx.boosters.commercedbsync.performance.PerformanceRecorder; +import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTaskRepository; + +import java.util.List; +import java.util.Set; +import java.util.concurrent.atomic.AtomicLong; + +class CopyPipeWriterContext { + private final CopyContext context; + private final CopyContext.DataCopyItem copyItem; + private final List columnsToCopy; + private final Set nullifyColumns; + private final PerformanceRecorder performanceRecorder; + private final AtomicLong totalCount; + private final List upsertIds; + private final boolean requiresIdentityInsert; + private final DatabaseCopyTaskRepository databaseCopyTaskRepository; + + public CopyPipeWriterContext(CopyContext context, CopyContext.DataCopyItem copyItem, List columnsToCopy, + Set nullifyColumns, PerformanceRecorder performanceRecorder, AtomicLong totalCount, + List upsertIds, boolean requiresIdentityInsert, + DatabaseCopyTaskRepository databaseCopyTaskRepository) { + this.context = context; + this.copyItem = copyItem; + this.columnsToCopy = columnsToCopy; + this.nullifyColumns = nullifyColumns; + this.performanceRecorder = performanceRecorder; + this.totalCount = totalCount; + this.upsertIds = upsertIds; + this.requiresIdentityInsert = requiresIdentityInsert; + this.databaseCopyTaskRepository = databaseCopyTaskRepository; + } + + public CopyContext getContext() { + return context; + } + + public CopyContext.DataCopyItem getCopyItem() { + return copyItem; + } + + public List getColumnsToCopy() { + return columnsToCopy; + } + + public Set getNullifyColumns() { + return nullifyColumns; + } + + public PerformanceRecorder getPerformanceRecorder() { + return performanceRecorder; + } + + public AtomicLong getTotalCount() { + return totalCount; + } + + public List getUpsertIds() { + return upsertIds; + } + + public boolean isRequiresIdentityInsert() { + return requiresIdentityInsert; + } + + public DatabaseCopyTaskRepository getDatabaseCopyTaskRepository() { + return databaseCopyTaskRepository; + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterStrategy.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterStrategy.java index 5691b33..a86d040 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterStrategy.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterStrategy.java @@ -1,88 +1,70 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ package com.sap.cx.boosters.commercedbsync.strategy.impl; -import com.google.common.base.Joiner; -import com.google.common.base.Splitter; -import com.google.common.base.Stopwatch; -import com.microsoft.sqlserver.jdbc.SQLServerBulkCopy; -import com.microsoft.sqlserver.jdbc.SQLServerBulkCopyOptions; -import com.microsoft.sqlserver.jdbc.SQLServerConnection; import com.sap.cx.boosters.commercedbsync.concurrent.DataWorkerExecutor; import com.sap.cx.boosters.commercedbsync.concurrent.MaybeFinished; import com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants; import com.sap.cx.boosters.commercedbsync.performance.PerformanceCategory; import com.sap.cx.boosters.commercedbsync.performance.PerformanceRecorder; -import com.sap.cx.boosters.commercedbsync.performance.PerformanceUnit; +import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTask; import com.sap.cx.boosters.commercedbsync.strategy.PipeWriterStrategy; import de.hybris.bootstrap.ddl.DataBaseProvider; -import java.io.StringReader; -import java.util.Collections; - -import org.apache.commons.collections.MapUtils; import org.apache.commons.lang.StringUtils; +import com.sap.cx.boosters.commercedbsync.DataThreadPoolConfig; import com.sap.cx.boosters.commercedbsync.concurrent.DataPipe; -import com.sap.cx.boosters.commercedbsync.concurrent.DataWorkerPoolFactory; -import com.sap.cx.boosters.commercedbsync.concurrent.RetriableTask; +import com.sap.cx.boosters.commercedbsync.concurrent.DataThreadPoolConfigBuilder; +import com.sap.cx.boosters.commercedbsync.concurrent.DataThreadPoolFactory; +import com.sap.cx.boosters.commercedbsync.concurrent.impl.task.RetriableTask; import com.sap.cx.boosters.commercedbsync.concurrent.impl.DefaultDataWorkerExecutor; import com.sap.cx.boosters.commercedbsync.context.CopyContext; import com.sap.cx.boosters.commercedbsync.context.MigrationContext; -import com.sap.cx.boosters.commercedbsync.dataset.DataColumn; import com.sap.cx.boosters.commercedbsync.dataset.DataSet; -import com.sap.cx.boosters.commercedbsync.dataset.impl.DefaultDataSet; import com.sap.cx.boosters.commercedbsync.profile.DataSourceConfiguration; import com.sap.cx.boosters.commercedbsync.service.DatabaseCopyTaskRepository; -import com.sap.cx.boosters.commercedbsync.service.DatabaseMigrationDataTypeMapperService; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor; import java.sql.Connection; -import java.sql.PreparedStatement; import java.sql.ResultSet; import java.sql.ResultSetMetaData; import java.sql.SQLException; import java.sql.Statement; import java.util.ArrayList; import java.util.List; +import java.util.Optional; import java.util.Set; import java.util.TreeSet; import java.util.concurrent.atomic.AtomicLong; -import java.util.stream.Collectors; -import java.util.stream.IntStream; - public class CopyPipeWriterStrategy implements PipeWriterStrategy { private static final Logger LOG = LoggerFactory.getLogger(CopyPipeWriterStrategy.class); - private final DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService; - private final DatabaseCopyTaskRepository taskRepository; - private final DataWorkerPoolFactory dataWriteWorkerPoolFactory; + private final DataThreadPoolFactory dataWriteWorkerPoolFactory; private static final String LP_SUFFIX = "lp"; - public CopyPipeWriterStrategy(DatabaseMigrationDataTypeMapperService databaseMigrationDataTypeMapperService, DatabaseCopyTaskRepository taskRepository, 
DataWorkerPoolFactory dataWriteWorkerPoolFactory) { - this.databaseMigrationDataTypeMapperService = databaseMigrationDataTypeMapperService; + public CopyPipeWriterStrategy(DatabaseCopyTaskRepository taskRepository, + DataThreadPoolFactory dataWriteWorkerPoolFactory) { this.taskRepository = taskRepository; this.dataWriteWorkerPoolFactory = dataWriteWorkerPoolFactory; } @Override public void write(CopyContext context, DataPipe pipe, CopyContext.DataCopyItem item) throws Exception { - // ORACLE_TARGET - START - // Fetch the provider to figure out the name of the DBName - final DataBaseProvider dbProvider = context.getMigrationContext().getDataTargetRepository() - .getDatabaseProvider(); - // ORACLE_TARGET - END + final DataBaseProvider dbProvider = context.getMigrationContext().getDataTargetRepository() + .getDatabaseProvider(); String targetTableName = item.getTargetItem(); - PerformanceRecorder performanceRecorder = context.getPerformanceProfiler().createRecorder(PerformanceCategory.DB_WRITE, targetTableName); + PerformanceRecorder performanceRecorder = context.getPerformanceProfiler() + .createRecorder(PerformanceCategory.DB_WRITE, targetTableName); performanceRecorder.start(); Set excludedColumns = new TreeSet<>(String.CASE_INSENSITIVE_ORDER); if (context.getMigrationContext().getExcludedColumns().containsKey(targetTableName)) { @@ -97,9 +79,9 @@ public void write(CopyContext context, DataPipe pipe, CopyContext.DataC List columnsToCopy = new ArrayList<>(); try (Connection sourceConnection = context.getMigrationContext().getDataSourceRepository().getConnection(); - Statement stmt = sourceConnection.createStatement(); - ResultSet metaResult = stmt.executeQuery(String.format("select * from %s where 0 = 1", item.getSourceItem())); - ) { + Statement stmt = sourceConnection.createStatement(); + ResultSet metaResult = stmt + .executeQuery(String.format("select * from %s where 0 = 1", item.getSourceItem()))) { ResultSetMetaData sourceMeta = metaResult.getMetaData(); int columnCount = sourceMeta.getColumnCount(); for (int i = 1; i <= columnCount; i++) { @@ -111,29 +93,25 @@ public void write(CopyContext context, DataPipe pipe, CopyContext.DataC } if (columnsToCopy.isEmpty()) { - throw new IllegalStateException(String.format("%s: source has no columns or all columns excluded", item.getPipelineName())); + throw new IllegalStateException( + String.format("%s: source has no columns or all columns excluded", item.getPipelineName())); } - ThreadPoolTaskExecutor taskExecutor = dataWriteWorkerPoolFactory.create(context); + DataThreadPoolConfig threadPoolConfig = new DataThreadPoolConfigBuilder(context.getMigrationContext()) + .withPoolSize(context.getMigrationContext().getMaxParallelWriterWorkers()).build(); + ThreadPoolTaskExecutor taskExecutor = dataWriteWorkerPoolFactory.create(context, threadPoolConfig); DataWorkerExecutor workerExecutor = new DefaultDataWorkerExecutor<>(taskExecutor); Connection targetConnection = null; - AtomicLong totalCount = new AtomicLong(0); + AtomicLong totalCount = new AtomicLong( + taskRepository.findPipeline(context, item).map(DatabaseCopyTask::getTargetrowcount).orElse(0L)); List upsertIds = new ArrayList<>(); try { targetConnection = context.getMigrationContext().getDataTargetRepository().getConnection(); - // ORACLE_TARGET - START - pass the dbProvider and dsConfiguration - // information into the requiredidentityinsert function - boolean requiresIdentityInsert = false; - if (dbProvider.isPostgreSqlUsed()){ - // do nothing - } else { - requiresIdentityInsert = 
requiresIdentityInsert(item.getTargetItem(), targetConnection, - dbProvider, context.getMigrationContext().getDataTargetRepository().getDataSourceConfiguration()); - } - // ORACLE_TARGET - START - pass the dbProvider info into the - // requiredidentityinsert function + final boolean requiresIdentityInsert = requiresIdentityInsert(item.getTargetItem(), targetConnection, + dbProvider, context.getMigrationContext().getDataTargetRepository().getDataSourceConfiguration()); MaybeFinished sourcePage; boolean firstPage = true; + CopyPipeWriterContext copyPipeWriterContext = null; do { sourcePage = pipe.get(); if (sourcePage.isPoison()) { @@ -141,28 +119,29 @@ public void write(CopyContext context, DataPipe pipe, CopyContext.DataC } DataSet dataSet = sourcePage.getValue(); if (firstPage) { - doTruncateIfNecessary(context, item.getTargetItem()); + if (doTruncateIfNecessary(context, item)) { + totalCount.set(0); + taskRepository.updateTaskProgress(context, item, totalCount.get()); + } doTurnOnOffIndicesIfNecessary(context, item.getTargetItem(), false); if (context.getMigrationContext().isIncrementalModeEnabled()) { if (context.getMigrationContext().isLpTableMigrationEnabled() - && StringUtils.endsWithIgnoreCase(item.getSourceItem(),LP_SUFFIX)){ - determineLpUpsertId(upsertIds, dataSet); - } else{ - determineUpsertId(upsertIds, dataSet); + && StringUtils.endsWithIgnoreCase(item.getSourceItem(), LP_SUFFIX)) { + determineLpUpsertId(upsertIds, dataSet); + } else { + determineUpsertId(upsertIds, dataSet); } } + copyPipeWriterContext = new CopyPipeWriterContext(context, item, columnsToCopy, nullifyColumns, + performanceRecorder, totalCount, upsertIds, requiresIdentityInsert, taskRepository); firstPage = false; } if (dataSet.isNotEmpty()) { - DataWriterContext dataWriterContext = new DataWriterContext(context, item, dataSet, columnsToCopy, nullifyColumns, performanceRecorder, totalCount, upsertIds, requiresIdentityInsert); - RetriableTask writerTask = createWriterTask(dataWriterContext); + RetriableTask writerTask = createWriterTask(copyPipeWriterContext, dataSet); workerExecutor.safelyExecute(writerTask); } } while (!sourcePage.isDone()); workerExecutor.waitAndRethrowUncaughtExceptions(); - if (taskExecutor != null) { - taskExecutor.shutdown(); - } } catch (Exception e) { pipe.requestAbort(e); if (e instanceof InterruptedException) { @@ -170,49 +149,48 @@ public void write(CopyContext context, DataPipe pipe, CopyContext.DataC } throw e; } finally { + if (taskExecutor != null) { + dataWriteWorkerPoolFactory.destroy(taskExecutor); + } if (targetConnection != null) { doTurnOnOffIndicesIfNecessary(context, item.getTargetItem(), true); targetConnection.close(); } - updateProgress(context, item, totalCount.get()); - } - } - private void switchIdentityInsert(Connection connection, final String tableName, boolean on) { - try (Statement stmt = connection.createStatement()) { - String onOff = on ? 
"ON" : "OFF"; - stmt.executeUpdate(String.format("SET IDENTITY_INSERT %s %s", tableName, onOff)); - } catch (final Exception e) { - //TODO using brute force FIX + taskRepository.updateTaskProgress(context, item, totalCount.get()); } } - protected void executeBatch(CopyContext.DataCopyItem item, PreparedStatement preparedStatement, long batchCount, PerformanceRecorder recorder) throws SQLException { - final Stopwatch timer = Stopwatch.createStarted(); - preparedStatement.executeBatch(); - preparedStatement.clearBatch(); - LOG.debug("Batch written ({} items) for table '{}' in {}", batchCount, item.getTargetItem(), timer.stop().toString()); - recorder.record(PerformanceUnit.ROWS, batchCount); - } - - private void updateProgress(CopyContext context, CopyContext.DataCopyItem item, long totalCount) { - try { - taskRepository.updateTaskProgress(context, item, totalCount); - } catch (Exception e) { - LOG.warn("Could not update progress", e); + private boolean doTruncateIfNecessary(CopyContext context, CopyContext.DataCopyItem item) throws Exception { + String targetTableName = item.getTargetItem(); + if (context.getMigrationContext().isSchedulerResumeEnabled()) { + Optional pipeline = taskRepository.findPipeline(context, item); + if (pipeline.isPresent()) { + DatabaseCopyTask databaseCopyTask = pipeline.get(); + /* + * check if table was initially truncated. Could happen that batches are + * scheduled but migration was aborted before truncation. + */ + if (databaseCopyTask.isTruncated()) { + return false; + } + } } - } - - protected void doTruncateIfNecessary(CopyContext context, String targetTableName) throws Exception { if (context.getMigrationContext().isTruncateEnabled()) { if (!context.getMigrationContext().getTruncateExcludedTables().contains(targetTableName)) { - assertTruncateAllowed(context, targetTableName); + assertTruncateAllowed(context); context.getMigrationContext().getDataTargetRepository().truncateTable(targetTableName); + taskRepository.markTaskTruncated(context, item); + return true; + } else { + taskRepository.markTaskTruncated(context, item); } } + return false; } - protected void doTurnOnOffIndicesIfNecessary(CopyContext context, String targetTableName, boolean on) throws Exception { + protected void doTurnOnOffIndicesIfNecessary(CopyContext context, String targetTableName, boolean on) + throws Exception { if (context.getMigrationContext().isDropAllIndexesEnabled()) { if (!on) { LOG.debug("{} indexes for table '{}'", "Dropping", targetTableName); @@ -235,863 +213,83 @@ protected void doTurnOnOffIndicesIfNecessary(CopyContext context, String targetT } } - protected void assertTruncateAllowed(CopyContext context, String targetTableName) throws Exception { + protected void assertTruncateAllowed(CopyContext context) { if (context.getMigrationContext().isIncrementalModeEnabled()) { - throw new IllegalStateException("Truncating tables in incremental mode is illegal. Change the property " + CommercedbsyncConstants.MIGRATION_DATA_TRUNCATE_ENABLED + " to false"); + throw new IllegalStateException("Truncating tables in incremental mode is illegal. 
Change the property " + + CommercedbsyncConstants.MIGRATION_DATA_TRUNCATE_ENABLED + " to false"); } } - protected boolean isColumnOverride(CopyContext context, CopyContext.DataCopyItem item, String sourceColumnName) { - return MapUtils.isNotEmpty(item.getColumnMap()) && item.getColumnMap().containsKey(sourceColumnName); - } - - protected boolean isColumnOverride(CopyContext context, CopyContext.DataCopyItem item) { - return MapUtils.isNotEmpty(item.getColumnMap()); - } - - private PreparedStatement createPreparedStatement(final CopyContext context, final String targetTableName, - final List columnsToCopy, final List upsertIds, final Connection targetConnection) - throws Exception { - if (context.getMigrationContext().isIncrementalModeEnabled()) { - if (!upsertIds.isEmpty()) { - // ORACLE_TARGET - START - String sqlBuild = ""; - if (context.getMigrationContext().getDataTargetRepository().getDatabaseProvider().isOracleUsed()) { - sqlBuild = getBulkUpsertStatementOracle(targetTableName, columnsToCopy, upsertIds.get(0)); - } else if (context.getMigrationContext().getDataTargetRepository().getDatabaseProvider().isHanaUsed()) { - sqlBuild = getBulkUpsertStatementHana(targetTableName, columnsToCopy, upsertIds); - } else if (context.getMigrationContext().getDataTargetRepository().getDatabaseProvider().isPostgreSqlUsed()) { - sqlBuild = getBulkUpsertStatementPostGres(targetTableName, columnsToCopy, upsertIds.get(0)); - } - else { - sqlBuild = getBulkUpsertStatement(targetTableName, columnsToCopy, upsertIds); - } - return targetConnection.prepareStatement(sqlBuild); - // ORACLE_TARGET - END - } else { - throw new RuntimeException( - "The incremental approach can only be used on tables that have a valid identifier like PK or ID"); - } - } else { - return targetConnection.prepareStatement(getBulkInsertStatement(targetTableName, columnsToCopy, - columnsToCopy.stream().map(column -> "?").collect(Collectors.toList()))); - } - } - - private String getBulkInsertStatement(String targetTableName, List columnsToCopy, List columnsToCopyValues) { - return "INSERT INTO " + targetTableName + " " + getBulkInsertStatementParamList(columnsToCopy, columnsToCopyValues); - } - - private String getBulkInsertStatementParamList(List columnsToCopy, List columnsToCopyValues) { - return "(" - + String.join(", ", columnsToCopy) + ") VALUES (" - + columnsToCopyValues.stream().collect(Collectors.joining(", ")) - + ")"; - } - - private String getBulkUpdateStatementParamList(List columnsToCopy, List columnsToCopyValues) { - return "SET " + IntStream.range(0, columnsToCopy.size()).mapToObj(idx -> String.format("%s = %s", columnsToCopy.get(idx), columnsToCopyValues.get(idx))).collect(Collectors.joining(", ")); - } - - // ORACLE_TARGET -- START - private String getBulkUpdateStatementParamListOracle(final List columnsToCopy, - final List columnsToCopyValues) { - - final List columnsToCopyMinusPK = columnsToCopy.stream().filter(s -> !s.equalsIgnoreCase("PK")) - .collect(Collectors.toList()); - final List columnsToCopyValuesMinusPK = columnsToCopyValues.stream() - .filter(s -> !s.equalsIgnoreCase("s.PK")).collect(Collectors.toList()); - LOG.debug("getBulkUpdateStatementParamListOracle - columnsToCopyMinusPK =" + columnsToCopyMinusPK); - return "SET " + IntStream.range(0, columnsToCopyMinusPK.size()).mapToObj( - idx -> String.format("%s = %s", columnsToCopyMinusPK.get(idx), columnsToCopyValuesMinusPK.get(idx))) - .collect(Collectors.joining(", ")); - } - // ORACLE_TARGET -- END - private void determineUpsertId(List upsertIds ,DataSet 
dataSet) { + private void determineUpsertId(List upsertIds, DataSet dataSet) { if (dataSet.hasColumn("PK")) { upsertIds.add("PK"); return; } else if (dataSet.hasColumn("ID")) { upsertIds.add("ID"); - return; } else { - //should we support more IDs? In the hybris context there is hardly any other with regards to transactional data. - return ; + // should we support more IDs? In the hybris context there is hardly any other + // with regards to transactional data. } } - private void determineLpUpsertId(List upsertIds ,DataSet dataSet) { - if (dataSet.hasColumn("ITEMPK") - && dataSet.hasColumn("LANGPK")) { + private void determineLpUpsertId(List upsertIds, DataSet dataSet) { + if (dataSet.hasColumn("ITEMPK") && dataSet.hasColumn("LANGPK")) { upsertIds.add("ITEMPK"); upsertIds.add("LANGPK"); - return; - } else{ - //should we support more IDs? In the hybris context there is hardly any other with regards to transactional data. - return; + } else { + // should we support more IDs? In the hybris context there is hardly any other + // with regards to transactional data. } } - private String getBulkUpsertStatement(String targetTableName, List columnsToCopy, List upsertIds) { - /* - * https://michaeljswart.com/2017/07/sql-server-upsert-patterns-and-antipatterns/ - * We are not using a stored procedure here as CCv2 does not grant sp exec permission to the default db user - */ - StringBuilder sqlBuilder = new StringBuilder(); - sqlBuilder.append(String.format("MERGE %s WITH (HOLDLOCK) AS t", targetTableName)); - sqlBuilder.append("\n"); - sqlBuilder.append(String.format("USING (SELECT %s) AS s ON ", Joiner.on(',').join(columnsToCopy.stream().map(column -> "? " + column).collect(Collectors.toList())))); - sqlBuilder.append(String.format("( %s )" , upsertIds.stream().map(column -> String.format(" t.%s = s.%s",column,column)).collect(Collectors.joining(" AND ")))); - sqlBuilder.append("\n"); - sqlBuilder.append("WHEN MATCHED THEN UPDATE"); //update - sqlBuilder.append("\n"); - sqlBuilder.append(getBulkUpdateStatementParamList(columnsToCopy, columnsToCopy.stream().map(column -> "s." + column).collect(Collectors.toList()))); - sqlBuilder.append("\n"); - sqlBuilder.append("WHEN NOT MATCHED THEN INSERT"); //insert - sqlBuilder.append("\n"); - sqlBuilder.append(getBulkInsertStatementParamList(columnsToCopy, columnsToCopy.stream().map(column -> "s." + column).collect(Collectors.toList()))); - sqlBuilder.append(";"); - // ORACLE_TARGET - LOG.debug("UPSERT SQL SERVER SQl builder=" + sqlBuilder.toString()); - return sqlBuilder.toString(); - } - - // ORACLE_TARGET - START - private String getBulkUpsertStatementOracle(final String targetTableName, final List columnsToCopy, - final String columnId) { - - final StringBuilder sqlBuilder = new StringBuilder(); - sqlBuilder.append(String.format("MERGE INTO %s t", targetTableName)); - sqlBuilder.append("\n"); - sqlBuilder.append(String.format("USING (SELECT %s from dual) s ON (t.%s = s.%s)", - Joiner.on(',').join(columnsToCopy.stream().map(column -> "? " + column).collect(Collectors.toList())), - columnId, columnId)); - sqlBuilder.append("\n"); - sqlBuilder.append("WHEN MATCHED THEN UPDATE"); // update - sqlBuilder.append("\n"); - sqlBuilder.append(getBulkUpdateStatementParamListOracle(columnsToCopy, - columnsToCopy.stream().map(column -> "s." 
+ column).collect(Collectors.toList()))); - sqlBuilder.append("\n"); - sqlBuilder.append("WHEN NOT MATCHED THEN INSERT"); // insert - sqlBuilder.append("\n"); - sqlBuilder.append(getBulkInsertStatementParamList(columnsToCopy, - columnsToCopy.stream().map(column -> "s." + column).collect(Collectors.toList()))); - // sqlBuilder.append(";"); - // ORACLE_TARGET - LOG.debug("UPSERT ORACLE SQl builder=" + sqlBuilder.toString()); - return sqlBuilder.toString(); - } - // ORACLE_TARGET - END - - private String getBulkUpsertStatementPostGres(final String targetTableName, final List columnsToCopy, - final String columnId) { - - final StringBuilder sqlBuilder = new StringBuilder(); - sqlBuilder.append(String.format("MERGE INTO %s t", targetTableName)); - sqlBuilder.append("\n"); - sqlBuilder.append(String.format("USING (SELECT %s from dual) s ON (t.%s = s.%s)", - Joiner.on(',').join(columnsToCopy.stream().map(column -> "? " + column).collect(Collectors.toList())), - columnId, columnId)); - sqlBuilder.append("\n"); - sqlBuilder.append("WHEN MATCHED THEN UPDATE"); // update - sqlBuilder.append("\n"); - sqlBuilder.append(getBulkUpdateStatementParamListOracle(columnsToCopy, - columnsToCopy.stream().map(column -> "s." + column).collect(Collectors.toList()))); - sqlBuilder.append("\n"); - sqlBuilder.append("WHEN NOT MATCHED THEN INSERT"); // insert - sqlBuilder.append("\n"); - sqlBuilder.append(getBulkInsertStatementParamList(columnsToCopy, - columnsToCopy.stream().map(column -> "s." + column).collect(Collectors.toList()))); - // sqlBuilder.append(";"); - // ORACLE_TARGET - LOG.debug("UPSERT PostGres SQl builder=" + sqlBuilder.toString()); - return sqlBuilder.toString(); - } - - private String getBulkUpsertStatementHana(final String targetTableName, final List columnsToCopy, - List upsertIds) { - final StringBuilder sqlBuilder = new StringBuilder(); - sqlBuilder.append(String.format("MERGE INTO %s t", targetTableName)); - sqlBuilder.append("\n"); - sqlBuilder.append(String.format("USING (SELECT %s from dummy) s ON ", Joiner.on(',').join(columnsToCopy.stream().map(column -> "? " + column).collect(Collectors.toList())))); - sqlBuilder.append(String.format("( %s )" , upsertIds.stream().map(column -> String.format(" t.%s = s.%s",column,column)).collect(Collectors.joining(" AND ")))); - sqlBuilder.append("\n"); - sqlBuilder.append("WHEN MATCHED THEN UPDATE"); // update - sqlBuilder.append("\n"); - sqlBuilder.append(getBulkUpdateStatementParamListOracle(columnsToCopy, - columnsToCopy.stream().map(column -> "s." + column).collect(Collectors.toList()))); - sqlBuilder.append("\n"); - sqlBuilder.append("WHEN NOT MATCHED THEN INSERT"); // insert - sqlBuilder.append("\n"); - sqlBuilder.append(getBulkInsertStatementParamList(columnsToCopy, - columnsToCopy.stream().map(column -> "s." + column).collect(Collectors.toList()))); - // sqlBuilder.append(";"); - // ORACLE_TARGET - LOG.debug("UPSERT HANA SQl builder=" + sqlBuilder.toString()); - return sqlBuilder.toString(); - } - - private String getBulkDeleteStatement(String targetTableName, String columnId) { - /* - * https://michaeljswart.com/2017/07/sql-server-upsert-patterns-and-antipatterns/ - * We are not using a stored procedure here as CCv2 does not grant sp exec permission to the default db user - */ - StringBuilder sqlBuilder = new StringBuilder(); - sqlBuilder.append(String.format("MERGE %s WITH (HOLDLOCK) AS t", targetTableName)); - sqlBuilder.append("\n"); - sqlBuilder.append(String.format("USING (SELECT %s) AS s ON t.%s = s.%s", "? 
" + columnId, columnId, columnId)); - sqlBuilder.append("\n"); - sqlBuilder.append("WHEN MATCHED THEN DELETE"); //DELETE - sqlBuilder.append(";"); - // ORACLE_TARGET - LOG.debug("MERGE-DELETE SQL Server " + sqlBuilder.toString()); - return sqlBuilder.toString(); - } - - // ORACLE_TARGET - START - private String getBulkDeleteStatementOracle(final String targetTableName, final String columnId) { - final StringBuilder sqlBuilder = new StringBuilder(); - sqlBuilder.append(String.format("MERGE INTO %s t", targetTableName)); - sqlBuilder.append("\n"); - // sqlBuilder.append(String.format("USING (SELECT %s , '2022-02-15 - // 10:48:49.496' modifiedTS from dual) s ON (t.%s = s.%s)", - // "? " + columnId, columnId, columnId)); - sqlBuilder.append( - String.format("USING (SELECT %s from dual) s ON (t.%s = s.%s)", "? " + columnId, columnId, columnId)); - sqlBuilder.append("\n"); - sqlBuilder.append("WHEN MATCHED THEN "); // DELETE - sqlBuilder.append("UPDATE SET t.HJMPTS = 0 "); // IS INSERT OR UPDATE - // MANDATORY, therefore - // setting a dummy - // value. Hopefully - // HJMPTS is present in - // all tables - sqlBuilder.append("DELETE WHERE " + String.format(" t.%s = s.%s ", columnId, columnId));// DELETE - // is - // OPTIONAL - // sqlBuilder.append(";"); - // ORACLE_TARGET - LOG.debug("MERGE-DELETE ORACLE " + sqlBuilder.toString()); - return sqlBuilder.toString(); - } - // ORACLE_TARGET - END - - // ORACLE_TARGET -- START Helper Function 1 - private StringBuilder buildSqlForIdentityInsertCheck(final String targetTableName, - final DataBaseProvider dbProvider, final DataSourceConfiguration dsConfig) { - final StringBuilder sqlBuilder = new StringBuilder(); + private String buildSqlForIdentityInsertCheck(final String targetTableName, final DataBaseProvider dbProvider, + final DataSourceConfiguration dsConfig) { + final String sql; if (dbProvider.isMssqlUsed()) { - sqlBuilder.append("SELECT \n"); - sqlBuilder.append("count(*)\n"); - sqlBuilder.append("FROM sys.columns\n"); - sqlBuilder.append("WHERE\n"); - sqlBuilder.append(String.format("object_id = object_id('%s')\n", targetTableName)); - sqlBuilder.append("AND\n"); - sqlBuilder.append("is_identity = 1\n"); - sqlBuilder.append(";\n"); + sql = String.format( + "SELECT CASE WHEN COUNT(*) > 0 THEN 1 ELSE 0 END AS BIT FROM sys.columns WHERE object_id = object_id('%s') AND is_identity = 1", + targetTableName); } else if (dbProvider.isOracleUsed()) { - // get schema name - final String schema = dsConfig.getSchema(); - sqlBuilder.append("SELECT \n"); - sqlBuilder.append("has_identity\n"); - sqlBuilder.append("FROM dba_tables\n"); - sqlBuilder.append("WHERE\n"); - sqlBuilder.append(String.format("UPPER(table_name) = UPPER('%s')\n", targetTableName)); - sqlBuilder.append(String.format(" AND UPPER(owner) = UPPER('%s')\n", schema)); - // sqlBuilder.append(";\n"); + sql = String.format( + "SELECT has_identity FROM all_tables WHERE UPPER(table_name) = UPPER('%s') AND UPPER(owner) = UPPER('%s')", + targetTableName, dsConfig.getSchema()); } else if (dbProvider.isHanaUsed()) { - // get schema name - final String schema = dsConfig.getSchema(); - sqlBuilder.append("SELECT \n"); - sqlBuilder.append("is_insert_only\n"); - sqlBuilder.append("FROM public.tables\n"); - sqlBuilder.append("WHERE\n"); - sqlBuilder.append(String.format("table_name = UPPER('%s')\n", targetTableName)); - sqlBuilder.append(String.format(" AND schema_name = UPPER('%s')\n", schema)); - // sqlBuilder.append(";\n"); - } - else { - sqlBuilder.append("SELECT \n"); - sqlBuilder.append("count(*)\n"); - 
sqlBuilder.append("FROM sys.columns\n"); - sqlBuilder.append("WHERE\n"); - sqlBuilder.append(String.format("object_id = object_id('%s')\n", targetTableName)); - sqlBuilder.append("AND\n"); - sqlBuilder.append("is_identity = 1\n"); - sqlBuilder.append(";\n"); - } - LOG.debug("IDENTITY check SQL -> " + sqlBuilder); - return sqlBuilder; - } - // ORACLE_TARGET -- END - - // ORACLE_TARGET -- START Helper Function 2 - private boolean checkIdentityfromResultSet(final ResultSet resultSet, final DataBaseProvider dbProvider) - throws SQLException { - boolean requiresIdentityInsert = false; - - final String dbName = dbProvider.getDbName().toLowerCase(); - if (resultSet.next()) { - if (dbProvider.isMssqlUsed()) { - requiresIdentityInsert = resultSet.getInt(1) > 0; - } else if (dbProvider.isOracleUsed()) { - requiresIdentityInsert = resultSet.getBoolean(1); - } else if (dbProvider.isHanaUsed()) { - requiresIdentityInsert = resultSet.getBoolean(1); - } else{ - requiresIdentityInsert = resultSet.getInt(1) > 0; - } + sql = String.format( + "SELECT is_insert_only FROM public.tables WHERE table_name = UPPER('%s') AND schema_name = UPPER('%s')", + targetTableName, dsConfig.getSchema()); + } else { + throw new UnsupportedOperationException( + "Database type '" + dbProvider.getDbName() + "' does not require identity insert state changes"); } - return requiresIdentityInsert; - + LOG.debug("IDENTITY check SQL: " + sql); + return sql; } - // ORACLE_TARGET -- END - // ORACLE_TARGET -- START private boolean requiresIdentityInsert(final String targetTableName, final Connection targetConnection, - final DataBaseProvider dbProvider, final DataSourceConfiguration dsConfig) { - final StringBuilder sqlBuilder = buildSqlForIdentityInsertCheck(targetTableName, dbProvider, dsConfig); - - try ( - final Statement statement = targetConnection.createStatement(); - final ResultSet resultSet = statement.executeQuery(sqlBuilder.toString()); - ){ - final boolean requiresIdentityInsert = checkIdentityfromResultSet(resultSet, dbProvider); + final DataBaseProvider dbProvider, final DataSourceConfiguration dsConfig) { + if (dbProvider.isPostgreSqlUsed() || dbProvider == DataBaseProvider.MYSQL) { + return false; + } - return requiresIdentityInsert; + try (final Statement statement = targetConnection.createStatement(); + final ResultSet resultSet = statement + .executeQuery(buildSqlForIdentityInsertCheck(targetTableName, dbProvider, dsConfig))) { + return resultSet.next() && resultSet.getBoolean(1); } catch (SQLException e) { - throw new RuntimeException(e); - } - } - // ORACLE_TARGET -- END + throw new RuntimeException("Failed to check identity insert state", e); + } catch (UnsupportedOperationException e) { + LOG.debug("Unsupported identity check: {}", e.getMessage()); - private boolean requiresIdentityInsert(String targetTableName, Connection targetConnection) { - StringBuilder sqlBuilder = new StringBuilder(); - sqlBuilder.append("SELECT \n"); - sqlBuilder.append("count(*)\n"); - sqlBuilder.append("FROM sys.columns\n"); - sqlBuilder.append("WHERE\n"); - sqlBuilder.append(String.format("object_id = object_id('%s')\n", targetTableName)); - sqlBuilder.append("AND\n"); - sqlBuilder.append("is_identity = 1\n"); - sqlBuilder.append(";\n"); - try ( - Statement statement = targetConnection.createStatement(); - ResultSet resultSet = statement.executeQuery(sqlBuilder.toString()); - ) { - boolean requiresIdentityInsert = false; - if (resultSet.next()) { - requiresIdentityInsert = resultSet.getInt(1) > 0; - } - return requiresIdentityInsert; - } 
- catch (SQLException e) { - throw new RuntimeException(e); - } - catch (Exception e) { - throw new RuntimeException(e); + return false; } } - private RetriableTask createWriterTask(DataWriterContext dwc) { + private RetriableTask createWriterTask(CopyPipeWriterContext dwc, DataSet dataSet) { MigrationContext ctx = dwc.getContext().getMigrationContext(); - if(ctx.isDeletionEnabled()){ - return new DataDeleteWriterTask(dwc); + if (ctx.isDeletionEnabled()) { + return new DataDeleteWriterTask(dwc, dataSet); } else { - - if (!ctx.isBulkCopyEnabled()) { - return new DataWriterTask(dwc); - } else { - boolean noNullification = dwc.getNullifyColumns().isEmpty(); - boolean noIncremental = !ctx.isIncrementalModeEnabled(); - boolean noColumnOverride = !isColumnOverride(dwc.getContext(), dwc.getCopyItem()); - if (noNullification && noIncremental && noColumnOverride) { - LOG.warn("EXPERIMENTAL: Using bulk copy for {}", - dwc.getCopyItem().getTargetItem()); - return new DataBulkWriterTask(dwc); - } else { - return new DataWriterTask(dwc); - } - } + return new CopyPipeWriterTask(dwc, dataSet); } } - - private static class DataWriterContext { - private CopyContext context; - private CopyContext.DataCopyItem copyItem; - private DataSet dataSet; - private List columnsToCopy; - private Set nullifyColumns; - private PerformanceRecorder performanceRecorder; - private AtomicLong totalCount; - private List upsertIds; - private boolean requiresIdentityInsert; - - public DataWriterContext(CopyContext context, CopyContext.DataCopyItem copyItem, DataSet dataSet, List columnsToCopy, Set nullifyColumns, PerformanceRecorder performanceRecorder, AtomicLong totalCount, List upsertIds, boolean requiresIdentityInsert) { - this.context = context; - this.copyItem = copyItem; - this.dataSet = dataSet; - this.columnsToCopy = columnsToCopy; - this.nullifyColumns = nullifyColumns; - this.performanceRecorder = performanceRecorder; - this.totalCount = totalCount; - this.upsertIds = upsertIds; - this.requiresIdentityInsert = requiresIdentityInsert; - } - - public CopyContext getContext() { - return context; - } - - public CopyContext.DataCopyItem getCopyItem() { - return copyItem; - } - - public DataSet getDataSet() { - return dataSet; - } - - public List getColumnsToCopy() { - return columnsToCopy; - } - - public Set getNullifyColumns() { - return nullifyColumns; - } - - public PerformanceRecorder getPerformanceRecorder() { - return performanceRecorder; - } - - public AtomicLong getTotalCount() { - return totalCount; - } - - public List getUpsertId() { - return upsertIds; - } - - public boolean isRequiresIdentityInsert() { - return requiresIdentityInsert; - } - } - - private class DataWriterTask extends RetriableTask { - - private DataWriterContext ctx; - - public DataWriterTask(DataWriterContext ctx) { - super(ctx.getContext(), ctx.getCopyItem().getTargetItem()); - this.ctx = ctx; - } - - @Override - protected Boolean internalRun() { - try { - if (!ctx.getDataSet().getAllResults().isEmpty()) { - process(); - } - return Boolean.TRUE; - } catch (Exception e) { - //LOG.error("Error while executing table task " + ctx.getCopyItem().getTargetItem(),e); - throw new RuntimeException("Error processing writer task for " + ctx.getCopyItem().getTargetItem(), e); - } - } - - private void process() throws Exception { - Connection connection = null; - Boolean originalAutoCommit = null; - boolean requiresIdentityInsert = ctx.isRequiresIdentityInsert(); - try { - connection = 
ctx.getContext().getMigrationContext().getDataTargetRepository().getConnection(); - // ORACLE_TARGET - START Fetch the provider to figure out the - // name of the DBName - final DataBaseProvider dbProvider = ctx.getContext().getMigrationContext().getDataTargetRepository() - .getDatabaseProvider(); - LOG.debug("TARGET DB name = " + dbProvider.getDbName() + " SOURCE TABLE = " + ctx.getCopyItem().getSourceItem() - + ", TARGET Table = " + ctx.getCopyItem().getTargetItem()); - /* - * if - * (ctx.getCopyItem().getTargetItem().equalsIgnoreCase("medias") - * ) { return; } - */ - // ORACLE_TARGET - END Fetch the provider to figure out the name - // of the DBName - originalAutoCommit = connection.getAutoCommit(); - try (PreparedStatement bulkWriterStatement = createPreparedStatement(ctx.getContext(), ctx.getCopyItem().getTargetItem(), ctx.getColumnsToCopy(), ctx.getUpsertId(), connection); - Statement tempStmt = connection.createStatement(); - ResultSet tempTargetRs = tempStmt.executeQuery(String.format("select * from %s where 0 = 1", ctx.getCopyItem().getTargetItem()))) { - connection.setAutoCommit(false); - if (requiresIdentityInsert) { - switchIdentityInsert(connection, ctx.getCopyItem().getTargetItem(), true); - } - // ORACLE_TARGET - START - just to print once, helpful to - // debug issues at the time of actual copy. - boolean printed2004 = false; - boolean printed2005 = false; - final boolean printedDef = false; - // ORACLE_TARGET - END - just to print once, helpful to - // debug issues at the time of actual copy. - for (List row : ctx.getDataSet().getAllResults()) { - int sourceColumnTypeIdx = 0; - int paramIdx = 1; - for (String sourceColumnName : ctx.getColumnsToCopy()) { - int targetColumnIdx = tempTargetRs.findColumn(sourceColumnName); - DataColumn sourceColumnType = ((DefaultDataSet) ctx.getDataSet()).getColumnOrder().get(sourceColumnTypeIdx); - int targetColumnType = tempTargetRs.getMetaData().getColumnType(targetColumnIdx); - if (ctx.getNullifyColumns().contains(sourceColumnName)) { - bulkWriterStatement.setNull(paramIdx, targetColumnType); - LOG.trace("Column {} is nullified. Setting NULL value...", sourceColumnName); - } else { - if (isColumnOverride(ctx.getContext(), ctx.getCopyItem(), sourceColumnName)) { - bulkWriterStatement.setObject(paramIdx, ctx.getCopyItem().getColumnMap().get(sourceColumnName), targetColumnType); - } else { - Object sourceColumnValue = null; - if(dbProvider.isPostgreSqlUsed()){ - sourceColumnValue = ctx.getDataSet().getColumnValueForPostGres(sourceColumnName, row,sourceColumnType,targetColumnType); - } - else if(dbProvider.isHanaUsed()){ - sourceColumnValue = ((DefaultDataSet) ctx.getDataSet()).getColumnValueForHANA(sourceColumnName, row,sourceColumnType,targetColumnType); - } - else{ - sourceColumnValue = ctx.getDataSet().getColumnValue(sourceColumnName, row); - } - if (sourceColumnValue != null) { - // ##ORACLE_TARGET -- START TRY-catch to - // catch all exceptions, not to print - // each time, print one for each - // type/worker. - try { - if (! 
dbProvider.isOracleUsed()) { - // for all cases non-oracle - bulkWriterStatement.setObject(paramIdx, sourceColumnValue, - targetColumnType); - } else { - // if type is oracle, then there - // are a bunch of exceptions - // when the type is 2004, 2005 - // 2004 = BLOB , 2005 = CLOB - switch (targetColumnType) { - /* - * code to handle BLOB, because - * setObject throws exception - * example Products.p_buyerids - * is varbinary(max) in - * (sqlserver) AND blob in - * (oracle) - */ - // TODO Use Constant definitions - case 2004: { - // temp debug code - start - // ....only to print once.. - if (!printed2004) { - LOG.debug("BLOB 2004 sourceColumnName = " + sourceColumnName - + " souce value type CN=" - + sourceColumnValue.getClass().getCanonicalName() - + " , Name = " + sourceColumnValue.getClass().getName() - + " , Type Name = " - + sourceColumnValue.getClass().getTypeName()); - printed2004 = true; - } - // temp debug code end - bulkWriterStatement.setBytes(paramIdx, (byte[]) sourceColumnValue); - break; - - } - /* - * code to handle CLOB, because - * setObject throws exception - * example Promotion.description - * is nvarchar(max) in - * (sqlserver) AND blob in - * (oracle) - */ - case 2005: { - // temp debug code - start - // ....only to print once.. - if (!printed2005) { - LOG.debug("CLOB 2005 sourceColumnName = " + sourceColumnName - + " souce value type CN=" - + sourceColumnValue.getClass().getCanonicalName() - + " , Name = " + sourceColumnValue.getClass().getName() - + " , Type Name = " - + sourceColumnValue.getClass().getTypeName()); - printed2005 = true; - } - // temp debug code end - // CLOB or NCLOB ?? String - // -> StringReader - // bulkWriterStatement.setBytes(paramIdx, - // (byte[]) - // sourceColumnValue); - // bulkWriterStatement.setClob(paramIdx, - // (Clob) - // sourceColumnValue); - if (sourceColumnValue instanceof java.lang.String) { - final String clobString = (String) sourceColumnValue; - // typically a - // StringReader is - // enough, but exception - // occurs when the value - // is empty...therefore - // set to null - if (!clobString.isEmpty()) { - LOG.debug(" reading CLOB"); - // LOG.info("CLOB is - // not empty"); - bulkWriterStatement.setClob(paramIdx, - new StringReader((String) sourceColumnValue), - ((String) sourceColumnValue).length()); - LOG.debug(" wrote CLOB"); - } else { - LOG.debug("CLOB is empty...setting null"); - bulkWriterStatement.setNull(paramIdx, targetColumnType); - } - } - break; - } - default: { - bulkWriterStatement.setObject(paramIdx, sourceColumnValue, - targetColumnType); - break; - } - } - - } - } catch (final NumberFormatException e) { - /* - * To handle SqlServer CHAR -> - * Oracle Number. 
example - * Medias.p_fieldseparator - */ - LOG.error( - "NumberFormatException - Error setting Type on sourceColumnName = " - + sourceColumnName + ", sourceColumnValue = " - + sourceColumnValue + ", targetColumnType =" - + targetColumnType + ", source type = " - + sourceColumnValue.getClass().getTypeName()); - if (dbProvider.isOracleUsed()) { - if (sourceColumnValue instanceof java.lang.String) { - final char character = sourceColumnValue.toString().charAt(0); - final int ascii = character; - // 2 is NUMBER..need to use - // constants NOW - if (targetColumnType == 2) { - // bulkWriterStatement.setIn(paramIdx, - // ascii, - // targetColumnType); - bulkWriterStatement.setInt(paramIdx, ascii); - } - } - } - } catch (final Exception e) { - LOG.error("Error setting Type on sourceColumnName = " + sourceColumnName - + ", sourceColumnValue = " + sourceColumnValue - + ", targetColumnType =" + targetColumnType + ", source type = " - + sourceColumnValue.getClass().getTypeName(), e); - throw e; - } - // ##ORACLE_TARGET -- END TRY-catch temp - // to catch this BLOB Copy issue. - } else { - // for all cases oracle/sqlserver... - bulkWriterStatement.setNull(paramIdx, targetColumnType); - } - } - } - paramIdx += 1; - sourceColumnTypeIdx +=1; - } - - bulkWriterStatement.addBatch(); - } - - final int batchCount = ctx.getDataSet().getAllResults().size(); - executeBatch(ctx.getCopyItem(), bulkWriterStatement, batchCount, ctx.getPerformanceRecorder()); - bulkWriterStatement.clearParameters(); - bulkWriterStatement.clearBatch(); - connection.commit(); - // LOG.info("$$ updating progress from data wtiter task"); - final long totalCount = ctx.getTotalCount().addAndGet(batchCount); - updateProgress(ctx.getContext(), ctx.getCopyItem(), totalCount); - } - } catch (final Exception e) { - if (connection != null) { - connection.rollback(); - } - throw e; - } finally { - if (connection != null && originalAutoCommit != null) { - connection.setAutoCommit(originalAutoCommit); - } - if (connection != null && ctx != null) { - if (requiresIdentityInsert) { - switchIdentityInsert(connection, ctx.getCopyItem().getTargetItem(), false); - } - connection.close(); - } - } - } - } - - private class DataBulkWriterTask extends RetriableTask { - - private DataWriterContext ctx; - - public DataBulkWriterTask(DataWriterContext ctx) { - super(ctx.getContext(), ctx.getCopyItem().getTargetItem()); - this.ctx = ctx; - } - - @Override - protected Boolean internalRun() { - try { - if (!ctx.getDataSet().getAllResults().isEmpty()) { - process(); - } - return Boolean.TRUE; - } catch (Exception e) { - //LOG.error("Error while executing table task " + ctx.getCopyItem().getTargetItem(),e); - throw new RuntimeException("Error processing writer task for " + ctx.getCopyItem().getTargetItem(), e); - } - } - - private void process() throws Exception { - Connection connection = null; - Boolean originalAutoCommit = null; - try { - connection = ctx.getContext().getMigrationContext().getDataTargetRepository().getConnection(); - originalAutoCommit = connection.getAutoCommit(); - connection.setAutoCommit(false); - SQLServerBulkCopy bulkCopy = new SQLServerBulkCopy(connection.unwrap(SQLServerConnection.class)); - SQLServerBulkCopyOptions copyOptions = new SQLServerBulkCopyOptions(); - copyOptions.setBulkCopyTimeout(0); - copyOptions.setBatchSize(ctx.getContext().getMigrationContext().getReaderBatchSize()); - bulkCopy.setBulkCopyOptions(copyOptions); - bulkCopy.setDestinationTableName(ctx.getCopyItem().getTargetItem()); - - try (Statement tempStmt = 
connection.createStatement(); - ResultSet tempTargetRs = tempStmt.executeQuery(String.format("select * from %s where 0 = 1", ctx.getCopyItem().getTargetItem()))) { - for (String column : ctx.getColumnsToCopy()) { - int targetColumnIdx = tempTargetRs.findColumn(column); - bulkCopy.addColumnMapping(column, targetColumnIdx); - } - } - bulkCopy.writeToServer(ctx.getDataSet().toSQLServerBulkData()); - connection.commit(); - final Stopwatch timer = Stopwatch.createStarted(); - int bulkCount = ctx.getDataSet().getAllResults().size(); - LOG.debug("Bulk written ({} items) for table '{}' in {}", bulkCount, ctx.getCopyItem().getTargetItem(), timer.stop().toString()); - ctx.getPerformanceRecorder().record(PerformanceUnit.ROWS, bulkCount); - long totalCount = ctx.getTotalCount().addAndGet(bulkCount); - updateProgress(ctx.getContext(), ctx.getCopyItem(), totalCount); - } catch (Exception e) { - if (connection != null) { - connection.rollback(); - } - throw e; - } finally { - if (connection != null && originalAutoCommit != null) { - connection.setAutoCommit(originalAutoCommit); - } - if (connection != null && ctx != null) { - connection.close(); - } - } - } - } - - private class DataDeleteWriterTask extends RetriableTask { - - private DataWriterContext ctx; - - public DataDeleteWriterTask(DataWriterContext ctx) { - super(ctx.getContext(), ctx.getCopyItem().getTargetItem()); - this.ctx = ctx; - } - - @Override - protected Boolean internalRun() { - try { - if (!ctx.getDataSet().getAllResults().isEmpty()) { - if(ctx.getContext().getMigrationContext().isDeletionEnabled()){ - process(); - } - } - return Boolean.TRUE; - } catch (Exception e) { - //LOG.error("Error while executing table task " + ctx.getCopyItem().getTargetItem(),e); - throw new RuntimeException("Error processing writer task for " + ctx.getCopyItem().getTargetItem(), e); - } - } - - private void process() throws Exception { - Connection connection = null; - Boolean originalAutoCommit = null; - String PK = "PK"; - boolean requiresIdentityInsert = ctx.isRequiresIdentityInsert(); - try { - connection = ctx.getContext().getMigrationContext().getDataTargetRepository().getConnection(); - originalAutoCommit = connection.getAutoCommit(); - // ORACLE_TARGET - START - String sqlDelete = ""; - if ("oracle".equalsIgnoreCase(ctx.getContext().getMigrationContext().getDataTargetRepository() - .getDatabaseProvider().getDbName())) { - sqlDelete = getBulkDeleteStatementOracle(ctx.getCopyItem().getTargetItem(), PK); - } else { - sqlDelete = getBulkDeleteStatement(ctx.getCopyItem().getTargetItem(), PK); - } - // ORACLE_TARGET - END - try (PreparedStatement bulkWriterStatement = connection.prepareStatement( - getBulkDeleteStatement(ctx.getCopyItem().getTargetItem() , PK));) { - connection.setAutoCommit(false); - for (List row : ctx.getDataSet().getAllResults()) { - int paramIdx = 1; - Long pkValue = (Long) ctx.getDataSet() - .getColumnValue("p_itempk", row); - bulkWriterStatement.setObject(paramIdx, pkValue); - - paramIdx += 1; - bulkWriterStatement.addBatch(); - } - int batchCount = ctx.getDataSet().getAllResults().size(); - executeBatch(ctx.getCopyItem(), bulkWriterStatement, batchCount, ctx.getPerformanceRecorder()); - bulkWriterStatement.clearParameters(); - bulkWriterStatement.clearBatch(); - connection.commit(); - long totalCount = ctx.getTotalCount().addAndGet(batchCount); - updateProgress(ctx.getContext(), ctx.getCopyItem(), totalCount); - } - } catch (Exception e) { - if (connection != null) { - connection.rollback(); - } - throw e; - } finally { - if 
(connection != null && originalAutoCommit != null) { - connection.setAutoCommit(originalAutoCommit); - } - if (connection != null && ctx != null) { - if (requiresIdentityInsert) { - switchIdentityInsert(connection, ctx.getCopyItem().getTargetItem(), false); - } - connection.close(); - } - } - } - - private List getListColumn() { - final String columns = "PK"; - if (StringUtils.isEmpty(columns)) { - return Collections.emptyList(); - } - List result = Splitter.on(",") - .omitEmptyStrings() - .trimResults() - .splitToList(columns); - - return result; - } - } - } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterTask.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterTask.java new file mode 100644 index 0000000..53e6d11 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/CopyPipeWriterTask.java @@ -0,0 +1,277 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.strategy.impl; + +import com.google.common.base.Stopwatch; +import com.sap.cx.boosters.commercedbsync.concurrent.impl.task.RetriableTask; +import com.sap.cx.boosters.commercedbsync.context.CopyContext; +import com.sap.cx.boosters.commercedbsync.dataset.DataColumn; +import com.sap.cx.boosters.commercedbsync.dataset.DataSet; +import com.sap.cx.boosters.commercedbsync.performance.PerformanceRecorder; +import com.sap.cx.boosters.commercedbsync.performance.PerformanceUnit; +import de.hybris.bootstrap.ddl.DataBaseProvider; +import org.apache.commons.collections.MapUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.StringReader; +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.ResultSet; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.List; +import java.util.stream.Collectors; + +import static java.sql.Types.BLOB; +import static java.sql.Types.CLOB; +import static java.sql.Types.NUMERIC; + +class CopyPipeWriterTask extends RetriableTask { + + private static final Logger LOG = LoggerFactory.getLogger(CopyPipeWriterTask.class); + + private final CopyPipeWriterContext ctx; + private final DataSet dataSet; + + public CopyPipeWriterTask(CopyPipeWriterContext ctx, DataSet dataSet) { + super(ctx.getContext(), ctx.getCopyItem().getTargetItem()); + this.ctx = ctx; + this.dataSet = dataSet; + } + + @Override + protected Boolean internalRun() { + try { + if (dataSet.isNotEmpty()) { + process(); + } + return Boolean.TRUE; + } catch (Exception e) { + throw new RuntimeException("Error processing writer task for " + ctx.getCopyItem().getTargetItem(), e); + } + } + + private boolean isColumnOverride(CopyContext.DataCopyItem item, String sourceColumnName) { + return MapUtils.isNotEmpty(item.getColumnMap()) && item.getColumnMap().containsKey(sourceColumnName); + } + + private void switchIdentityInsert(Connection connection, final String tableName, boolean on) { + try (Statement stmt = connection.createStatement()) { + String onOff = on ? 
"ON" : "OFF"; + stmt.executeUpdate(String.format("SET IDENTITY_INSERT %s %s", tableName, onOff)); + } catch (final Exception e) { + // TODO using brute force FIX + // throw new RuntimeException("Could not switch identity insert", e); + } + } + + private String getBulkInsertStatement(String targetTableName, List columnsToCopy, + List columnsToCopyValues) { + return "INSERT INTO " + targetTableName + " " + + getBulkInsertStatementParamList(columnsToCopy, columnsToCopyValues); + } + + private String getBulkInsertStatementParamList(List columnsToCopy, List columnsToCopyValues) { + return "(" + String.join(", ", columnsToCopy) + ") VALUES (" + String.join(", ", columnsToCopyValues) + ")"; + } + + private PreparedStatement createPreparedStatement(final CopyContext context, final String targetTableName, + final List columnsToCopy, final List upsertIds, final Connection targetConnection) + throws Exception { + if (context.getMigrationContext().isIncrementalModeEnabled()) { + if (!upsertIds.isEmpty()) { + final String upsertStatement = context.getMigrationContext().getDataTargetRepository() + .buildBulkUpsertStatement(targetTableName, columnsToCopy, upsertIds); + + LOG.debug("Upsert statement for: {}\n{}", + context.getMigrationContext().getDataTargetRepository().getDatabaseProvider(), upsertStatement); + + return targetConnection.prepareStatement(upsertStatement); + } else { + throw new RuntimeException( + "The incremental approach can only be used on tables that have a valid identifier like PK or ID"); + } + } else { + return targetConnection.prepareStatement(getBulkInsertStatement(targetTableName, columnsToCopy, + columnsToCopy.stream().map(column -> "?").collect(Collectors.toList()))); + } + } + + private void executeBatch(CopyContext.DataCopyItem item, PreparedStatement preparedStatement, long batchCount, + PerformanceRecorder recorder) throws SQLException { + final Stopwatch timer = Stopwatch.createStarted(); + preparedStatement.executeBatch(); + preparedStatement.clearBatch(); + LOG.debug("Batch written ({} items) for table '{}' in {}", batchCount, item.getTargetItem(), timer.stop()); + recorder.record(PerformanceUnit.ROWS, batchCount); + } + + private void process() throws Exception { + Connection connection = null; + Boolean originalAutoCommit = null; + boolean requiresIdentityInsert = ctx.isRequiresIdentityInsert(); + try { + connection = ctx.getContext().getMigrationContext().getDataTargetRepository().getConnection(); + + final DataBaseProvider dbProvider = ctx.getContext().getMigrationContext().getDataTargetRepository() + .getDatabaseProvider(); + LOG.debug("TARGET DB name = " + dbProvider.getDbName() + " SOURCE TABLE = " + + ctx.getCopyItem().getSourceItem() + ", TARGET Table = " + ctx.getCopyItem().getTargetItem()); + + originalAutoCommit = connection.getAutoCommit(); + + try (PreparedStatement bulkWriterStatement = createPreparedStatement(ctx.getContext(), + ctx.getCopyItem().getTargetItem(), ctx.getColumnsToCopy(), ctx.getUpsertIds(), connection); + Statement tempStmt = connection.createStatement(); + ResultSet tempTargetRs = tempStmt.executeQuery( + String.format("select * from %s where 0 = 1", ctx.getCopyItem().getTargetItem()))) { + connection.setAutoCommit(false); + if (requiresIdentityInsert) { + switchIdentityInsert(connection, ctx.getCopyItem().getTargetItem(), true); + } + + boolean printedBlobLog = false; + boolean printedClobLog = false; + + for (List row : dataSet.getAllResults()) { + int paramIdx = 1; + for (String sourceColumnName : ctx.getColumnsToCopy()) { + int 
targetColumnIdx = tempTargetRs.findColumn(sourceColumnName); + DataColumn sourceColumnType = dataSet.getColumn(paramIdx - 1); + int targetColumnType = tempTargetRs.getMetaData().getColumnType(targetColumnIdx); + if (ctx.getNullifyColumns().contains(sourceColumnName)) { + bulkWriterStatement.setNull(paramIdx, targetColumnType); + LOG.trace("Column {} is nullified. Setting NULL value...", sourceColumnName); + } else { + if (isColumnOverride(ctx.getCopyItem(), sourceColumnName)) { + bulkWriterStatement.setObject(paramIdx, + ctx.getCopyItem().getColumnMap().get(sourceColumnName), targetColumnType); + } else { + Object sourceColumnValue = dataSet.getColumnValue(sourceColumnName, row, + sourceColumnType, targetColumnType); + if (sourceColumnValue != null) { + // catch all exceptions, not to print each time, print one for each type/worker. + try { + if (!dbProvider.isOracleUsed()) { + // for all cases non-oracle + bulkWriterStatement.setObject(paramIdx, sourceColumnValue, + targetColumnType); + } else { + switch (targetColumnType) { + /* + * code to handle BLOB, because setObject throws exception example + * Products.p_buyerids is varbinary(max) in (sqlserver) AND blob in + * (oracle) + */ + case BLOB : { + if (!printedBlobLog) { + LOG.debug("BLOB 2004 sourceColumnName = " + sourceColumnName + + " souce value type CN = " + + sourceColumnValue.getClass().getCanonicalName() + + " , Name = " + sourceColumnValue.getClass().getName() + + " , Type Name = " + + sourceColumnValue.getClass().getTypeName()); + printedBlobLog = true; + } + bulkWriterStatement.setBytes(paramIdx, (byte[]) sourceColumnValue); + break; + + } + /* + * code to handle CLOB, because setObject throws exception example + * Promotion.description is nvarchar(max) in (sqlserver) AND blob in + * (oracle) + */ + case CLOB : { + if (!printedClobLog) { + LOG.debug("CLOB 2005 sourceColumnName = " + sourceColumnName + + " souce value type CN = " + + sourceColumnValue.getClass().getCanonicalName() + + " , Name = " + sourceColumnValue.getClass().getName() + + " , Type Name = " + + sourceColumnValue.getClass().getTypeName()); + printedClobLog = true; + } + if (sourceColumnValue instanceof String clobString) { + if (!clobString.isEmpty()) { + LOG.debug(" reading CLOB"); + bulkWriterStatement.setClob(paramIdx, + new StringReader(clobString), clobString.length()); + LOG.debug(" wrote CLOB"); + } else { + LOG.debug("CLOB is empty... 
setting null"); + bulkWriterStatement.setNull(paramIdx, targetColumnType); + } + } + break; + } + default : { + bulkWriterStatement.setObject(paramIdx, sourceColumnValue, + targetColumnType); + break; + } + } + + } + } catch (final NumberFormatException e) { + LOG.error("NumberFormatException - Error setting Type on sourceColumnName = " + + sourceColumnName + ", sourceColumnValue = " + sourceColumnValue + + ", targetColumnType =" + targetColumnType + ", source type = " + + sourceColumnValue.getClass().getTypeName()); + if (dbProvider.isOracleUsed()) { + if (targetColumnType == NUMERIC + && sourceColumnValue instanceof String stringValue + && !stringValue.isEmpty()) { + final int character = Character.codePointAt(stringValue, 0); + bulkWriterStatement.setInt(paramIdx, character); + } + } + } catch (final Exception e) { + LOG.error("Error setting Type on sourceColumnName = " + sourceColumnName + + ", sourceColumnValue = " + sourceColumnValue + ", targetColumnType =" + + targetColumnType + ", source type = " + + sourceColumnValue.getClass().getTypeName(), e); + throw e; + } + } else { + bulkWriterStatement.setNull(paramIdx, targetColumnType); + } + } + } + paramIdx++; + } + bulkWriterStatement.addBatch(); + } + int batchCount = dataSet.getAllResults().size(); + executeBatch(ctx.getCopyItem(), bulkWriterStatement, batchCount, ctx.getPerformanceRecorder()); + bulkWriterStatement.clearParameters(); + bulkWriterStatement.clearBatch(); + connection.commit(); + ctx.getDatabaseCopyTaskRepository().markBatchCompleted(ctx.getContext(), ctx.getCopyItem(), + dataSet.getBatchId()); + long totalCount = ctx.getTotalCount().addAndGet(batchCount); + ctx.getDatabaseCopyTaskRepository().updateTaskProgress(ctx.getContext(), ctx.getCopyItem(), totalCount); + } + } catch (Exception e) { + if (connection != null) { + connection.rollback(); + } + throw e; + } finally { + if (connection != null && originalAutoCommit != null) { + connection.setAutoCommit(originalAutoCommit); + } + if (connection != null) { + if (requiresIdentityInsert) { + switchIdentityInsert(connection, ctx.getCopyItem().getTargetItem(), false); + } + connection.close(); + } + } + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/DataDeleteWriterTask.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/DataDeleteWriterTask.java new file mode 100644 index 0000000..5325ee7 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/strategy/impl/DataDeleteWriterTask.java @@ -0,0 +1,151 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.strategy.impl; + +import com.google.common.base.Stopwatch; +import com.sap.cx.boosters.commercedbsync.concurrent.impl.task.RetriableTask; +import com.sap.cx.boosters.commercedbsync.context.CopyContext; +import com.sap.cx.boosters.commercedbsync.dataset.DataSet; +import com.sap.cx.boosters.commercedbsync.performance.PerformanceRecorder; +import com.sap.cx.boosters.commercedbsync.performance.PerformanceUnit; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.sql.Connection; +import java.sql.PreparedStatement; +import java.sql.SQLException; +import java.sql.Statement; +import java.util.List; + +import static java.sql.Types.BIGINT; + +public class DataDeleteWriterTask extends RetriableTask { + private static final Logger LOG = LoggerFactory.getLogger(DataDeleteWriterTask.class); + + private final CopyPipeWriterContext ctx; + private final DataSet dataSet; + + public DataDeleteWriterTask(CopyPipeWriterContext ctx, DataSet dataSet) { + super(ctx.getContext(), ctx.getCopyItem().getTargetItem()); + this.ctx = ctx; + this.dataSet = dataSet; + } + + @Override + protected Boolean internalRun() { + try { + if (dataSet.isNotEmpty()) { + if (ctx.getContext().getMigrationContext().isDeletionEnabled()) { + process(); + } + } + return Boolean.TRUE; + } catch (Exception e) { + // LOG.error("Error while executing table task " + + // ctx.getCopyItem().getTargetItem(),e); + throw new RuntimeException("Error processing writer task for " + ctx.getCopyItem().getTargetItem(), e); + } + } + + private void process() throws Exception { + Connection connection = null; + Boolean originalAutoCommit = null; + String PK = "PK"; + boolean requiresIdentityInsert = ctx.isRequiresIdentityInsert(); + try { + connection = ctx.getContext().getMigrationContext().getDataTargetRepository().getConnection(); + originalAutoCommit = connection.getAutoCommit(); + + String sqlDelete; + if (ctx.getContext().getMigrationContext().getDataTargetRepository().getDatabaseProvider().isOracleUsed()) { + sqlDelete = getBulkDeleteStatementOracle(ctx.getCopyItem().getTargetItem(), PK); + } else { + sqlDelete = getBulkDeleteStatement(ctx.getCopyItem().getTargetItem(), PK); + } + + try (PreparedStatement bulkWriterStatement = connection.prepareStatement(sqlDelete)) { + connection.setAutoCommit(false); + for (List row : dataSet.getAllResults()) { + Long pkValue = (Long) dataSet.getColumnValue("p_itempk", row, dataSet.getColumn(1), BIGINT); + bulkWriterStatement.setObject(1, pkValue); + bulkWriterStatement.addBatch(); + } + int batchCount = dataSet.getAllResults().size(); + executeBatch(ctx.getCopyItem(), bulkWriterStatement, batchCount, ctx.getPerformanceRecorder()); + bulkWriterStatement.clearParameters(); + bulkWriterStatement.clearBatch(); + connection.commit(); + long totalCount = ctx.getTotalCount().addAndGet(batchCount); + ctx.getDatabaseCopyTaskRepository().updateTaskProgress(ctx.getContext(), ctx.getCopyItem(), totalCount); + } + } catch (Exception e) { + if (connection != null) { + connection.rollback(); + } + throw e; + } finally { + if (connection != null && originalAutoCommit != null) { + connection.setAutoCommit(originalAutoCommit); + } + if (connection != null && ctx != null) { + if (requiresIdentityInsert) { + switchIdentityInsert(connection, ctx.getCopyItem().getTargetItem(), false); + } + connection.close(); + } + } + } + + protected void executeBatch(CopyContext.DataCopyItem item, PreparedStatement preparedStatement, long batchCount, 
+ PerformanceRecorder recorder) throws SQLException { + final Stopwatch timer = Stopwatch.createStarted(); + preparedStatement.executeBatch(); + preparedStatement.clearBatch(); + LOG.debug("Batch written ({} items) for table '{}' in {}", batchCount, item.getTargetItem(), timer.stop()); + recorder.record(PerformanceUnit.ROWS, batchCount); + } + + private void switchIdentityInsert(Connection connection, final String tableName, boolean on) { + try (Statement stmt = connection.createStatement()) { + String onOff = on ? "ON" : "OFF"; + stmt.executeUpdate(String.format("SET IDENTITY_INSERT %s %s", tableName, onOff)); + } catch (final Exception e) { + throw new RuntimeException("Could not switch identity insert", e); + } + } + + private String getBulkDeleteStatement(String targetTableName, String columnId) { + /* + * https://michaeljswart.com/2017/07/sql-server-upsert-patterns-and- + * antipatterns/ We are not using a stored procedure here as CCv2 does not grant + * sp exec permission to the default db user + */ + StringBuilder sqlBuilder = new StringBuilder(); + sqlBuilder.append(String.format("MERGE %s WITH (HOLDLOCK) AS t", targetTableName)); + sqlBuilder.append("\n"); + sqlBuilder.append(String.format("USING (SELECT %s) AS s ON t.%s = s.%s", "? " + columnId, columnId, columnId)); + sqlBuilder.append("\n"); + sqlBuilder.append("WHEN MATCHED THEN DELETE"); // DELETE + sqlBuilder.append(";"); + LOG.debug("MERGE-DELETE SQL Server " + sqlBuilder); + return sqlBuilder.toString(); + } + + private String getBulkDeleteStatementOracle(final String targetTableName, final String columnId) { + final StringBuilder sqlBuilder = new StringBuilder(); + sqlBuilder.append(String.format("MERGE INTO %s t", targetTableName)); + sqlBuilder.append("\n"); + sqlBuilder.append( + String.format("USING (SELECT %s from dual) s ON (t.%s = s.%s)", "? " + columnId, columnId, columnId)); + sqlBuilder.append("\n"); + sqlBuilder.append("WHEN MATCHED THEN "); // DELETE + sqlBuilder.append("UPDATE SET t.HJMPTS = 0 "); // MERGE requires an UPDATE or INSERT action, so a dummy HJMPTS value is set first + sqlBuilder.append("DELETE WHERE " + String.format(" t.%s = s.%s ", columnId, columnId)); // the DELETE clause is optional, hence the explicit WHERE + LOG.debug("MERGE-DELETE ORACLE " + sqlBuilder); + return sqlBuilder.toString(); + } +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/utils/FileUtils.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/utils/FileUtils.java new file mode 100644 index 0000000..99de317 --- /dev/null +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/utils/FileUtils.java @@ -0,0 +1,28 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors.
+ * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsync.utils; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.zip.ZipEntry; +import java.util.zip.ZipOutputStream; + +public class FileUtils { + + public static byte[] zipBytes(String filename, byte[] input) throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + ZipOutputStream zos = new ZipOutputStream(baos); + ZipEntry entry = new ZipEntry(filename); + entry.setSize(input.length); + zos.putNextEntry(entry); + zos.write(input); + zos.closeEntry(); + zos.close(); + return baos.toByteArray(); + } + +} diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/utils/LocalDateTypeAdapter.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/utils/LocalDateTypeAdapter.java index 9e71d0b..b8293d0 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/utils/LocalDateTypeAdapter.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/utils/LocalDateTypeAdapter.java @@ -1,3 +1,9 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + package com.sap.cx.boosters.commercedbsync.utils; import java.io.IOException; @@ -9,29 +15,23 @@ import com.google.gson.stream.JsonToken; import com.google.gson.stream.JsonWriter; +public class LocalDateTypeAdapter extends TypeAdapter { -public class LocalDateTypeAdapter extends TypeAdapter -{ - - @Override - public LocalDateTime read(JsonReader jsonReader) throws IOException - { - if (jsonReader.peek() == JsonToken.NULL) - { - jsonReader.nextNull(); - return null; - } - return ZonedDateTime.parse(jsonReader.nextString()).toLocalDateTime(); - } + @Override + public LocalDateTime read(JsonReader jsonReader) throws IOException { + if (jsonReader.peek() == JsonToken.NULL) { + jsonReader.nextNull(); + return null; + } + return ZonedDateTime.parse(jsonReader.nextString()).toLocalDateTime(); + } - @Override - public void write(final JsonWriter jsonWriter, final LocalDateTime localDate) throws IOException - { - if (localDate == null) - { - jsonWriter.nullValue(); - return; - } - jsonWriter.value(localDate.toString()); - } + @Override + public void write(final JsonWriter jsonWriter, final LocalDateTime localDate) throws IOException { + if (localDate == null) { + jsonWriter.nullValue(); + return; + } + jsonWriter.value(localDate.toString()); + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/utils/MaskUtil.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/utils/MaskUtil.java index 650e735..27b022c 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/utils/MaskUtil.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/utils/MaskUtil.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/views/TableViewGenerator.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/views/TableViewGenerator.java index 50f8d43..464ee6a 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/views/TableViewGenerator.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/views/TableViewGenerator.java @@ -1,3 +1,9 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * License: Apache-2.0 + * + */ + package com.sap.cx.boosters.commercedbsync.views; import java.util.Map; @@ -15,107 +21,120 @@ public class TableViewGenerator { - private static final String VIEW_PREFIX = "CREATE OR ALTER VIEW ${name} AS SELECT ${columns} FROM ${from}"; - private static final String COLUMN_REPLACEMENT = "${replacement} AS ${column}"; + private static final String VIEW_PREFIX = "CREATE OR ALTER VIEW ${name} AS SELECT ${columns} FROM ${from}"; + private static final String COLUMN_REPLACEMENT = "${replacement} AS ${column}"; + + private static final Logger LOG = LoggerFactory.getLogger(TableViewGenerator.class); + + /** + * Generates {@link ViewConfigurationContext} context with all required data for + * VIEW DDL generation. + * + * @param tableName + * raw table name which should be a source of data + * @param migrationContext + * holds all information about source data + * @return + * @throws Exception + */ + public ViewConfigurationContext generateForTable(final String tableName, final MigrationContext migrationContext) + throws Exception { + try { + Set columns = migrationContext.getDataSourceRepository().getAllColumnNames(tableName); + if (columns.isEmpty()) { + return null; + } + String viewPrefix = migrationContext.getItemTypeViewNamePattern(); + String tableVieName = String.format(StringUtils.trimToEmpty(viewPrefix), tableName); + String whereView = migrationContext.getViewWhereClause(tableName); + Map customColumns = migrationContext.getCustomColumnsForView(tableName); + final String viewColumnPrefix = migrationContext.getViewColumnPrefixFor(tableName); + return new ViewConfigurationContext(tableName, tableVieName, columns, customColumns, whereView, + viewColumnPrefix); + } catch (Exception e) { + LOG.error(String.format("could not get source data repository for table: %s", tableName), e); + throw e; + } + } + + /** + * generates DDL VIEW definition based on ctx for given table. Template of that + * string is:
+ * CREATE OR ALTER VIEW ${name} AS SELECT ${columns} FROM ${from}
+ * where:
+ * name - view name
+ * columns - view columns with optional custom column definition
+ * from - from source table, or optional custom DQL FROM section
+ *
+ * @param ctx
+ * @return
+ */
+ public String generateViewDefinition(final ViewConfigurationContext ctx) { + String columnList = generateColumnList(ctx); + Map params = Map.of("name", ctx.getView(), "columns", columnList, "from", + ctx.getAdditionalWhereClause()); + StringSubstitutor template = new StringSubstitutor(params); + String view = template.replace(VIEW_PREFIX); + LOG.debug(String.format("generated view for table %s: %s", ctx.getTable(), view)); + return view; + }
- private static final Logger LOG = LoggerFactory.getLogger(TableViewGenerator.class);
+ /** + * method generates from context list of columns, where if there is custom + * column definition it will replace original one + * + * @param ctx + * @return comma separated list of columns for view built from ctx + */
+ public String generateColumnList(final ViewConfigurationContext ctx) { + Set originalColumns = ctx.getOriginalColumns(); + Map replacements = ctx.getColumnReplacements(); + Set newColumnSet = originalColumns.stream().collect(Collectors.toSet());
- /** - * Generates {@link ViewConfigurationContext} context with all required data for VIEW DDL generation. - * @param tableName raw table name which should be a source of data - * @param migrationContext holds all information about source data - * @return - * @throws Exception - */
- public ViewConfigurationContext generateForTable(final String tableName, final MigrationContext migrationContext) - throws Exception { - try { - Set columns = migrationContext.getDataSourceRepository().getAllColumnNames(tableName); - if (columns.isEmpty()) { - return null; - } - String viewPrefix = migrationContext.getItemTypeViewNamePattern(); - String tableVieName = String.format(StringUtils.trimToEmpty(viewPrefix), tableName); - String whereView = migrationContext.getViewWhereClause(tableName); - Map customColumns = migrationContext.getCustomColumnsForView(tableName); - final String viewColumnPrefix = migrationContext.getViewColumnPrefixFor(tableName); - return new ViewConfigurationContext(tableName, tableVieName, columns, customColumns, whereView, viewColumnPrefix); - } catch (Exception e) { - LOG.error(String.format("could not get source data repository for table: %s", tableName), e); - throw e; - } - }
+ final String viewColumnPrefix = ctx.getViewColumnPrefix(); + if (StringUtils.isNotBlank(viewColumnPrefix)) { + newColumnSet = newColumnSet.stream().map(column -> viewColumnPrefix + "." + column) + .collect(Collectors.toSet()); + }
- /** - * generates DDL VIEW definition based on ctx for given table. Template of that string is:
- * CREATE OR ALTER VIEW ${name} AS SELECT ${columns} FROM ${from}
- * where:
- * name - view name
- * columns - view columns with optional custom column definition
- * from - from source table, or optional custom DQL FROM section
- * @param ctx
- * @return
- */
- public String generateViewDefinition(final ViewConfigurationContext ctx) { - String columnList = generateColumnList(ctx); - Map params = Map.of("name", ctx.getView(), "columns", columnList, "from", - ctx.getAdditionalWhereClause()); - StringSubstitutor template = new StringSubstitutor(params); - String view = template.replace(VIEW_PREFIX); - LOG.debug(String.format("generated view for table %s: %s", ctx.getTable(), view)); - return view; - }
+ for (Entry entry : replacements.entrySet()) { + String replacementKey = entry.getKey(); + if (StringUtils.isNotBlank(viewColumnPrefix)) { + replacementKey = viewColumnPrefix + "." + replacementKey; + }
- /** - * method generates from context list of columns, where if there is custom column definition it will replace original one - * @param ctx - * @return comma separated list of columns for view built from ctx - */
- public String generateColumnList(final ViewConfigurationContext ctx) { - Set originalColumns = ctx.getOriginalColumns(); - Map replacements = ctx.getColumnReplacements(); - Set newColumnSet = originalColumns.stream().collect(Collectors.toSet());
+ if (!newColumnSet.contains(replacementKey)) { + LOG.warn(String.format("There is missing column %s in table %s, for custom definition %s", + entry.getKey(), ctx.getTable(), entry.getValue())); + } else { + Map columnMap = Map.of("replacement", entry.getValue(), "column", entry.getKey()); + StringSubstitutor t = new StringSubstitutor(columnMap); + String replacement = t.replace(COLUMN_REPLACEMENT); + // replace old one with new + newColumnSet.remove(replacementKey); + newColumnSet.add(replacement); + } + }
- final String viewColumnPrefix = ctx.getViewColumnPrefix(); - if(StringUtils.isNotBlank(viewColumnPrefix)) - { - newColumnSet = newColumnSet.stream().map(column -> viewColumnPrefix + "." + column).collect(Collectors.toSet()); - } -
- for (Entry entry : replacements.entrySet()) { - String replacementKey = entry.getKey(); - if(StringUtils.isNotBlank(viewColumnPrefix)) - { - replacementKey = viewColumnPrefix + "."
+ replacementKey; - } - - if (!newColumnSet.contains(replacementKey)) { - LOG.warn(String.format("There is missing column %s in table %s, for custom definition %s", - entry.getKey(), ctx.getTable(), entry.getValue())); - } else { - Map columnMap = Map.of("replacement", entry.getValue(), "column", entry.getKey()); - StringSubstitutor t = new StringSubstitutor(columnMap); - String replacement = t.replace(COLUMN_REPLACEMENT); - // replace old one with new - newColumnSet.remove(replacementKey); - newColumnSet.add(replacement); - } - } - - return Joiner.on(", ").join(newColumnSet); - } + return Joiner.on(", ").join(newColumnSet); + } - public static String getTableNameForView(final String tableName, final MigrationContext migrationContext) - { - final String viewPrefix = String.format(StringUtils.trimToEmpty(migrationContext.getItemTypeViewNamePattern()), ""); - if (tableName.startsWith(viewPrefix)) - { - return tableName.replace(viewPrefix, ""); - } - return tableName; - } + public static String getTableNameForView(final String tableName, final MigrationContext migrationContext) { + final String viewPrefix = String.format(StringUtils.trimToEmpty(migrationContext.getItemTypeViewNamePattern()), + ""); + if (tableName.startsWith(viewPrefix)) { + return tableName.replace(viewPrefix, ""); + } + return tableName; + } } diff --git a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/views/ViewConfigurationContext.java b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/views/ViewConfigurationContext.java index 73da5a6..374be38 100644 --- a/commercedbsync/src/com/sap/cx/boosters/commercedbsync/views/ViewConfigurationContext.java +++ b/commercedbsync/src/com/sap/cx/boosters/commercedbsync/views/ViewConfigurationContext.java @@ -1,3 +1,9 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + package com.sap.cx.boosters.commercedbsync.views; import java.util.Map; @@ -6,52 +12,50 @@ /** * Context holder for one table configuration. Object generated by * {@link TableViewGenerator}. 
- * - * @author i303764 * + * @author i303764 */ public class ViewConfigurationContext { - private final String additionalWhereClause; - private final Map columnReplacements; - private final Set originalColumns; - private final String table; - private final String view; - private final String viewColumnPrefix; - - public ViewConfigurationContext(String table, String view, Set originalColumns, - Map columnReplacements, String additionalWhereClause, String viewColumnPrefix) { - super(); - this.table = table; - this.view = view; - this.originalColumns = originalColumns; - this.columnReplacements = columnReplacements; - this.additionalWhereClause = additionalWhereClause; - this.viewColumnPrefix = viewColumnPrefix; - } - - public String getAdditionalWhereClause() { - return additionalWhereClause; - } - - public Map getColumnReplacements() { - return columnReplacements; - } - - public Set getOriginalColumns() { - return originalColumns; - } - - public String getTable() { - return table; - } - - public String getView() { - return view; - } - - public String getViewColumnPrefix() - { - return viewColumnPrefix; - } + private final String additionalWhereClause; + private final Map columnReplacements; + private final Set originalColumns; + private final String table; + private final String view; + private final String viewColumnPrefix; + + public ViewConfigurationContext(String table, String view, Set originalColumns, + Map columnReplacements, String additionalWhereClause, String viewColumnPrefix) { + super(); + this.table = table; + this.view = view; + this.originalColumns = originalColumns; + this.columnReplacements = columnReplacements; + this.additionalWhereClause = additionalWhereClause; + this.viewColumnPrefix = viewColumnPrefix; + } + + public String getAdditionalWhereClause() { + return additionalWhereClause; + } + + public Map getColumnReplacements() { + return columnReplacements; + } + + public Set getOriginalColumns() { + return originalColumns; + } + + public String getTable() { + return table; + } + + public String getView() { + return view; + } + + public String getViewColumnPrefix() { + return viewColumnPrefix; + } } diff --git a/commercedbsync/src/de/hybris/platform/azure/media/AzureCloudUtils.java b/commercedbsync/src/de/hybris/platform/azure/media/AzureCloudUtils.java index a8d7568..95936ff 100644 --- a/commercedbsync/src/de/hybris/platform/azure/media/AzureCloudUtils.java +++ b/commercedbsync/src/de/hybris/platform/azure/media/AzureCloudUtils.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -18,63 +18,57 @@ public class AzureCloudUtils { private static final char HYPHEN = '-'; private static final String DOUBLE_HYPHEN = "--"; - public AzureCloudUtils() { - } + public AzureCloudUtils() { + } - public static String computeContainerAddress(MediaStorageConfigService.MediaFolderConfig config) { - String configuredContainer = config.getParameter("containerAddress"); - String addressSuffix = StringUtils.isNotBlank(configuredContainer) - ? 
configuredContainer - : config.getFolderQualifier(); - String addressPrefix = getTenantPrefix(); - return toValidContainerName(addressPrefix + "-" + addressSuffix); - } + public static String computeContainerAddress(MediaStorageConfigService.MediaFolderConfig config) { + String configuredContainer = config.getParameter("containerAddress"); + String addressSuffix = StringUtils.isNotBlank(configuredContainer) + ? configuredContainer + : config.getFolderQualifier(); + String addressPrefix = getTenantPrefix(); + return toValidContainerName(addressPrefix + "-" + addressSuffix); + } - private static String toValidContainerName(String name) { - return name.toLowerCase().replaceAll("[/. !?]", "").replace('_', '-'); - } + private static String toValidContainerName(String name) { + return name.toLowerCase().replaceAll("[/. !?]", "").replace('_', '-'); + } - private static String toValidPrefixName(String name) { - return name.toLowerCase().replaceAll("[/. !?_-]", ""); - } + private static String toValidPrefixName(String name) { + return name.toLowerCase().replaceAll("[/. !?_-]", ""); + } - private static String getTenantPrefix() { - // return "sys-" + - // Registry.getCurrentTenantNoFallback().getTenantID().toLowerCase(); - String defaultPrefix = Registry.getCurrentTenantNoFallback().getTenantID(); - String prefix = toValidPrefixName(Config.getString("db.tableprefix", defaultPrefix)); - return "sys-" + prefix.toLowerCase(); - } + private static String getTenantPrefix() { + // return "sys-" + + // Registry.getCurrentTenantNoFallback().getTenantID().toLowerCase(); + String defaultPrefix = Registry.getCurrentTenantNoFallback().getTenantID(); + String prefix = toValidPrefixName(Config.getString("db.tableprefix", defaultPrefix)); + return "sys-" + prefix.toLowerCase(); + } - public static boolean hasValidMediaFolderName(final MediaStorageConfigService.MediaFolderConfig config) - { + public static boolean hasValidMediaFolderName(final MediaStorageConfigService.MediaFolderConfig config) { final String containerAddress = computeContainerAddress(config); return hasValidLength(containerAddress) && hasValidFormat(containerAddress); } - private static boolean hasValidLength(final String folderQualifier) - { - return folderQualifier.length() >= MIN_AZURE_MEDIA_FOLDER_QUALIFIER_SIZE && folderQualifier.length() - <= MAX_AZURE_MEDIA_FOLDER_QUALIFIER_SIZE; + private static boolean hasValidLength(final String folderQualifier) { + return folderQualifier.length() >= MIN_AZURE_MEDIA_FOLDER_QUALIFIER_SIZE + && folderQualifier.length() <= MAX_AZURE_MEDIA_FOLDER_QUALIFIER_SIZE; } - private static boolean hasValidFormat(final String folderQualifier) - { - if (!folderQualifier.matches(AZURE_MEDIA_FOLDER_QUALIFIER_REGEX)) - { + private static boolean hasValidFormat(final String folderQualifier) { + if (!folderQualifier.matches(AZURE_MEDIA_FOLDER_QUALIFIER_REGEX)) { return false; } - if (folderQualifier.contains(String.valueOf(HYPHEN))) - { + if (folderQualifier.contains(String.valueOf(HYPHEN))) { return hasHyphenValidFormat(folderQualifier); } return true; } - private static boolean hasHyphenValidFormat(final String folderQualifier) - { + private static boolean hasHyphenValidFormat(final String folderQualifier) { final char firstChar = folderQualifier.charAt(0); final char lastChar = folderQualifier.charAt(folderQualifier.length() - 1); return !folderQualifier.contains(DOUBLE_HYPHEN) && firstChar != HYPHEN && lastChar != HYPHEN; diff --git a/commercedbsync/src/de/hybris/platform/core/TenantPropertiesLoader.java 
b/commercedbsync/src/de/hybris/platform/core/TenantPropertiesLoader.java index 81a8bbe..441ee20 100644 --- a/commercedbsync/src/de/hybris/platform/core/TenantPropertiesLoader.java +++ b/commercedbsync/src/de/hybris/platform/core/TenantPropertiesLoader.java @@ -1,15 +1,15 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ + package de.hybris.platform.core; import de.hybris.bootstrap.ddl.PropertiesLoader; import java.util.Objects; - public class TenantPropertiesLoader implements PropertiesLoader { private final Tenant tenant; @@ -27,4 +27,4 @@ public String getProperty(final String key) { public String getProperty(final String key, final String defaultValue) { return tenant.getConfig().getString(key, defaultValue); } -} \ No newline at end of file +} diff --git a/commercedbsync/testsrc/com/sap/cx/boosters/commercedbsync/views/TableViewGeneratorTest.java b/commercedbsync/testsrc/com/sap/cx/boosters/commercedbsync/views/TableViewGeneratorTest.java index 30e64d0..9baf5f4 100644 --- a/commercedbsync/testsrc/com/sap/cx/boosters/commercedbsync/views/TableViewGeneratorTest.java +++ b/commercedbsync/testsrc/com/sap/cx/boosters/commercedbsync/views/TableViewGeneratorTest.java @@ -1,3 +1,9 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + package com.sap.cx.boosters.commercedbsync.views; import java.util.HashSet; @@ -15,35 +21,35 @@ class TableViewGeneratorTest { - TableViewGenerator testObj; - MigrationContext ctx; - - @Before - void setUp() throws Exception { - ctx = Mockito.mock(MigrationContext.class); - DataRepository dr = Mockito.mock(DataRepository.class); - Mockito.when(ctx.getDataSourceRepository()).thenReturn(dr); - Mockito.when(dr.getAllColumnNames(Mockito.anyString())) - .thenReturn(Stream.of("hjmpTS", "createdTS", "modifiedTS", "TypePkString", "OwnerPkString", "PK", - "sealed", "p_mime", "p_size", "p_datapk", "p_location", "p_locationhash", "p_realfilename", - "p_code", "p_internalurl", "p_description", "p_alttext", "p_removable", "p_mediaformat", - "p_folder", "p_subfolderpath", "p_mediacontainer", "p_catalog", "p_catalogversion", "aCLTS", - "propTS", "p_outputmimetype", "p_inputmimetype", "p_itemtimestamp", "p_format", "p_sourceitem", - "p_fieldseparator", "p_quotecharacter", "p_commentcharacter", "p_encoding", "p_linestoskip", - "p_removeonsuccess", "p_zipentry", "p_extractionid", "p_auditrootitem", "p_auditreportconfig", - "p_scheduledcount", "p_cronjobpos", "p_cronjob") - .collect(Collectors.toCollection(HashSet::new))); - Mockito.when(ctx.getItemTypeViewNamePattern()).thenReturn("v_%s"); - Mockito.when(ctx.getViewWhereClause(Mockito.matches("medias"))).thenReturn(""); - Mockito.when(ctx.getCustomColumnsForView(Mockito.matches("medias"))).thenReturn(Map.of()); - testObj = new TableViewGenerator(); - } - - @Test - void testSimplestMedia() throws Exception { - ViewConfigurationContext result = testObj.generateForTable("medias", ctx); - Assert.assertNotNull(result); - - } + TableViewGenerator testObj; + MigrationContext ctx; + + @Before + void setUp() throws Exception { + ctx = Mockito.mock(MigrationContext.class); + DataRepository dr = Mockito.mock(DataRepository.class); + Mockito.when(ctx.getDataSourceRepository()).thenReturn(dr); + Mockito.when(dr.getAllColumnNames(Mockito.anyString())) + .thenReturn(Stream.of("hjmpTS", "createdTS", "modifiedTS", 
"TypePkString", "OwnerPkString", "PK", + "sealed", "p_mime", "p_size", "p_datapk", "p_location", "p_locationhash", "p_realfilename", + "p_code", "p_internalurl", "p_description", "p_alttext", "p_removable", "p_mediaformat", + "p_folder", "p_subfolderpath", "p_mediacontainer", "p_catalog", "p_catalogversion", "aCLTS", + "propTS", "p_outputmimetype", "p_inputmimetype", "p_itemtimestamp", "p_format", "p_sourceitem", + "p_fieldseparator", "p_quotecharacter", "p_commentcharacter", "p_encoding", "p_linestoskip", + "p_removeonsuccess", "p_zipentry", "p_extractionid", "p_auditrootitem", "p_auditreportconfig", + "p_scheduledcount", "p_cronjobpos", "p_cronjob") + .collect(Collectors.toCollection(HashSet::new))); + Mockito.when(ctx.getItemTypeViewNamePattern()).thenReturn("v_%s"); + Mockito.when(ctx.getViewWhereClause(Mockito.matches("medias"))).thenReturn(""); + Mockito.when(ctx.getCustomColumnsForView(Mockito.matches("medias"))).thenReturn(Map.of()); + testObj = new TableViewGenerator(); + } + + @Test + void testSimplestMedia() throws Exception { + ViewConfigurationContext result = testObj.generateForTable("medias", ctx); + Assert.assertNotNull(result); + + } } diff --git a/commercedbsync/velocity.log b/commercedbsync/velocity.log deleted file mode 100644 index e69de29..0000000 diff --git a/commercedbsynchac/.project b/commercedbsynchac/.project deleted file mode 100644 index 051ed21..0000000 --- a/commercedbsynchac/.project +++ /dev/null @@ -1,23 +0,0 @@ - - - commercedbsynchac - - - - - - org.eclipse.jdt.core.javabuilder - - - - - com.hybris.hyeclipse.tsv.builder - - - - - - com.hybris.hyeclipse.tsv.hybris - org.eclipse.jdt.core.javanature - - diff --git a/commercedbsynchac/.springBeans b/commercedbsynchac/.springBeans deleted file mode 100644 index fa78869..0000000 --- a/commercedbsynchac/.springBeans +++ /dev/null @@ -1,16 +0,0 @@ - - - 1 - - - - - - - resources/commercedbsynchac-spring.xml - - - - - - diff --git a/commercedbsynchac/hac/resources/jsp/dataCopy.jsp b/commercedbsynchac/hac/resources/jsp/dataCopy.jsp index c206e8b..b8298b3 100644 --- a/commercedbsynchac/hac/resources/jsp/dataCopy.jsp +++ b/commercedbsynchac/hac/resources/jsp/dataCopy.jsp @@ -13,6 +13,7 @@ media="screen, projection"/> "/> + @@ -25,15 +26,25 @@

    Data Migration

    - - - Incremental mode is enabled. Only rows changed after ${incrementalTimestamp} for specified tables will be copied. - + + + + Incremental mode is enabled. Only rows changed after ${incrementalTimestamp} for specified tables will be copied.
    +
    +
    + + + The timezones on the source and target databases are different. This could cause problems. Please take this into account and check components that rely on timezones after the migration.
    +
    + I am aware of timezone differences, proceed with migration +
    +
    + + +
    +
    "> +
    -
    - - -
    @@ -90,9 +101,20 @@
    - + +
    +
    "> + + +
    +
    +
    +
    "> + + +
    +
    +

    Migration Log
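The dataCopy.jsp hunks above add a timezone mismatch warning and an acknowledgment checkbox that gates the start button. The flag behind it (isTimezoneEqual) is set further down in this patch via checkTimeZoneDifferences(migrationContext), whose body is not part of the diff. A minimal sketch of what such a check could look like, assuming only that both repositories hand out a JDBC Connection; the helper names, the CURRENT_TIMESTAMP probe, and the 30-minute tolerance are illustrative assumptions, not the project's actual implementation:

    // Hypothetical sketch only; not the code added by this patch.
    // Needs: java.sql.Connection, ResultSet, SQLException, Statement; java.util.Calendar, TimeZone
    private boolean checkTimeZoneDifferences(MigrationContext context) {
        try (Connection source = context.getDataSourceRepository().getConnection();
                Connection target = context.getDataTargetRepository().getConnection()) {
            // read both database wall clocks as if they were UTC, so differing session
            // time zones show up as a large gap even when the physical clocks agree
            long gapMillis = Math.abs(wallClockMillis(source) - wallClockMillis(target));
            return gapMillis < 30L * 60L * 1000L; // same zone => gap stays well below one hour
        } catch (Exception e) {
            LOG.warn("Could not compare database time zones", e);
            return true; // do not block the UI on a failed probe
        }
    }

    private long wallClockMillis(Connection connection) throws SQLException {
        try (Statement stmt = connection.createStatement();
                ResultSet rs = stmt.executeQuery("SELECT CURRENT_TIMESTAMP")) { // vendor-specific SQL may be needed
            rs.next();
            return rs.getTimestamp(1, Calendar.getInstance(TimeZone.getTimeZone("UTC"))).getTime();
        }
    }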

    diff --git a/commercedbsynchac/hac/resources/jsp/dataSource.jsp b/commercedbsynchac/hac/resources/jsp/dataSource.jsp index 557a21f..9e3ec0d 100644 --- a/commercedbsynchac/hac/resources/jsp/dataSource.jsp +++ b/commercedbsynchac/hac/resources/jsp/dataSource.jsp @@ -10,7 +10,7 @@ Migrate Data To SAP Commerce Cloud " type="text/css" media="screen, projection" /> - " type="text/css" media="screen, projection" /> + " type="text/css" media="screen, projection" /> @@ -42,6 +42,7 @@
    +
    @@ -58,6 +59,7 @@
    +
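The connectionException placeholders added to dataSource.jsp above are filled by dataSource.js (later in this patch) from data.exception whenever a connection test reports "Not valid!!". A sketch of how the /migrationDataSource/{profile}/validate handler might surface that message; the setValid/setException setters and the isValid probe are assumptions, since the diff only shows that the endpoint exists and that the UI reads data.exception:

    // Hypothetical sketch only; not the code added by this patch.
    DataSourceValidationResultData result = new DataSourceValidationResultData();
    try (Connection connection = getDataRepository(profile).getConnection()) {
        result.setValid(connection.isValid(5)); // 5-second connectivity probe
    } catch (Exception e) {
        result.setValid(false);
        // rendered into #connectionException / #connectionExceptionTarget by dataSource.js
        result.setException(ExceptionUtils.getRootCauseMessage(e));
    }
    return result;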
    diff --git a/commercedbsynchac/hac/resources/jsp/migrationReports.jsp b/commercedbsynchac/hac/resources/jsp/migrationReports.jsp index 191684b..866bc1f 100644 --- a/commercedbsynchac/hac/resources/jsp/migrationReports.jsp +++ b/commercedbsynchac/hac/resources/jsp/migrationReports.jsp @@ -11,7 +11,7 @@ Copy Schema To SAP Commerce Cloud " type="text/css" media="screen, projection"/> - " type="text/css" + " type="text/css" media="screen, projection"/> " type="text/css" media="screen, projection"/> diff --git a/commercedbsynchac/hac/resources/jsp/schemaCopy.jsp b/commercedbsynchac/hac/resources/jsp/schemaCopy.jsp index 82fad77..e39ab8a 100644 --- a/commercedbsynchac/hac/resources/jsp/schemaCopy.jsp +++ b/commercedbsynchac/hac/resources/jsp/schemaCopy.jsp @@ -11,7 +11,7 @@ Copy Schema To SAP Commerce Cloud " type="text/css" media="screen, projection" /> - " type="text/css" media="screen, projection" /> + " type="text/css" media="screen, projection" /> " type="text/css" media="screen, projection" /> " type="text/css" media="screen, projection" /> diff --git a/commercedbsynchac/hac/resources/static/js/configPanel.js b/commercedbsynchac/hac/resources/static/js/configPanel.js new file mode 100644 index 0000000..4fb2b5e --- /dev/null +++ b/commercedbsynchac/hac/resources/static/js/configPanel.js @@ -0,0 +1,176 @@ +'use strict'; +var ConfigPanel = (function (my) { + + let panelRoot; + let configItems + let renderers; + + my.initPanel = function (root) { + if (!root) return; + + configItems = []; + renderers = {}; + panelRoot = root; + initRenderers(); + init(root); + } + + my.values = function () { + var retVal = {}; + configItems.filter(function(item) { + return shouldRender(item); + }).forEach(function(item) { + retVal[item.propertyBinding] = resolveValue(panelRoot, item); + }); + return retVal; + } + + function initRenderers() { + renderers['java.lang.Boolean'] = initBooleanRenderer(); + renderers['java.lang.Integer'] = initIntegerRenderer(); + } + + function initBooleanRenderer() { + var retVal = {}; + retVal.render = function(root, item) { + var container = $("
    ").attr("class", "onoffswitch-large"); + var input = $("").attr("type", "checkbox").attr("class", "onoffswitch-checkbox").attr("id", `${item.id}`); + input.prop("checked", item.initialValue); + checkDisabled(root, item); + + input.change(function() { + configItems.forEach(function(_item) { + var _container = resolveContainerByItemId(_item.id); + if(item.id !== _item.id) { + checkDisabled(_container, _item); + } + else { + _item.initialValue = _container.is(":checked"); + } + }) + }); + + var label = $("").attr("class", "onoffswitch-label").attr("for", `${item.id}`); + var innerContainer = $("
    ").attr("class", "onoffswitch-inner").attr("_on","ENABLED").attr("_off","DISABLED"); + var switchContainer = $("
    ").attr("class", "onoffswitch-switch-large"); + + label.append(innerContainer); + label.append(switchContainer); + container.append(input); + container.append(label); + + root.append(container); + + } + retVal.element = function(root, item) { + return $(`#${item.id}`); + } + retVal.resolve = function(root, item) { + return retVal.element(root, item).is(":checked"); + } + return retVal; + } + + function initIntegerRenderer() { + var retVal = {}; + retVal.render = function(root, item) { + var input = $("").attr("type", "number").attr("id", `${item.id}`).attr("step", "1").attr("min", "1").attr("value", `${item.initialValue}`); + checkDisabled(root, item); + root.append(input); + } + retVal.element = function(root, item) { + return $(`#${item.id}`); + } + retVal.resolve = function(root, item) { + return retVal.element(root, item).val(); + } + return retVal; + } + + function shouldRender(item) { + return item.renderIf.interpolate({ + item: item, + configItems: configItems, + getItemById: resolveItemById, + getValueByItemId: function(item) { + return resolveValueByItemId(configPanel, item); + } + }) === 'true'; + } + + function checkDisabled(input, item) { + if(shouldRender(item)) { + input.show(); + } else { + input.hide(); + } + } + + function init(root) { + var token = $("meta[name='_csrf']").attr("content"); + var url = root.attr('data-configPanelDataUrl'); + $.ajax({ + url: url, + type: 'GET', + headers: { + 'Accept': 'application/json', + 'X-CSRF-TOKEN': token + }, + success: function (data) { + configItems = data.items; + if(configItems) { + configItems.forEach(function(item) { + renderItem(root, item); + }); + } + }, + error: function(xhr, status, error) { + console.error('Could not get panel config data'); + } + }); + } + + function renderItem(configPanel, item) { + var container = $("
    ").attr("id", `container-${item.id}`).attr("class", "span-4"); + var title = $(`

    ${item.name}:

    `); + container.append(title); + configPanel.append(container); + var renderer = renderers[item.type]; + renderer.render(container, item); + } + + function resolveValue(configPanel, item) { + return renderers[item.type].resolve(configPanel, item); + } + + function resolveValueByItemId(configPanel, id) { + var item = resolveItemById(id); + return renderers[item.type].resolve(configPanel, item); + } + + function resolveItemById(id) { + return configItems.filter(function(item) { + return item.id === id; + })[0]; + } + + function resolveContainerByItemId(id) { + return $(`#container-${id}`); + } + + function resolveRendererByItem(id) { + return configItems.filter(function(item) { + return item.id === id; + })[0]; + } + + String.prototype.interpolate = function(params) { + const names = Object.keys(params); + const values = Object.values(params); + return new Function(...names, `return \`${this}\`;`)(...values); + } + + + return my; +}(ConfigPanel || {})); + diff --git a/commercedbsynchac/hac/resources/static/js/dataCopy.js b/commercedbsynchac/hac/resources/static/js/dataCopy.js index dea15c1..1815df6 100644 --- a/commercedbsynchac/hac/resources/static/js/dataCopy.js +++ b/commercedbsynchac/hac/resources/static/js/dataCopy.js @@ -7,31 +7,40 @@ 'use strict'; (function () { function setupMigration() { - const startButton = document.getElementById("buttonCopyData") - const stopButton = document.getElementById("buttonStopCopyData") - const startUrl = startButton.dataset.url; - const stopUrl = stopButton.dataset.url; - const resumeUrl = startUrl.replace("copyData", "resumeRunning"); const statusContainer = document.getElementById('copyStatus'); const summaryContainer = document.getElementById('copySummary'); const timeContainer = document.getElementById('copyTime'); const statusUrl = statusContainer.dataset.url; const logContainer = document.getElementById("copyLogContainer"); - const reportButton = document.getElementById("buttonCopyReport") - const reportForm = document.getElementById("formCopyReport") + const reportButton = document.getElementById("buttonCopyReport"); + const dataSourceButton = document.getElementById("buttonDataSourceReport"); + const dataTargetButton = document.getElementById("buttonDataTargetReport") ; + const reportForm = document.getElementById("formCopyReport"); + const timezoneCheckbox = document.getElementById("timezoneCheckbox") const token = document.querySelector('meta[name="_csrf"]').content; - const switchPrefixButton = document.getElementById("buttonSwitchPrefix") let lastUpdateTime = Date.UTC(1970, 0, 1, 0, 0, 0); let pollInterval; let startButtonContentBefore; let currentMigrationID; + let startUrl; + let stopUrl; + + const startButton = document.getElementById("buttonCopyData") + const stopButton = document.getElementById("buttonStopCopyData") - startButton.disabled = true; - startButton.addEventListener('click', copyData); - stopButton.disabled = true; - stopButton.addEventListener('click', stopCopy); - switchPrefixButton.disabled = true; - switchPrefixButton.addEventListener('click', switchPrefix); + if (startButton && stopButton) { + startUrl = startButton.dataset.url; + stopUrl = stopButton.dataset.url; + + startButton.disabled = true; + startButton.addEventListener('click', copyData); + stopButton.disabled = true; + stopButton.addEventListener('click', stopCopy); + + ConfigPanel.initPanel($('#configPanel')); + + startButtonContentBefore = startButton.innerHTML; + } resumeRunning(); @@ -63,79 +72,107 @@ } - function switchPrefix() { - let 
switchButtonContentBefore = switchPrefixButton.innerHTML; - switchPrefixButton.innerHTML = switchButtonContentBefore + ' ' + hac.global.getSpinnerImg(); - $.ajax({ - url: switchPrefixButton.dataset.url, - type: 'PUT', - headers: { - 'Accept': 'application/json', - 'X-CSRF-TOKEN': token - }, - success: function (data) { - switchPrefixButton.innerHTML = switchButtonContentBefore; - }, - error: hac.global.err - }); - } - function resumeRunning() { $.ajax({ - url: resumeUrl, + url: '/hac/commercedbsynchac/resumeRunning', type: 'GET', headers: { 'Accept': 'application/json', 'X-CSRF-TOKEN': token }, success: function (data) { - if(data && (data.status === 'RUNNING' || data.status === 'PROCESSED' || data.status == 'POSTPROCESSING')) { - startButtonContentBefore = startButton.innerHTML; - startButton.innerHTML = startButtonContentBefore + ' ' + hac.global.getSpinnerImg(); - startButton.disabled = true; - reportButton.disabled = true; - stopButton.disabled = false; + if (data) { + if (data.status === 'RUNNING' && startButton) { + startButton.innerHTML = startButtonContentBefore + ' ' + hac.global.getSpinnerImg(); + + if (timezoneCheckbox) { + $('#timezoneCheckboxContainer').hide(); + } + } + + configureStartButton(data.status === 'RUNNING') + + reportButton.disabled = !(data.status === 'RUNNING'); + if (dataSourceButton) { + dataSourceButton.disabled = !(data.status === 'RUNNING'); + } + if (dataTargetButton) { + dataTargetButton.disabled = !(data.status === 'RUNNING'); + } + if (stopButton) { + stopButton.disabled = !(data.status === 'RUNNING'); + } currentMigrationID = data.migrationID; empty(logContainer); updateStatus(data); doPoll(); pollInterval = setInterval(doPoll, 5000); } else { - startButton.disabled = false; + configureStartButton(false) } }, error: function (data) { - startButton.disabled = false; + configureStartButton(false) } }); } + function configureStartButton(disable) { + if (!startButton) return; + + startButton.disabled = disable; + + if (!disable && timezoneCheckbox) { + startButton.disabled = !$(timezoneCheckbox).is(':checked'); + } + } + function copyData() { startButtonContentBefore = startButton.innerHTML; startButton.innerHTML = startButtonContentBefore + ' ' + hac.global.getSpinnerImg(); startButton.disabled = true; reportButton.disabled = true; + if (dataSourceButton) { + dataSourceButton.disabled = true; + } + if (dataTargetButton) { + dataTargetButton.disabled = true; + } stopButton.disabled = false; $.ajax({ url: startUrl, - type: 'PUT', + type: 'POST', + data: ConfigPanel.values(), headers: { 'Accept': 'application/json', - 'X-CSRF-TOKEN': token + 'X-CSRF-TOKEN': token, }, success: function (data) { + if(data.customException!=null) + { + hac.global.error(data.customException); + + stopButton.disabled = true; + startButton.innerHTML = startButtonContentBefore; + startButton.disabled = false; + } + else + { currentMigrationID = data.migrationID; empty(logContainer); updateStatus(data); doPoll(); pollInterval = setInterval(doPoll, 5000); + } + }, error: function(xht, textStatus, ex) { hac.global.error("Data migration process failed, please check the logs"); stopButton.disabled = true; startButton.innerHTML = startButtonContentBefore; - startButton.disabled = false; + + configureStartButton(false) } }); } @@ -143,7 +180,8 @@ function stopCopy() { stopButton.disabled = true; startButton.innerHTML = startButtonContentBefore; - startButton.disabled = false; + + configureStartButton(false) $.ajax({ url: stopUrl, type: 'PUT', @@ -225,12 +263,22 @@ logContainer.scrollTop = 
logContainer.scrollHeight - logContainer.clientHeight } updateStatus(status); - if (status.completed || status.failed) { - startButton.innerHTML = startButtonContentBefore - startButton.disabled = false; - stopButton.disabled = true; + if (status.completed) { + if (startButton) { + startButton.innerHTML = startButtonContentBefore + configureStartButton(false) + stopButton.disabled = true; + } $(reportForm).children('input[name=migrationId]').val(currentMigrationID); reportButton.disabled = false; + if (dataSourceButton) { + $(dataSourceButton).siblings('input[name=migrationId]').val(currentMigrationID); + dataSourceButton.disabled = false; + } + if (dataTargetButton) { + $(dataTargetButton).siblings('input[name=migrationId]').val(currentMigrationID); + dataTargetButton.disabled = false; + } clearInterval(pollInterval); } }, diff --git a/commercedbsynchac/hac/resources/static/js/dataSource.js b/commercedbsynchac/hac/resources/static/js/dataSource.js index ac21ddb..efe9e81 100644 --- a/commercedbsynchac/hac/resources/static/js/dataSource.js +++ b/commercedbsynchac/hac/resources/static/js/dataSource.js @@ -66,6 +66,7 @@ function validateSource() $('#buttonDsSourceValidate').html("Valid!"); } else { $('#buttonDsSourceValidate').html("Not valid!!"); + $('#connectionException').html(data.exception); } }, error: hac.global.err @@ -92,6 +93,7 @@ function validateTarget() $('#buttonDsTargetValidate').html("Valid!"); } else { $('#buttonDsTargetValidate').html("Not valid!!"); + $('#connectionExceptionTarget').html(data.exception); } }, error: hac.global.err diff --git a/commercedbsynchac/hac/src/de/hybris/platform/hac/controller/CommercemigrationhacController.java b/commercedbsynchac/hac/src/de/hybris/platform/hac/controller/CommercemigrationhacController.java index e78a987..fed6c1d 100644 --- a/commercedbsynchac/hac/src/de/hybris/platform/hac/controller/CommercemigrationhacController.java +++ b/commercedbsynchac/hac/src/de/hybris/platform/hac/controller/CommercemigrationhacController.java @@ -1,11 +1,13 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ + package de.hybris.platform.hac.controller; import com.google.common.base.Joiner; +import com.google.common.collect.Lists; import com.google.gson.Gson; import com.google.gson.GsonBuilder; import com.microsoft.azure.storage.blob.CloudBlockBlob; @@ -15,13 +17,15 @@ import org.apache.commons.lang.BooleanUtils; import org.apache.commons.lang.StringUtils; import org.apache.commons.lang.exception.ExceptionUtils; +import org.apache.commons.lang3.tuple.Pair; import org.apache.logging.log4j.util.Strings; import com.sap.cx.boosters.commercedbsync.MigrationStatus; import com.sap.cx.boosters.commercedbsync.constants.CommercedbsyncConstants; +import com.sap.cx.boosters.commercedbsync.context.LaunchOptions; import com.sap.cx.boosters.commercedbsync.context.MigrationContext; +import com.sap.cx.boosters.commercedbsync.logging.JDBCQueriesStore; import com.sap.cx.boosters.commercedbsync.repository.DataRepository; import com.sap.cx.boosters.commercedbsync.service.DatabaseMigrationService; -import com.sap.cx.boosters.commercedbsync.service.DatabaseMigrationSynonymService; import com.sap.cx.boosters.commercedbsync.service.DatabaseSchemaDifferenceService; import com.sap.cx.boosters.commercedbsync.service.impl.BlobDatabaseMigrationReportStorageService; import com.sap.cx.boosters.commercedbsync.service.impl.DefaultDatabaseSchemaDifferenceService; @@ -41,6 +45,7 @@ import javax.servlet.http.HttpServletResponse; import java.io.ByteArrayInputStream; import java.io.InputStream; +import java.io.Serializable; import java.nio.charset.StandardCharsets; import java.text.SimpleDateFormat; import java.time.Instant; @@ -50,7 +55,6 @@ import java.util.*; import java.util.stream.Collectors; - /** * */ @@ -58,9 +62,11 @@ @RequestMapping("/commercedbsynchac/**") public class CommercemigrationhacController { - public static final String DEFAULT_EMPTY_VAL = "[NOT SET]"; + private static final String DEFAULT_EMPTY_VAL = "[NOT SET]"; + private static final boolean DEFAULT_BOOLEAN_VAL = false; private static final Logger LOG = LoggerFactory.getLogger(CommercemigrationhacController.class); - private static final SimpleDateFormat DATE_TIME_FORMATTER = new SimpleDateFormat("YYYY-MM-dd HH:mm", Locale.ENGLISH); + private static final SimpleDateFormat DATE_TIME_FORMATTER = new SimpleDateFormat("YYYY-MM-dd HH:mm", + Locale.ENGLISH); @Autowired private UserService userService; @@ -76,56 +82,71 @@ public class CommercemigrationhacController { @Autowired private MigrationContext migrationContext; - - @Autowired - private DatabaseMigrationSynonymService databaseMigrationSynonymService; - @Autowired private MetricService metricService; @Autowired BlobDatabaseMigrationReportStorageService blobDatabaseMigrationReportStorageService; - @RequestMapping(value = - {"/migrationSchema"}, method = - {org.springframework.web.bind.annotation.RequestMethod.GET}) + @RequestMapping(value = {"/migrationSchema"}, method = {org.springframework.web.bind.annotation.RequestMethod.GET}) public String schema(final Model model) { logAction("Schema migration tab clicked"); - // ORACLE_TARGET -- start - migrationContext.refreshSelf(); - // ORACLE_TARGET -- END + // ORACLE_TARGET -- start + migrationContext.refreshSelf(); + // ORACLE_TARGET -- END model.addAttribute("wikiJdbcLogging", "some notes on database"); model.addAttribute("wikiDatabase", "some more note on supported features"); Map schemaSettings = new HashMap<>(); - schemaSettings.put(CommercedbsyncConstants.MIGRATION_SCHEMA_TARGET_COLUMNS_ADD_ENABLED, 
migrationContext.isAddMissingColumnsToSchemaEnabled()); - schemaSettings.put(CommercedbsyncConstants.MIGRATION_SCHEMA_TARGET_TABLES_REMOVE_ENABLED, migrationContext.isRemoveMissingTablesToSchemaEnabled()); - schemaSettings.put(CommercedbsyncConstants.MIGRATION_SCHEMA_TARGET_TABLES_ADD_ENABLED, migrationContext.isAddMissingTablesToSchemaEnabled()); - schemaSettings.put(CommercedbsyncConstants.MIGRATION_SCHEMA_TARGET_COLUMNS_REMOVE_ENABLED, migrationContext.isRemoveMissingColumnsToSchemaEnabled()); + schemaSettings.put(CommercedbsyncConstants.MIGRATION_SCHEMA_TARGET_COLUMNS_ADD_ENABLED, + migrationContext.isAddMissingColumnsToSchemaEnabled()); + schemaSettings.put(CommercedbsyncConstants.MIGRATION_SCHEMA_TARGET_TABLES_REMOVE_ENABLED, + migrationContext.isRemoveMissingTablesToSchemaEnabled()); + schemaSettings.put(CommercedbsyncConstants.MIGRATION_SCHEMA_TARGET_TABLES_ADD_ENABLED, + migrationContext.isAddMissingTablesToSchemaEnabled()); + schemaSettings.put(CommercedbsyncConstants.MIGRATION_SCHEMA_TARGET_COLUMNS_REMOVE_ENABLED, + migrationContext.isRemoveMissingColumnsToSchemaEnabled()); model.addAttribute("schemaSettings", schemaSettings); model.addAttribute("schemaMigrationDisabled", !migrationContext.isSchemaMigrationEnabled()); model.addAttribute("schemaSqlForm", new SchemaSqlFormData()); return "schemaCopy"; } - @RequestMapping(value = - {"/migrationData"}, method = - {org.springframework.web.bind.annotation.RequestMethod.GET}) + @RequestMapping(value = {"/migrationData"}, method = {org.springframework.web.bind.annotation.RequestMethod.GET}) public String data(final Model model) { logAction("Data migration tab clicked"); - // ORACLE_TARGET -- start -// migrationContext.refreshSelf(); + // ORACLE_TARGET -- start + // migrationContext.refreshSelf(); + model.addAttribute("isTimezoneEqual", checkTimeZoneDifferences(migrationContext)); model.addAttribute("isIncremental", migrationContext.isIncrementalModeEnabled()); Instant timestamp = migrationContext.getIncrementalTimestamp(); model.addAttribute("incrementalTimestamp", timestamp == null ? 
DEFAULT_EMPTY_VAL : timestamp); - model.addAttribute("srcTsName", StringUtils.defaultIfEmpty(migrationContext.getDataSourceRepository().getDataSourceConfiguration().getTypeSystemName(), DEFAULT_EMPTY_VAL)); - model.addAttribute("tgtTsName", StringUtils.defaultIfEmpty(migrationContext.getDataTargetRepository().getDataSourceConfiguration().getTypeSystemName(), DEFAULT_EMPTY_VAL)); - model.addAttribute("srcPrefix", StringUtils.defaultIfEmpty(migrationContext.getDataSourceRepository().getDataSourceConfiguration().getTablePrefix(), DEFAULT_EMPTY_VAL)); - model.addAttribute("tgtMigPrefix", StringUtils.defaultIfEmpty(migrationContext.getDataTargetRepository().getDataSourceConfiguration().getTablePrefix(), DEFAULT_EMPTY_VAL)); - model.addAttribute("tgtActualPrefix", StringUtils.defaultIfEmpty(configurationService.getConfiguration().getString("db.tableprefix"), DEFAULT_EMPTY_VAL)); + model.addAttribute("srcTsName", + StringUtils.defaultIfEmpty( + migrationContext.getDataSourceRepository().getDataSourceConfiguration().getTypeSystemName(), + DEFAULT_EMPTY_VAL)); + model.addAttribute("tgtTsName", + StringUtils.defaultIfEmpty( + migrationContext.getDataTargetRepository().getDataSourceConfiguration().getTypeSystemName(), + DEFAULT_EMPTY_VAL)); + model.addAttribute("srcPrefix", + StringUtils.defaultIfEmpty( + migrationContext.getDataSourceRepository().getDataSourceConfiguration().getTablePrefix(), + DEFAULT_EMPTY_VAL)); + model.addAttribute("tgtMigPrefix", + StringUtils.defaultIfEmpty( + migrationContext.getDataTargetRepository().getDataSourceConfiguration().getTablePrefix(), + DEFAULT_EMPTY_VAL)); + model.addAttribute("tgtActualPrefix", StringUtils.defaultIfEmpty( + configurationService.getConfiguration().getString("db.tableprefix"), DEFAULT_EMPTY_VAL)); + model.addAttribute("isLogSql", + BooleanUtils.toBooleanDefaultIfNull(migrationContext.isLogSql(), DEFAULT_BOOLEAN_VAL)); + model.addAttribute("isSchedulerResumeEnabled", migrationContext.isSchedulerResumeEnabled()); + model.addAttribute("isDataExportEnabled", migrationContext.isDataExportEnabled()); return "dataCopy"; } - @RequestMapping(value = {"/migrationDataSource"}, method = {org.springframework.web.bind.annotation.RequestMethod.GET}) + @RequestMapping(value = {"/migrationDataSource"}, method = { + org.springframework.web.bind.annotation.RequestMethod.GET}) public String dataSource(final Model model) { logAction("Data sources tab clicked"); model.addAttribute("wikiJdbcLogging", "some notes on database"); @@ -133,7 +154,8 @@ public String dataSource(final Model model) { return "dataSource"; } - @RequestMapping(value = {"/migrationDataSource/{profile}"}, method = {org.springframework.web.bind.annotation.RequestMethod.GET}) + @RequestMapping(value = {"/migrationDataSource/{profile}"}, method = { + org.springframework.web.bind.annotation.RequestMethod.GET}) @ResponseBody public DataSourceConfigurationData dataSourceInfo(final Model model, @PathVariable String profile) { model.addAttribute("wikiJdbcLogging", "some notes on database"); @@ -145,23 +167,25 @@ public DataSourceConfigurationData dataSourceInfo(final Model model, @PathVariab dataSourceConfigurationData = new DataSourceConfigurationData(); dataSourceConfigurationData.setProfile(dataRepository.getDataSourceConfiguration().getProfile()); dataSourceConfigurationData.setDriver(dataRepository.getDataSourceConfiguration().getDriver()); - dataSourceConfigurationData.setConnectionString(MaskUtil.stripJdbcPassword(dataRepository.getDataSourceConfiguration().getConnectionString())); + 
dataSourceConfigurationData.setConnectionString( + MaskUtil.stripJdbcPassword(dataRepository.getDataSourceConfiguration().getConnectionString())); dataSourceConfigurationData.setUserName(dataRepository.getDataSourceConfiguration().getUserName()); - dataSourceConfigurationData.setPassword(dataRepository.getDataSourceConfiguration().getPassword().replaceAll(".*", "*")); + dataSourceConfigurationData + .setPassword(dataRepository.getDataSourceConfiguration().getPassword().replaceAll(".*", "*")); dataSourceConfigurationData.setCatalog(dataRepository.getDataSourceConfiguration().getCatalog()); dataSourceConfigurationData.setSchema(dataRepository.getDataSourceConfiguration().getSchema()); dataSourceConfigurationData.setMaxActive(dataRepository.getDataSourceConfiguration().getMaxActive()); dataSourceConfigurationData.setMaxIdle(dataRepository.getDataSourceConfiguration().getMaxIdle()); dataSourceConfigurationData.setMinIdle(dataRepository.getDataSourceConfiguration().getMinIdle()); - dataSourceConfigurationData.setRemoveAbandoned(dataRepository.getDataSourceConfiguration().isRemoveAbandoned()); + dataSourceConfigurationData + .setRemoveAbandoned(dataRepository.getDataSourceConfiguration().isRemoveAbandoned()); } return dataSourceConfigurationData; } - @RequestMapping(value = - {"/migrationDataSource/{profile}/validate"}, method = - {org.springframework.web.bind.annotation.RequestMethod.GET}) + @RequestMapping(value = {"/migrationDataSource/{profile}/validate"}, method = { + org.springframework.web.bind.annotation.RequestMethod.GET}) @ResponseBody public DataSourceValidationResultData dataSourceValidation(final Model model, @PathVariable String profile) { logAction("Validate connections button clicked"); @@ -186,37 +210,36 @@ public DataSourceValidationResultData dataSourceValidation(final Model model, @P } private DataRepository getDataRepository(String profile) { - if (StringUtils.equalsIgnoreCase(profile, migrationContext.getDataSourceRepository().getDataSourceConfiguration().getProfile())) { + if (StringUtils.equalsIgnoreCase(profile, + migrationContext.getDataSourceRepository().getDataSourceConfiguration().getProfile())) { return migrationContext.getDataSourceRepository(); - } else if (StringUtils.equalsIgnoreCase(profile, migrationContext.getDataTargetRepository().getDataSourceConfiguration().getProfile())) { + } else if (StringUtils.equalsIgnoreCase(profile, + migrationContext.getDataTargetRepository().getDataSourceConfiguration().getProfile())) { return migrationContext.getDataTargetRepository(); } else { return null; } } - @RequestMapping(value = - {"/generateSchemaScript"}, method = - {org.springframework.web.bind.annotation.RequestMethod.GET}) + @RequestMapping(value = {"/generateSchemaScript"}, method = { + org.springframework.web.bind.annotation.RequestMethod.GET}) @ResponseBody public String generateSchemaScript() throws Exception { logAction("Generate schema script button clicked"); - // ORACLE_TARGET -- start - migrationContext.refreshSelf(); - // ORACLE_TARGET -- END + // ORACLE_TARGET -- start + migrationContext.refreshSelf(); + // ORACLE_TARGET -- END return databaseSchemaDifferenceService.generateSchemaDifferencesSql(migrationContext); } - @RequestMapping(value = - {"/migrateSchema"}, method = - {org.springframework.web.bind.annotation.RequestMethod.POST}) + @RequestMapping(value = {"/migrateSchema"}, method = {org.springframework.web.bind.annotation.RequestMethod.POST}) @ResponseBody public String migrateSchema(@ModelAttribute("schemaSqlForm") SchemaSqlFormData data) { try { 
logAction("Execute script button clicked"); - // ORACLE_TARGET -- start - migrationContext.refreshSelf(); - // ORACLE_TARGET -- END + // ORACLE_TARGET -- start + migrationContext.refreshSelf(); + // ORACLE_TARGET -- END if (BooleanUtils.isTrue(data.getAccepted())) { databaseSchemaDifferenceService.executeSchemaDifferencesSql(migrationContext, data.getSqlQuery()); } else { @@ -228,16 +251,18 @@ public String migrateSchema(@ModelAttribute("schemaSqlForm") SchemaSqlFormData d return "Successfully executed sql"; } - @RequestMapping(value = - {"/previewSchemaMigration"}, method = - {org.springframework.web.bind.annotation.RequestMethod.GET}) + @RequestMapping(value = {"/previewSchemaMigration"}, method = { + org.springframework.web.bind.annotation.RequestMethod.GET}) @ResponseBody public SchemaDifferenceResultContainerData previewSchemaMigration() throws Exception { logAction("Preview schema migration changes button clicked"); LOG.info("Starting preview of source and target db diff..."); - DefaultDatabaseSchemaDifferenceService.SchemaDifferenceResult difference = databaseSchemaDifferenceService.getDifference(migrationContext); - SchemaDifferenceResultData sourceSchemaDifferenceResultData = getSchemaDifferenceResultData(difference.getSourceSchema()); - SchemaDifferenceResultData targetSchemaDifferenceResultData = getSchemaDifferenceResultData(difference.getTargetSchema()); + DefaultDatabaseSchemaDifferenceService.SchemaDifferenceResult difference = databaseSchemaDifferenceService + .getDifference(migrationContext); + SchemaDifferenceResultData sourceSchemaDifferenceResultData = getSchemaDifferenceResultData( + difference.getSourceSchema()); + SchemaDifferenceResultData targetSchemaDifferenceResultData = getSchemaDifferenceResultData( + difference.getTargetSchema()); SchemaDifferenceResultContainerData schemaDifferenceResultContainerData = new SchemaDifferenceResultContainerData(); schemaDifferenceResultContainerData.setSource(sourceSchemaDifferenceResultData); schemaDifferenceResultContainerData.setTarget(targetSchemaDifferenceResultData); @@ -245,21 +270,24 @@ public SchemaDifferenceResultContainerData previewSchemaMigration() throws Excep Gson gson = new GsonBuilder().setPrettyPrinting().create(); String timeStamp = new SimpleDateFormat("yyyy-MM-dd-HH-mm-ss").format(new Date()); try { - InputStream is = new ByteArrayInputStream(gson.toJson(schemaDifferenceResultContainerData).getBytes(StandardCharsets.UTF_8)); - blobDatabaseMigrationReportStorageService.store("schema-differences-"+timeStamp+".json", is); - } catch (Exception e){ + InputStream is = new ByteArrayInputStream( + gson.toJson(schemaDifferenceResultContainerData).getBytes(StandardCharsets.UTF_8)); + blobDatabaseMigrationReportStorageService.store("schema-differences-" + timeStamp + ".json", is); + } catch (Exception e) { LOG.error("Failed to save the schema differences report to blob storage!"); } return schemaDifferenceResultContainerData; } - private SchemaDifferenceResultData getSchemaDifferenceResultData(DefaultDatabaseSchemaDifferenceService.SchemaDifference diff) { + private SchemaDifferenceResultData getSchemaDifferenceResultData( + DefaultDatabaseSchemaDifferenceService.SchemaDifference diff) { SchemaDifferenceResultData schemaDifferenceResultData = new SchemaDifferenceResultData(); Map missingTablesMap = diff.getMissingTables().stream() .collect(Collectors.toMap(e -> getTableName(diff, e.getRightName()), e -> "")); Map missingColumnsMap = diff.getMissingColumnsInTable().asMap().entrySet().stream() - 
.collect(Collectors.toMap(e -> getTableName(diff, e.getKey().getRightName()), e -> Joiner.on(";").join(e.getValue()))); + .collect(Collectors.toMap(e -> getTableName(diff, e.getKey().getRightName()), + e -> Joiner.on(";").join(e.getValue()))); Map map = new HashMap<>(); map.putAll(missingTablesMap); @@ -285,24 +313,56 @@ private String getTableName(DefaultDatabaseSchemaDifferenceService.SchemaDiffere } } - @RequestMapping(value = "/copyData", method = RequestMethod.PUT, produces = MediaType.APPLICATION_JSON_VALUE) + @RequestMapping(value = "/copyData", method = RequestMethod.POST, produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody - public MigrationStatus copyData() throws Exception { - logAction("Start data migration executed"); - // ORACLE_TARGET -- start - migrationContext.refreshSelf(); - // ORACLE_TARGET -- END - String currentMigrationId = databaseMigrationService.startMigration(migrationContext); + public MigrationStatus copyData(@RequestParam Map copyConfig) throws Exception { + if (migrationContext.isDataExportEnabled()) { + throw new IllegalStateException("Migration cannot be started from HAC"); + } + + String currentMigrationId = databaseMigrationService.getMigrationID(migrationContext); + MigrationStatus migrationStatus = new MigrationStatus(); + LaunchOptions launchOptions = new LaunchOptions(); + launchOptions.getPropertyOverrideMap().putAll(copyConfig); + Serializable isResume = copyConfig.getOrDefault(CommercedbsyncConstants.MIGRATION_SCHEDULER_RESUME_ENABLED, + false); + + // ORACLE_TARGET -- start + migrationContext.refreshSelf(); + // ORACLE_TARGET -- END + + try { + if (BooleanUtils.toBoolean(isResume.toString()) && StringUtils.isNotEmpty(currentMigrationId)) { + logAction("Resume data migration executed"); + + databaseMigrationService.resumeUnfinishedMigration(migrationContext, launchOptions, currentMigrationId); + } else { + logAction("Start data migration executed"); + + currentMigrationId = databaseMigrationService.startMigration(migrationContext, launchOptions); + } + } catch (Exception e) { + migrationStatus.setCustomException(e.getMessage()); + + return migrationStatus; + } finally { + copyConfig.replace(CommercedbsyncConstants.MIGRATION_SCHEDULER_RESUME_ENABLED, false); + } + return databaseMigrationService.getMigrationState(migrationContext, currentMigrationId); } @RequestMapping(value = "/abortCopy", method = RequestMethod.PUT, produces = MediaType.APPLICATION_JSON_VALUE) @ResponseBody public String abortCopy(@RequestBody String migrationID) throws Exception { + if (migrationContext.isDataExportEnabled()) { + throw new IllegalStateException("Migration cannot be aborted from HAC"); + } + logAction("Stop data migration executed"); - // ORACLE_TARGET -- start - migrationContext.refreshSelf(); - // ORACLE_TARGET -- END + // ORACLE_TARGET -- start + migrationContext.refreshSelf(); + // ORACLE_TARGET -- END databaseMigrationService.stopMigration(migrationContext, migrationID); return "true"; } @@ -310,9 +370,10 @@ public String abortCopy(@RequestBody String migrationID) throws Exception { @GetMapping(value = "/resumeRunning") @ResponseBody public MigrationStatus resumeRunning() throws Exception { - final String currentMigrationId = databaseMigrationService.getMigrationID(migrationContext); - if (StringUtils.isNotEmpty(currentMigrationId)) { - MigrationStatus migrationState = databaseMigrationService.getMigrationState(migrationContext, currentMigrationId); + final String currentMigrationId = databaseMigrationService.getMigrationID(migrationContext); + if 
(StringUtils.isNotEmpty(currentMigrationId)) { + MigrationStatus migrationState = databaseMigrationService.getMigrationState(migrationContext, + currentMigrationId); prepareStateForJsonSerialization(migrationState); return migrationState; } else { @@ -324,7 +385,8 @@ public MigrationStatus resumeRunning() throws Exception { @ResponseBody public MigrationStatus copyStatus(@RequestParam String migrationID, @RequestParam long since) throws Exception { OffsetDateTime sinceTime = OffsetDateTime.ofInstant(Instant.ofEpochMilli(since), ZoneOffset.UTC); - MigrationStatus migrationState = databaseMigrationService.getMigrationState(migrationContext, migrationID, sinceTime); + MigrationStatus migrationState = databaseMigrationService.getMigrationState(migrationContext, migrationID, + sinceTime); prepareStateForJsonSerialization(migrationState); return migrationState; } @@ -350,12 +412,9 @@ private Long convertToEpoch(LocalDateTime time) { return time.toInstant(ZoneOffset.UTC).toEpochMilli(); } - @GetMapping( - value = "/copyReport", - produces = MediaType.APPLICATION_OCTET_STREAM_VALUE - ) - public @ResponseBody - byte[] getCopyReport(@RequestParam String migrationId, HttpServletResponse response) throws Exception { + @GetMapping(value = "/copyReport", produces = MediaType.APPLICATION_OCTET_STREAM_VALUE) + public @ResponseBody byte[] getCopyReport(@RequestParam String migrationId, HttpServletResponse response) + throws Exception { logAction("Download migration report button clicked"); response.setHeader("Content-Disposition", "attachment; filename=migration-report.json"); Gson gson = new GsonBuilder().setPrettyPrinting().create(); @@ -363,11 +422,28 @@ byte[] getCopyReport(@RequestParam String migrationId, HttpServletResponse respo return json.getBytes(StandardCharsets.UTF_8.name()); } - @RequestMapping(value = "/switchPrefix", method = RequestMethod.PUT) - @ResponseBody - public Boolean switchPrefix(@RequestParam String prefix) throws Exception { - databaseMigrationSynonymService.recreateSynonyms(migrationContext.getDataTargetRepository(), prefix); - return Boolean.TRUE; + @GetMapping(value = "/dataSourceJdbcReport", produces = MediaType.APPLICATION_OCTET_STREAM_VALUE) + public @ResponseBody byte[] getDataSourceJdbcReport(@RequestParam String migrationId, + HttpServletResponse response) { + logAction("Download data source jdbc queries report button clicked"); + JDBCQueriesStore sourceEntriesStore = migrationContext.getDataSourceRepository().getJdbcQueriesStore(); + return getLogFile(migrationId, response, sourceEntriesStore); + } + + @GetMapping(value = "/dataTargetJdbcReport", produces = MediaType.APPLICATION_OCTET_STREAM_VALUE) + public @ResponseBody byte[] getDataTargetJdbcReport(@RequestParam String migrationId, + HttpServletResponse response) { + logAction("Download data target jdbc queries report button clicked"); + JDBCQueriesStore targetEntriesStore = migrationContext.getDataTargetRepository().getJdbcQueriesStore(); + return getLogFile(migrationId, response, targetEntriesStore); + } + + private byte[] getLogFile(String migrationId, HttpServletResponse response, JDBCQueriesStore jdbcQueriesStore) { + Pair logFilePair = jdbcQueriesStore.getLogFile(migrationId); + final byte[] logFileBytes = logFilePair.getLeft(); + final String logFileName = logFilePair.getRight(); + response.setHeader("Content-Disposition", "attachment; filename=" + logFileName); + return logFileBytes; } @RequestMapping(value = "/metrics", method = RequestMethod.GET) @@ -377,25 +453,25 @@ public List getMetrics() throws 
Exception { } private void logAction(String message) { - LOG.info("{}: {} - User:{} - Time:{}", "CMT Action", message, userService.getCurrentUser().getUid(),LocalDateTime.now()); + LOG.info("{}: {} - User:{} - Time:{}", "CMT Action", message, userService.getCurrentUser().getUid(), + LocalDateTime.now()); } - @RequestMapping(value = - {"/loadMigrationReports"}, method = - {org.springframework.web.bind.annotation.RequestMethod.GET}) + @RequestMapping(value = {"/loadMigrationReports"}, method = { + org.springframework.web.bind.annotation.RequestMethod.GET}) @ResponseBody public List loadMigrationReports() { try { - List blobs = blobDatabaseMigrationReportStorageService.listAllReports(); - List result = new ArrayList<>(); - blobs.forEach(blob -> { - ReportResultData reportResultData = new ReportResultData(); - reportResultData.setModifiedTimestamp(getSortableTimestamp(blob)); - reportResultData.setReportId(blob.getName()); - reportResultData.setPrimaryUri(blob.getUri().toString()); - result.add(reportResultData); - }); - return result; + List blobs = blobDatabaseMigrationReportStorageService.listAllReports(); + List result = new ArrayList<>(); + blobs.forEach(blob -> { + ReportResultData reportResultData = new ReportResultData(); + reportResultData.setModifiedTimestamp(getSortableTimestamp(blob)); + reportResultData.setReportId(blob.getName()); + reportResultData.setPrimaryUri(blob.getUri().toString()); + result.add(reportResultData); + }); + return result; } catch (Exception e) { e.printStackTrace(); } @@ -403,38 +479,78 @@ public List loadMigrationReports() { } private String getSortableTimestamp(CloudBlockBlob blob) { - if(blob != null && blob.getProperties() != null) { + if (blob != null && blob.getProperties() != null) { Date lastModified = blob.getProperties().getLastModified(); - if(lastModified != null) { + if (lastModified != null) { return DATE_TIME_FORMATTER.format(lastModified); } } return Strings.EMPTY; } - @GetMapping( - value = "/downloadLogsReport", - produces = MediaType.APPLICATION_OCTET_STREAM_VALUE - ) - public @ResponseBody - ResponseEntity downloadLogsReport(@RequestParam String migrationId) throws Exception { + @GetMapping(value = "/downloadLogsReport", produces = MediaType.APPLICATION_OCTET_STREAM_VALUE) + public @ResponseBody ResponseEntity downloadLogsReport(@RequestParam String migrationId) throws Exception { logAction("Download migration report button clicked"); byte[] outputFile = blobDatabaseMigrationReportStorageService.getReport(migrationId); HttpHeaders responseHeaders = new HttpHeaders(); - responseHeaders.set("charset", "utf-8"); - responseHeaders.setContentType(MediaType.valueOf("text/plain")); + responseHeaders.set("Content-Type", "text/plain; charset=utf-8"); responseHeaders.setContentLength(outputFile.length); responseHeaders.set("Content-disposition", "attachment; filename=migration-report.json"); return new ResponseEntity<>(outputFile, responseHeaders, HttpStatus.OK); } - - @RequestMapping(value = - {"/migrationReports"}, method = - {org.springframework.web.bind.annotation.RequestMethod.GET}) + @RequestMapping(value = {"/migrationReports"}, method = {org.springframework.web.bind.annotation.RequestMethod.GET}) public String reports(final Model model) { logAction("Migration reports tab clicked"); return "migrationReports"; } + @RequestMapping(value = {"/configPanel"}, method = {org.springframework.web.bind.annotation.RequestMethod.GET}) + public @ResponseBody ConfigPanelDTO configPanel(final Model model) { + ConfigPanelDTO configPanelDTO = new 
ConfigPanelDTO(); + ConfigPanelItemDTO resume = createConfigItem("resumeUnfinishedItems", "Resume Mode", + "If enabled, resumes next migration from where it was stopped", Boolean.class, + migrationContext.isSchedulerResumeEnabled(), "true", + CommercedbsyncConstants.MIGRATION_SCHEDULER_RESUME_ENABLED); + ConfigPanelItemDTO parTables = createConfigItem("maxParallelTableCopy", "Parallel Tables", + "Number of tables to be copied in parallel", Integer.class, migrationContext.getMaxParallelTableCopy(), + "true", CommercedbsyncConstants.MIGRATION_DATA_MAXPRALLELTABLECOPY); + ConfigPanelItemDTO maxReader = createConfigItem("maxReaderWorkers", "Reader Workers", + "Number of reader workers to be used for each table", Integer.class, + migrationContext.getMaxParallelReaderWorkers(), "true", + CommercedbsyncConstants.MIGRATION_DATA_WORKERS_READER_MAXTASKS); + ConfigPanelItemDTO maxWriter = createConfigItem("maxWriterWorkers", "Writer Workers", + "Number of writer workers to be used for each table", Integer.class, + migrationContext.getMaxParallelWriterWorkers(), "true", + CommercedbsyncConstants.MIGRATION_DATA_WORKERS_WRITER_MAXTASKS); + ConfigPanelItemDTO batchSize = createConfigItem("batchSize", "Batch Size", "Batch size used to query data", + Integer.class, migrationContext.getReaderBatchSize(), "${!getValueByItemId('resumeUnfinishedItems')}", + CommercedbsyncConstants.MIGRATION_DATA_READER_BATCHSIZE); + configPanelDTO.setItems(Lists.newArrayList(resume, parTables, maxReader, maxWriter, batchSize)); + return configPanelDTO; + } + + private ConfigPanelItemDTO createConfigItem(String id, String name, String description, Class type, + Object initialValue, String renderIf, String propertyBinding) { + ConfigPanelItemDTO configPanelItemDTO = new ConfigPanelItemDTO(); + configPanelItemDTO.setId(id); + configPanelItemDTO.setName(name); + configPanelItemDTO.setDescription(description); + configPanelItemDTO.setType(type); + configPanelItemDTO.setInitialValue(initialValue); + configPanelItemDTO.setRenderIf(renderIf); + configPanelItemDTO.setPropertyBinding(propertyBinding); + return configPanelItemDTO; + } + + private boolean checkTimeZoneDifferences(MigrationContext context) { + TimeZone source = TimeZone.getTimeZone(context.getDataSourceRepository().getDatabaseTimezone()); + if (TimeZone.getTimeZone("UTC").getRawOffset() == source.getRawOffset()) { + LOG.info("The timezone on source and target are the same!!"); + return true; + } + LOG.info("The timezone on source and target are different!!"); + return false; + + } } diff --git a/commercedbsynchac/hac/testclasses/de/hybris/platform/hac/controller/CommercemigrationhacControllerTest.class b/commercedbsynchac/hac/testclasses/de/hybris/platform/hac/controller/CommercemigrationhacControllerTest.class deleted file mode 100644 index 9d97625..0000000 Binary files a/commercedbsynchac/hac/testclasses/de/hybris/platform/hac/controller/CommercemigrationhacControllerTest.class and /dev/null differ diff --git a/commercedbsynchac/hac/testsrc/de/hybris/platform/hac/controller/CommercemigrationhacControllerTest.java b/commercedbsynchac/hac/testsrc/de/hybris/platform/hac/controller/CommercemigrationhacControllerTest.java index b6a3e9e..74dd53f 100644 --- a/commercedbsynchac/hac/testsrc/de/hybris/platform/hac/controller/CommercemigrationhacControllerTest.java +++ b/commercedbsynchac/hac/testsrc/de/hybris/platform/hac/controller/CommercemigrationhacControllerTest.java @@ -1,13 +1,9 @@ /* - * [y] hybris Platform + * Copyright: 2023 SAP SE or an SAP affiliate company and 
commerce-db-synccontributors. + * License: Apache-2.0 * - * Copyright (c) 2018 SAP SE or an SAP affiliate company. All rights reserved. - * - * This software is the confidential and proprietary information of SAP - * ("Confidential Information"). You shall not disclose such Confidential - * Information and shall use it only in accordance with the terms of the - * license agreement you entered into with SAP. */ + package de.hybris.platform.hac.controller; import de.hybris.bootstrap.annotations.IntegrationTest; @@ -15,7 +11,6 @@ import org.junit.Before; import org.junit.Test; - /** * Test for {@link CommercemigrationhacController}. */ @@ -45,11 +40,10 @@ public void teardown() { @Test public void testSayHello() { - /* - final String helloText = cut.sayHello(); - - assertNotNull(helloText); - assertNotEquals(0, helloText.length()); - */ + /* + * final String helloText = cut.sayHello(); + * + * assertNotNull(helloText); assertNotEquals(0, helloText.length()); + */ } } diff --git a/commercedbsynchac/project.properties b/commercedbsynchac/project.properties index 44b5307..ae90c6a 100644 --- a/commercedbsynchac/project.properties +++ b/commercedbsynchac/project.properties @@ -6,4 +6,7 @@ commercedbsynchac.key=value # Specifies the location of the spring context file putted automatically to the global platform application context. commercedbsynchac.application-context=commercedbsynchac-spring.xml -migration.from.hac.enabled=true \ No newline at end of file +migration.from.hac.enabled=true + +## fix for Config Panel rendering error due to: "EvalError: Refused to evaluate a string as JavaScript because 'unsafe-eval' is not an allowed" at static/js/configPanel.js:169 +hac.xss.filter.header.Content-Security-Policy=default-src 'self'; style-src 'self' 'unsafe-inline'; img-src 'self' data:; script-src 'self' 'unsafe-inline' 'unsafe-eval' \ No newline at end of file diff --git a/commercedbsynchac/resources/commercedbsynchac-beans.xml b/commercedbsynchac/resources/commercedbsynchac-beans.xml index edc74ad..d1822b6 100644 --- a/commercedbsynchac/resources/commercedbsynchac-beans.xml +++ b/commercedbsynchac/resources/commercedbsynchac-beans.xml @@ -66,5 +66,17 @@ + + + + + + + + + + + + diff --git a/commercedbsynchac/resources/commercedbsynchac-spring.xml b/commercedbsynchac/resources/commercedbsynchac-spring.xml index 2469c7e..ff81411 100644 --- a/commercedbsynchac/resources/commercedbsynchac-spring.xml +++ b/commercedbsynchac/resources/commercedbsynchac-spring.xml @@ -23,7 +23,9 @@ - + + + @@ -38,11 +40,19 @@ - - + + - - + + + + + + + + diff --git a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/CommercedbsynchacStandalone.java b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/CommercedbsynchacStandalone.java index 65f6381..b5022a0 100644 --- a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/CommercedbsynchacStandalone.java +++ b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/CommercedbsynchacStandalone.java @@ -1,8 +1,9 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ + package com.sap.cx.boosters.commercedbsynchac; import de.hybris.platform.core.Registry; @@ -10,21 +11,22 @@ import de.hybris.platform.util.RedeployUtilities; import de.hybris.platform.util.Utilities; - /** - * Demonstration of how to write a standalone application that can be run directly from within eclipse or from the - * commandline.
    + * Demonstration of how to write a standalone application that can be run + * directly from within eclipse or from the commandline.
    * To run this from commandline, just use the following command:
    * * java -jar bootstrap/bin/ybootstrap.jar "new commercedbsynchac.CommercedbsynchacStandalone().run();" - * From eclipse, just run as Java Application. Note that you maybe need to add all other projects like - * ext-commerce, ext-pim to the Launch configuration classpath. + * From eclipse, just run as Java Application. Note that you maybe need + * to add all other projects like ext-commerce, ext-pim to the Launch + * configuration classpath. */ public class CommercedbsynchacStandalone { /** * Main class to be able to run it directly as a java program. * - * @param args the arguments from commandline + * @param args + * the arguments from commandline */ public static void main(final String[] args) { new CommercedbsynchacStandalone().run(); @@ -35,8 +37,8 @@ public void run() { Registry.activateMasterTenant(); final JaloSession jaloSession = JaloSession.getCurrentSession(); - System.out.println("Session ID: " + jaloSession.getSessionID()); //NOPMD - System.out.println("User: " + jaloSession.getUser()); //NOPMD + System.out.println("Session ID: " + jaloSession.getSessionID()); // NOPMD + System.out.println("User: " + jaloSession.getUser()); // NOPMD Utilities.printAppInfo(); RedeployUtilities.shutdown(); diff --git a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/constants/YhacextConstants.java b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/constants/YhacextConstants.java index da69f92..7b1c1a5 100644 --- a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/constants/YhacextConstants.java +++ b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/constants/YhacextConstants.java @@ -1,20 +1,20 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ -package com.sap.cx.boosters.commercedbsynchac.constants; -import com.sap.cx.boosters.commercedbsynchac.constants.GeneratedYhacextConstants; +package com.sap.cx.boosters.commercedbsynchac.constants; /** - * Global class for all Commercedbsynchac constants. You can add global constants for your extension into this class. + * Global class for all Commercedbsynchac constants. You can add global + * constants for your extension into this class. */ public final class YhacextConstants extends GeneratedYhacextConstants { public static final String EXTENSIONNAME = "commercedbsynchac"; private YhacextConstants() { - //empty to avoid instantiating this constant class + // empty to avoid instantiating this constant class } // implement here constants used by this extension diff --git a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/MetricService.java b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/MetricService.java index a0131e8..7427122 100644 --- a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/MetricService.java +++ b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/MetricService.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ diff --git a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/impl/DefaultMetricService.java b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/impl/DefaultMetricService.java index b63be52..4d4aacf 100644 --- a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/impl/DefaultMetricService.java +++ b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/impl/DefaultMetricService.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -20,7 +20,7 @@ public class DefaultMetricService implements MetricService { private static final Logger LOG = LoggerFactory.getLogger(DefaultMetricService.class); - private List populators; + private final List populators; public DefaultMetricService(List populators) { this.populators = populators; diff --git a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/MetricPopulator.java b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/MetricPopulator.java index 3e328d4..f9f226e 100644 --- a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/MetricPopulator.java +++ b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/MetricPopulator.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -10,10 +10,10 @@ import com.sap.cx.boosters.commercedbsync.context.MigrationContext; public interface MetricPopulator { - static String PRIMARY_STANDARD_COLOR = "#92cae4"; - static String PRIMARY_CRITICAL_COLOR = "#de5d70"; - static String SECONDARY_STANDARD_COLOR = "#d5edf8"; - static String SECONDARY_CRITICAL_COLOR = "#e8acb5"; + String PRIMARY_STANDARD_COLOR = "#92cae4"; + String PRIMARY_CRITICAL_COLOR = "#de5d70"; + String SECONDARY_STANDARD_COLOR = "#d5edf8"; + String SECONDARY_CRITICAL_COLOR = "#e8acb5"; MetricData populate(MigrationContext context) throws Exception; diff --git a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/CpuMetricPopulator.java b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/CpuMetricPopulator.java index 34f0867..d7c0f9f 100644 --- a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/CpuMetricPopulator.java +++ b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/CpuMetricPopulator.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/DTUMetricPopulator.java b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/DTUMetricPopulator.java index c1cc45d..de88c25 100644 --- a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/DTUMetricPopulator.java +++ b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/DTUMetricPopulator.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
+ * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/HikariConnectionMetricPopulator.java b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/HikariConnectionMetricPopulator.java index 29b01b6..4fa8df3 100644 --- a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/HikariConnectionMetricPopulator.java +++ b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/HikariConnectionMetricPopulator.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -30,7 +30,7 @@ public MetricData populate(MigrationContext context) throws Exception { data.setPrimaryValue(activeConnections); data.setPrimaryValueLabel("Active"); data.setPrimaryValueUnit("#"); - data.setPrimaryValueThreshold((double) maxConnections); + data.setPrimaryValueThreshold(maxConnections); data.setSecondaryValue(maxConnections - activeConnections); data.setSecondaryValueLabel("Idle"); data.setSecondaryValueUnit("#"); diff --git a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/HikariSourceConnectionMetricPopulator.java b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/HikariSourceConnectionMetricPopulator.java index b7b105f..8ea13ad 100644 --- a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/HikariSourceConnectionMetricPopulator.java +++ b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/HikariSourceConnectionMetricPopulator.java @@ -1,12 +1,11 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ package com.sap.cx.boosters.commercedbsynchac.metric.populator.impl; - import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import javax.sql.DataSource; diff --git a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/HikariTargetConnectionMetricPopulator.java b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/HikariTargetConnectionMetricPopulator.java index f83041c..465bdb2 100644 --- a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/HikariTargetConnectionMetricPopulator.java +++ b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/HikariTargetConnectionMetricPopulator.java @@ -1,12 +1,11 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ package com.sap.cx.boosters.commercedbsynchac.metric.populator.impl; - import com.sap.cx.boosters.commercedbsync.context.MigrationContext; import javax.sql.DataSource; diff --git a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/IOMetricPopulator.java b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/IOMetricPopulator.java index 580dd00..973cb99 100644 --- a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/IOMetricPopulator.java +++ b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/IOMetricPopulator.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ @@ -15,7 +15,7 @@ public class IOMetricPopulator implements MetricPopulator { - private PerformanceProfiler performanceProfiler; + private final PerformanceProfiler performanceProfiler; public IOMetricPopulator(PerformanceProfiler performanceProfiler) { this.performanceProfiler = performanceProfiler; @@ -24,8 +24,10 @@ public IOMetricPopulator(PerformanceProfiler performanceProfiler) { @Override public MetricData populate(MigrationContext context) throws Exception { MetricData data = new MetricData(); - int avgRowReading = (int) performanceProfiler.getAverageByCategoryAndUnit(PerformanceCategory.DB_READ, PerformanceUnit.ROWS); - int avgRowWriting = (int) performanceProfiler.getAverageByCategoryAndUnit(PerformanceCategory.DB_WRITE, PerformanceUnit.ROWS); + int avgRowReading = (int) performanceProfiler.getAverageByCategoryAndUnit(PerformanceCategory.DB_READ, + PerformanceUnit.ROWS); + int avgRowWriting = (int) performanceProfiler.getAverageByCategoryAndUnit(PerformanceCategory.DB_WRITE, + PerformanceUnit.ROWS); int totalIO = avgRowReading + avgRowWriting; if (avgRowReading < 1 && avgRowWriting < 1) { avgRowReading = -1; diff --git a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/MemoryMetricPopulator.java b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/MemoryMetricPopulator.java index 8a75b38..735b7c2 100644 --- a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/MemoryMetricPopulator.java +++ b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/MemoryMetricPopulator.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. * License: Apache-2.0 * */ diff --git a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/TaskExecutorMetricPopulator.java b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/TaskExecutorMetricPopulator.java index 7bbad47..7b0d7e1 100644 --- a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/TaskExecutorMetricPopulator.java +++ b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/TaskExecutorMetricPopulator.java @@ -1,5 +1,5 @@ /* - * Copyright: 2022 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. 
* License: Apache-2.0 * */ @@ -15,8 +15,8 @@ public class TaskExecutorMetricPopulator implements MetricPopulator { - private AsyncTaskExecutor executor; - private String name; + private final AsyncTaskExecutor executor; + private final String name; public TaskExecutorMetricPopulator(AsyncTaskExecutor executor, String name) { this.executor = executor; diff --git a/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/ThreadPoolMetricPopulator.java b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/ThreadPoolMetricPopulator.java new file mode 100644 index 0000000..3b41e28 --- /dev/null +++ b/commercedbsynchac/src/com/sap/cx/boosters/commercedbsynchac/metric/populator/impl/ThreadPoolMetricPopulator.java @@ -0,0 +1,49 @@ +/* + * Copyright: 2023 SAP SE or an SAP affiliate company and commerce-db-synccontributors. + * License: Apache-2.0 + * + */ + +package com.sap.cx.boosters.commercedbsynchac.metric.populator.impl; + +import com.sap.cx.boosters.commercedbsync.concurrent.DataThreadPoolFactory; +import com.sap.cx.boosters.commercedbsync.context.MigrationContext; +import com.sap.cx.boosters.commercedbsynchac.metric.populator.MetricPopulator; +import de.hybris.platform.commercedbsynchac.data.MetricData; +import org.apache.commons.lang.StringUtils; + +public class ThreadPoolMetricPopulator implements MetricPopulator { + + private final DataThreadPoolFactory factory; + private final String name; + + public ThreadPoolMetricPopulator(DataThreadPoolFactory factory, String name) { + this.factory = factory; + this.name = name; + } + + @Override + public MetricData populate(MigrationContext context) throws Exception { + MetricData data = new MetricData(); + double activeCount = factory.getMonitor().getActiveCount(); + double maxPoolSize = factory.getMonitor().getMaxPoolSize(); + if (maxPoolSize < 1) { + // make primary and secondary value negative to indicate inactive widget + activeCount = -1; + maxPoolSize = -2; + } + data.setMetricId(name + "-executor"); + data.setName(StringUtils.capitalize(name) + " Tasks"); + data.setDescription("The workers running in parallel in the task executor"); + data.setPrimaryValue(activeCount); + data.setPrimaryValueLabel("Running"); + data.setPrimaryValueUnit("#"); + data.setPrimaryValueThreshold(-1d); + data.setSecondaryValue(maxPoolSize - activeCount); + data.setSecondaryValueLabel("Free"); + data.setSecondaryValueUnit("#"); + data.setSecondaryValueThreshold(-1d); + populateColors(data); + return data; + } +} diff --git a/docs/configuration/CONFIGURATION-REFERENCE.md b/docs/configuration/CONFIGURATION-REFERENCE.md index 6c05d7e..1ce2ec7 100644 --- a/docs/configuration/CONFIGURATION-REFERENCE.md +++ b/docs/configuration/CONFIGURATION-REFERENCE.md @@ -1,51 +1,78 @@ + # SAP Commerce DB Sync - Configuration Reference -| Property | Mandatory | Default | Description | -|--------------------------------------------------------|-----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------| -| migration.ds.source.db.driver | yes | | DB driver class for source connection | -| migration.ds.source.db.url | yes | | DB url for source connection | -| migration.ds.source.db.username | yes | | DB username for source connection | -| migration.ds.source.db.password | yes | | DB password for source 
connection | -| migration.ds.source.db.tableprefix | no | | DB table prefix for source connection | -| migration.ds.source.db.schema | yes | | DB schema for source connection | -| migration.ds.source.db.connection.pool.size.idle.min | no | ${db.pool.minIdle} | Min idle connections in source db pool | -| migration.ds.source.db.connection.pool.size.active.max | no | ${db.pool.maxActive} | Min active connections in source db pool | -| migration.ds.target.db.driver | no | ${db.driver} | DB driver class for target connection | -| migration.ds.target.db.url | no | ${db.url} | DB url for target connection | -| migration.ds.target.db.username | no | ${db.username} | DB username for target connection | -| migration.ds.target.db.password | no | ${db.password} | DB password for target connection | -| migration.ds.target.db.tableprefix | no | ${db.tableprefix} | DB table prefix for target connection | -| migration.ds.target.db.schema | no | dbo | DB schema for target connection | -| migration.ds.target.db.connection.pool.size.idle.min | no | ${db.pool.minIdle} | Min idle connections in target db pool | -| migration.ds.target.db.connection.pool.size.active.max | no | ${db.pool.maxActive} | Min active connections in target db pool | -| migration.ds.target.db.max.stage.migrations | no | 5 | The maximum amount of staged table sets allowed. | -| migration.schema.enabled | no | true | Enable schema adaption features | -| migration.schema.target.tables.add.enabled | no | false | Allow adding missing tables to target schema | -| migration.schema.target.columns.add.enabled | no | true | Allow adding missing columns to target table schema | -| migration.schema.target.columns.remove.enabled | no | true | Allow removing extra columns from target table schema | -| migration.data.reader.batchsize | no | 1000 | batch size when reading data from source table | -| migration.data.workers.writer.maxtasks | no | 10 | maximum number of writer workers per table that can be executed in parallel | -| migration.data.workers.reader.maxtasks | no | 3 | maximum number of reader workers per table that can be executed in parallel | -| migration.data.workers.retryattempts | no | 0 | retry attempts if a batch (read or write) failed. | -| migration.data.truncate.enabled | no | true | Allow truncating the target table before writing data | -| migration.data.truncate.excluded | no | | If truncating enabled, exclude these tables. Comma seperated list | -| migration.data.maxparalleltablecopy | no | 2 | Tables copied in parallel | -| migration.data.columns.excluded.{table} | no | | Columns to be ignored when writing data to target table. The {table} value has to be replaced with the table name, the property value is a comma separated list of column names. | -| migration.data.columns.nullify.{table} | no | | Column values to be nullified when writing data to target table. The {table} value has to be replaced with the table name, the property value is a comma separated list of column names. | -| migration.data.indices.disable.enabled | no | false | Disable indices temporarily before writing data to target table and reenable them after the writing operation. | -| migration.data.indices.drop.enabled | no | false | Drop indices before writing data to target table. | -| migration.data.tables.excluded | no | SYSTEMINIT | Tables to be excluded in migration. If migration.data.tables.included is set, this property is ignored | -| migration.data.tables.included | no | | Tables to be included in migration. 
If migration.data.tables.excluded is set, this property is ignored | -| migration.data.report.connectionstring | yes | ${media.globalSettings.cloudAzureBlobStorageStrategy.connection} | target blob storage for the report generation | -| migration.data.incremental.enabled | no | false | enables the incremental mode | -| migration.data.incremental.tables | no | | enables the incremental mode | Only these tables will be taken into account for incremental migration -| migration.data.incremental.timestamp | no | | The timestamp in ISO-8601 local date time format. Records created or modified after this timestamp will be copied only. -| migration.data.pipe.timeout | no | 7200 | The max time the pipe can blocked if it is running full before it times out. -| migration.data.pipe.capacity | no | 100 | The maximum amount of element the pipe can handle before it starts blocking. -| migration.stalled.timeout | no | 7200 | The time after which the pipe (and hence the migration) will be marked as stalled. -| migration.data.view.t.{table}.enabled | no | | Enables view creation for the table -| migration.data.view.t.{table}.columnPrefix | no | | Puts prefix to the columns of view's SELECT statement. Required only if view uses JOIN with other tables. -| migration.data.view.t.{table}.joinWhereClause | no | | Join and Where Clause for the view -| migration.data.view.t.{table}.columnTransformation.{columname} | no | | Basic transformation for the column -| migration.data.t.typeinfo.{type}.enabled | no | | Puts type PK into MIGRATIONTOOLKIT_TF_TYPEINFO table \ No newline at end of file +| Property | Description | Default | values | optional | dependency | +| --- | --- | --- | --- | --- | --- | +| migration.cluster.enabled | Run migration in the cluster (based on commerce cluster config). The 'HAC' node will be the primary one. A scheduling algorithm decides which table will run on which node. Nodes are notified using cluster events.| `false` | true or false | true | | +| migration.data.columns.excluded.attributedescriptors | Specifies the columns to be excluded| | migration.data.columns.excluded.[tablename]=[comma separated list of column names] | true | | +| migration.data.columns.nullify.attributedescriptors | Specifies the columns to be nullified. Whatever value there was will be replaced with NULL in the target column.| | migration.data.columns.nullify.[tablename]=[comma separated list of column names] | true | | +| migration.data.export.enabled | Activate data export to external DB via cron jobs| `false` | true or false | true | | +| migration.data.failonerror.enabled | If set to true, the migration will abort as soon as an error occured. If set to false, the migration will try to continue if the state of the runtime allows.| `true` | true or false | true | | +| migration.data.filestorage.container.name | Specifies the name of the container where the tool will store the files related to migration in the blob storage pointed by the property {migration.data.report.connectionstring}| `migration` | any string | migration | | +| migration.data.fulldatabase.enabled | | `true` | | | | +| migration.data.incremental.enabled | If set to true, the migration will run in incremental mode. Only rows that were modified after a given timestamp will be taken into account.| `false` | true or false | true | | +| migration.data.incremental.tables | Only these tables will be taken into account for incremental migration.| | comma separated list of tables. 
| true | migration.data.incremental.enabled | +| migration.data.incremental.timestamp | Records created or modified after this timestamp will be copied only.| | The timestamp in ISO-8601 ISO_ZONED_DATE_TIME format | true | migration.data.incremental.enabled | +| migration.data.indices.disable.enabled | If set to true, all indices in the target table will be disabled (NOT removed) before copying over the data. After the data copy the indices will be enabled and rebuilt again.| `false` | true or false | true | | +| migration.data.indices.disable.included | If disabling of indices is enabled, this property specifies the tables that should be included. If no tables are specified, indices for all tables will be disabled.| | comma separated list of tables | true | migration.data.indices.disable.enabled | +| migration.data.indices.drop.enabled | If set to true, all indices in the target table will be removed before copying over the data.| `false` | true or false | true | | +| migration.data.indices.drop.recreate.exclude | Do not recreate the following indices after the migration.| | comma separated values | true | | +| migration.data.maxparalleltablecopy | Specifies the number of tables that are copied over in parallel.| `2` | integer value | true | | +| migration.data.pipe.capacity | Specifies the capacity of the data pipe.| `100` | integer value | true | | +| migration.data.pipe.timeout | Specifies the timeout of the data pipe.| `7200` | integer value | true | | +| migration.data.reader.batchsize | Specifies the batch size used when reading data from the source table.| `1000` | integer value | true | | +| migration.data.report.connectionstring | Specifies the blob storage connection string for storing reporting files.| `${media.globalSettings.cloudAzureBlobStorageStrategy.connection}` | any Azure blob storage connection string | true | | +| migration.data.tables.audit.enabled | Flag to enable the migration of audit tables.| `true` | true or false | true | | +| migration.data.tables.custom | Specifies a list of custom tables to migrate. Custom tables are tables that are not part of the commerce type system.| | comma separated list of table names. | true | | +| migration.data.tables.excluded | Tables to exclude from migration (use table names without prefix)| `SYSTEMINIT,StoredHttpSessions,itemdeletionmarkers` | comma separated list of table names. | true | | +| migration.data.tables.included | Tables to include (use table names without prefix)| | comma separated list of table names. | true | | +| migration.data.truncate.enabled | Specifies if the target tables should be truncated before data is copied over.| `true` | true or false | true | | +| migration.data.truncate.excluded | If truncation of target tables is enabled, this property specifies tables that should be excluded from truncation.| | comma separated list of table names | true | migration.data.truncate.enabled |
+| migration.data.view.name.pattern | Supports views during data migration. String pattern for the view naming convention, with `'%s'` as the table name, e.g. `v_%s`| `v_%s` | any string | true | | +| migration.data.view.t.TABLE.columnTransformation.COLUMN | Allows custom SQL functions to be used to obfuscate values of specific columns| `GETDATE()` | any valid SQL function call | true | migration.data.view.t.TABLE.enabled | +| migration.data.view.t.TABLE.enabled | Activates DDL view generation for a specific _TABLE_| `false` | true or false | true | | +| migration.data.view.t.TABLE.joinWhereClause | Specifies an additional `JOIN`/`WHERE` clause for the DDL view generated for a specific _TABLE_| `{table}` | any string | true | migration.data.view.t.TABLE.enabled | +| migration.data.workers.reader.maxtasks | Specifies the number of threads used per table to read data from source. Note that this value applies per table, so in total the number of threads will depend on 'migration.data.maxparalleltablecopy'. [total number of reader threads] = [migration.data.workers.reader.maxtasks] * [migration.data.maxparalleltablecopy]| `3` | integer value | true | migration.data.maxparalleltablecopy | +| migration.data.workers.retryattempts | Specifies the number of retries in case a worker task fails.| `0` | integer value | true | | +| migration.data.workers.writer.maxtasks | Specifies the number of threads used per table to write data to target. Note that this value applies per table, so in total the number of threads will depend on 'migration.data.maxparalleltablecopy'. [total number of writer threads] = [migration.data.workers.writer.maxtasks] * [migration.data.maxparalleltablecopy]| `10` | integer value | true | migration.data.maxparalleltablecopy | +| migration.ds.source.db.connection.pool.size.active.max | Specifies the maximum amount of active connections in the source db pool| `${db.pool.maxActive}` | integer value | false | | +| migration.ds.source.db.connection.pool.size.idle.max | Specifies the maximum amount of idle connections in the source db pool| `${db.pool.maxIdle}` | integer value | false | | +| migration.ds.source.db.connection.pool.size.idle.min | Specifies the minimum amount of idle connections available in the source db pool| `${db.pool.minIdle}` | integer value | false | | +| migration.ds.source.db.driver | Specifies the driver class for the source jdbc connection| | any valid jdbc driver class | false | | +| migration.ds.source.db.password | Specifies the password for the source jdbc connection| | any valid password for the jdbc connection | false | | +| migration.ds.source.db.schema | Specifies the schema the respective commerce installation is deployed to.| | any valid schema name for the commerce installation | false | | +| migration.ds.source.db.tableprefix | Specifies the table prefix used on the source commerce database. This may be relevant if a commerce installation was initialized using 'db.tableprefix'.| | any valid commerce database table prefix. | true | | +| migration.ds.source.db.typesystemname | Specifies the name of the type system that should be taken into account| `DEFAULT` | any valid type system name | true | | +| migration.ds.source.db.typesystemsuffix | Specifies the suffix which is used for the source typesystem| | the suffix used for typesystem.
I.e., 'attributedescriptors1' means the suffix is '1' | true | migration.ds.source.db.typesystemname | +| migration.ds.source.db.url | Specifies the url for the source jdbc connection| | any valid jdbc url | false | | +| migration.ds.source.db.username | Specifies the user name for the source jdbc connection| | any valid user name for the jdbc connection | false | | +| migration.ds.target.db.catalog | | | | | | +| migration.ds.target.db.connection.pool.size.active.max | Specifies the maximum amount of active connections in the target db pool| `${db.pool.maxActive}` | integer value | false | | +| migration.ds.target.db.connection.pool.size.idle.max | Specifies the maximum amount of idle connections available in the target db pool| `${db.pool.maxIdle}` | integer value | false | | +| migration.ds.target.db.connection.pool.size.idle.min | Specifies the minimum amount of idle connections available in the target db pool| `${db.pool.minIdle}` | integer value | false | | +| migration.ds.target.db.driver | Specifies the driver class for the target jdbc connection| `${db.driver}` | any valid jdbc driver class | false | | +| migration.ds.target.db.max.stage.migrations | When using the staged approach, multiple sets of commerce tables may exist (each having its own tableprefix). To prevent cluttering the db, this property specifies the maximum number of table sets that can exist; if exceeded, the schema migrator will complain and suggest a cleanup.| `5` | integer value | true | | +| migration.ds.target.db.password | Specifies the password for the target jdbc connection| `${db.password}` | any valid password for the jdbc connection | false | | +| migration.ds.target.db.schema | Specifies the schema the target commerce installation is deployed to.| `dbo` | any valid schema name for the commerce installation | false | | +| migration.ds.target.db.tableprefix | Specifies the table prefix used on the target commerce database. This may be relevant if a commerce installation was initialized using `${db.tableprefix}` / staged approach.| `${db.tableprefix}` | any valid commerce database table prefix. | true | | +| migration.ds.target.db.typesystemname | Specifies the name of the type system that should be taken into account| `DEFAULT` | any valid type system name | true | | +| migration.ds.target.db.typesystemsuffix | Specifies the suffix which is used for the target typesystem| | the suffix used for typesystem.
I.e., 'attributedescriptors1' means the suffix is '1' | true | migration.ds.source.db.typesystemname | +| migration.ds.target.db.url | Specifies the url for the target jdbc connection| `${db.url}` | any valid jdbc url | false | | +| migration.ds.target.db.username | Specifies the user name for the target jdbc connection| `${db.username}` | any valid user name for the jdbc connection | false | | +| migration.input.profiles | Specifies the profile name of the data source that serves as migration input| `source` | name of the data source profile | true | | +| migration.locale.default | Specifies the default locale used.| `en-US` | any locale | true | | +| migration.log.sql | If set to true, the JDBC queries run against the source and target data sources will be logged in the storage pointed to by the property {migration.data.report.connectionstring}| `false` | true or false | false | | +| migration.log.sql.memory.flush.threshold.nbentries | Specifies the number of log entries to add to the in-memory collection of JDBC log entries of a JDBC queries store before flushing the collection contents into the blob file storage associated with the JDBC store's data source and clearing the in-memory collection to free memory| `10000000` | an integer number | true | | +| migration.log.sql.source.showparameters | If set to true, the values of the parameters of the JDBC queries run against the source data source will be logged in the JDBC queries logs (migration.log.sql has to be true to enable this type of logging). For security reasons, the tool will never log parameter values for the queries run against the target data source.| `true` | true or false | true | | +| migration.output.profiles | Specifies the profile name of the data sources that serve as migration output| `target` | name of the data source profile | true | | +| migration.properties.masked | Specifies the properties that should be masked in HAC.| `migration.data.report.connectionstring,migration.ds.source.db.password,migration.ds.target.db.password` | any property key | true | | +| migration.scheduler.resume.enabled | If set to true, the migration will resume from where it stopped (either due to errors or cancellation).| `false` | true or false | true | | +| migration.schema.autotrigger.enabled | Specifies if the schema migrator should be automatically triggered before the data copy process is started| `false` | true or false | true | migration.schema.enabled | +| migration.schema.enabled | Globally enables / disables schema migration. If set to false, no schema changes will be applied.| `true` | true or false | true | | +| migration.schema.target.columns.add.enabled | Specifies if columns which are missing in the target tables should be added by schema migration.| `true` | true or false | true | migration.schema.enabled | +| migration.schema.target.columns.remove.enabled | Specifies if extra columns in target tables (compared to source schema) should be removed by schema migration.| `true` | true or false | true | migration.schema.enabled | +| migration.schema.target.tables.add.enabled | Specifies if tables which are missing in the target should be added by schema migration.| `true` | true or false | true | migration.schema.enabled | +| migration.schema.target.tables.remove.enabled | Specifies if extra tables in target (compared to source schema) should be removed by schema migration.| `false` | true or false | true | migration.schema.enabled | +| migration.stalled.timeout | Specifies the timeout of the migration monitor.
If there was no activity for too long the migration will be marked as 'stalled' and aborted.| `7200` | integer value | true | | +| migration.trigger.updatesystem | Specifies whether the data migration shall be triggered by the 'update running system' operation.| `false` | true or false | true | | diff --git a/docs/performance/PERFORMANCE-GUIDE.md b/docs/performance/PERFORMANCE-GUIDE.md index 5be1890..a291156 100644 --- a/docs/performance/PERFORMANCE-GUIDE.md +++ b/docs/performance/PERFORMANCE-GUIDE.md @@ -1,7 +1,9 @@ # SAP Commerce DB Sync - Performance Guide -## AWS Mysql to SAP Commerce Cloud Benchmark +## Benchmarks + +### AWS to SAP Commerce Cloud Source Database: @@ -24,39 +26,12 @@ Results: > **NOTE**: DB size differs in source and target due to different storage concepts (indexes). -## SAP Commerce Cloud to Azure SQL Benchmark - -* Source Database: P15 (4000 DTUs) -* Destination Database: P15 (4000 DTUs) -* Dataset: 524 tables, 242 M records - -Tuning parameters: -* 6 read threads -* 20 write threads -* 10 parallel tables - -Results: -* Full duration: 151 min -* Throughput: 1.6M rows per min - -| TABLE NAME | ROW COUNT | THROUGHPUT (rows per min) | DURATION (min) | -|:--------------------------:|:-----------------:|:----------------------------:|:--------------:| -| PAYMENTTRANSACTIONS | 24,424,355 | 266,158 | 92 | -| PAYMENTINFOS | 24,388,012 | 270,588 | 90 | -| PAYMNTTRNSCTENTRIES | 24,452,182 | 251,359 | 97 | -| CARTENTRIES | 23,514,465 | 284,748 | 83 | -| ORDERENTRIES | 21,049,356 | 265,716 | 79 | -| ADDRESSES | 61,659,137 | 410,341 | 150 | -| CARTS | 12,410,584 | 208,847 | 59 | -| ORDERS | 10,397,869 | 254,734 | 41 | -| PROCESSES | 7,038,088 | 211,540 | 33 | -| PROMOTIONRESULT | 6,958,400 | 254,327 | 27 | -| PROMOTIONACTION | 7,503,149 | 213,532 | 35 | - ## Technical Concept + ![performance technical concept](performance_architecture.png) + ### Scheduler The table scheduler is responsible for triggering the copy process for each table. @@ -149,7 +124,7 @@ The amount of database connections can be defined as follows: ### Disabling Indexes Indexes can be a bottleneck when inserting batches. -MS SQL Server offers a way to temporarily disable indexes during the copy process. +MsSQL offers a way to temporarily disable indexes during the copy process. This can be done using the property: `migration.data.indices.disable.enabled` diff --git a/docs/user/USER-GUIDE-DATA-MIGRATION.md b/docs/user/USER-GUIDE-DATA-MIGRATION.md index b559c4e..39b2000 100644 --- a/docs/user/USER-GUIDE-DATA-MIGRATION.md +++ b/docs/user/USER-GUIDE-DATA-MIGRATION.md @@ -54,6 +54,11 @@ Make sure you add the source db driver to **commercedbsync/lib** if necessary. ## Configure the extensions Configure the extensions as needed in your **local.properties**. See the [Property Configuration Reference](../configuration/CONFIGURATION-REFERENCE.md). +Make sure that data replication mode is disabled: +``` +migration.data.export.enabled=false +``` + At least you have to configure the connection to your source database. Here is an example for mysql: ``` diff --git a/docs/user/USER-GUIDE-DATA-REPLICATION.md b/docs/user/USER-GUIDE-DATA-REPLICATION.md index 74a87f3..f514e6a 100644 --- a/docs/user/USER-GUIDE-DATA-REPLICATION.md +++ b/docs/user/USER-GUIDE-DATA-REPLICATION.md @@ -65,18 +65,19 @@ See [Deletion Support](./SUPPORT-DELETE-GUIDE.md). Properties require to reconfigure or readjusted for Data Sync. 
-| Property | Mandatory | Default | Description | -|--------------------------------------------------------|-----------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------| -| migration.ds.source.db.url | yes | | DB url for source connection , default value should be **${db.url};ApplicationIntent=ReadOnly** ApplicationIntent can be adjusted or removed for local testing | -| migration.ds.source.db.schema | no | dbo | DB schema for source connection | -| migration.ds.target.db.driver | yes | ${db.driver} | DB driver class for target connection | -| migration.ds.target.db.username | yes | | DB username for target connection | -| migration.ds.target.db.password | yes | | DB password for target connection | -| migration.ds.target.db.tableprefix | no | ${db.tableprefix} | DB table prefix for target connection | -| migration.ds.target.db.schema | no | dbo | DB schema for target connection | -| migration.data.tables.included | no | | Tables to be included in the migration. It is recommended to set this parameter during the first load of selective table sync, which will allow you to sync directly from HAC along with Schema. Eventually you can do very similar with full migration cron jobs by adjusting the list of tables. | -| migration.data.report.connectionstring | yes | ${media.globalSettings.cloudAzureBlobStorageStrategy.connection} | target blob storage for the report generation, although you can replace with Hotfolder Blob storage ${azure.hotfolder.storage.account.connection-string} | -| migration.data.workers.retryattempts | no | 0 | retry attempts if a batch (read or write) failed. | +| Property | Mandatory | Default | Description | +|--------------------------------------------------------|-----------|------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| migration.data.export.enabled | yes | true | Activates data replication mode. Standard migration is not possinble in this state | +| migration.ds.source.db.url | yes | | DB url for source connection , default value should be **${db.url};ApplicationIntent=ReadOnly** ApplicationIntent can be adjusted or removed for local testing | +| migration.ds.source.db.schema | no | dbo | DB schema for source connection | +| migration.ds.target.db.driver | yes | ${db.driver} | DB driver class for target connection | +| migration.ds.target.db.username | yes | | DB username for target connection | +| migration.ds.target.db.password | yes | | DB password for target connection | +| migration.ds.target.db.tableprefix | no | ${db.tableprefix} | DB table prefix for target connection | +| migration.ds.target.db.schema | no | dbo | DB schema for target connection | +| migration.data.tables.included | no | | Tables to be included in the migration. It is recommended to set this parameter during the first load of selective table sync, which will allow you to sync directly from HAC along with Schema. Eventually you can do very similar with full migration cron jobs by adjusting the list of tables. 
| +| migration.data.report.connectionstring | yes | ${media.globalSettings.cloudAzureBlobStorageStrategy.connection} | target blob storage for the report generation, although you can replace with Hotfolder Blob storage ${azure.hotfolder.storage.account.connection-string} | +| migration.data.workers.retryattempts | no | 0 | retry attempts if a batch (read or write) failed. | ## CronJob Configuration reference Data Sync
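For orientation, here is a minimal `local.properties` sketch that combines the replication-mode properties from the table above; the target URL, credentials, and driver class are placeholder examples only, not values taken from this change:
```
# enable data replication mode (standard migration triggered from HAC is blocked in this mode)
migration.data.export.enabled=true

# read from the commerce database, ideally via a read-only replica
migration.ds.source.db.url=${db.url};ApplicationIntent=ReadOnly
migration.ds.source.db.schema=dbo

# external target database (placeholder values)
migration.ds.target.db.driver=com.microsoft.sqlserver.jdbc.SQLServerDriver
migration.ds.target.db.url=jdbc:sqlserver://example-target-host:1433;databaseName=targetdb
migration.ds.target.db.username=targetuser
migration.ds.target.db.password=changeme
migration.ds.target.db.schema=dbo

# blob storage used for migration reports
migration.data.report.connectionstring=${media.globalSettings.cloudAzureBlobStorageStrategy.connection}
```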