diff --git a/AAE/README_config.md b/AAE/README_config.md deleted file mode 100644 index 0723ce02..00000000 --- a/AAE/README_config.md +++ /dev/null @@ -1,121 +0,0 @@ -# Configuring IBM Business Automation Application Engine (App Engine) 20.0.1 - -These instructions cover the basic installation and configuration of IBM Business Automation Application Engine (App Engine). - -## Table of contents -- [App Engine Component Details](#App-engine-component-details) -- [Prerequisites](#Prerequisites) -- [Resources Required](#Resources-required) -- [Step 1: Preparing to install App Engine for Production](#Step-1-preparing-to-install-app-engine-for-production) -- [Step 2: Configuring Redis for App Engine (Optional)](#Step-2-configuring-redis-for-app-Engine-optional) -- [Step 3: Implementing storage (Optional)](#Step-3-implementing-storage-optional) -- [Step 4: Configuring the custom resource YAML file for your App Engine deployment](#Step-4-configuring-the-custom-resource-YAML-file-for-your-app-engine-deployment) -- [Step 5: Completing the installation](#Step-5-completing-the-installation) -- [Limitations](#Limitations) - -## Introduction - -This installation deploys the App Engine, a user interface service tier to run applications that are built by IBM Business Automation Application Designer (App Designer). - -## App Engine Component Details - -This component deploys several services and components. - -In the standard configuration, it includes these components: - -* IBM Resource Registry component -* IBM Business Automation Application Engine (App Engine) component - -To support those components, a standard installation generates: - - * 3 or more ConfigMaps that manage the configuration of App Engine, depending on the customized configuration - * 1 or more deployments running App Engine, depending on the customized configuration - * 4 or more pods for Resource Registry, depending on the customized configuration - * 1 service account with a related role and role binding - * 3 secrets that grant access during the operator installation - * 3 services and optionally an Ingress or Route (OpenShift) to route the traffic to the App Engine - -## Prerequisites - - * [Remote Dictionary Server (Redis)](http://download.redis.io/releases/) - * [User Management Service](../UMS/README_config.md) - * Resource Registry, which is included in the App Engine configuration. If you already configured Resource Registry through another component, you don't need to install it again. - -## Resources Required - -Follow the OpenShift instructions in [Planning Your Installation 3.11](https://docs.openshift.com/container-platform/3.11/install/index.html#single-master-single-box) or [Planning your Installation 4.2](https://docs.openshift.com/container-platform/4.2/welcome/index.html). Then check the required resources in [System and Environment Requirements on OCP 3.11](https://docs.openshift.com/container-platform/3.11/install/prerequisites.html) or [System and Environment Requirements on OCP 4.2](https://docs.openshift.com/container-platform/4.2/architecture/architecture.html) and set up your environment.
- -| Component name | Container | CPU | Memory | -| --- | --- | --- | --- | -| App Engine | App Engine container | 1 | 1Gi | -| App Engine | Init containers | 200m | 128Mi | -| Resource Registry | Resource Registry container | 200m | 256Mi | -| Resource Registry | Init containers | 100m | 128Mi | - - -## Step 1: Preparing to install App Engine for Production - -In addition to the common steps that set up the operator environment, you must complete the following steps before you install App Engine. - -* Create the App Engine database. See [Creating the database](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_aeprep_db.html). -* Create the required secrets. See [Creating secrets to protect sensitive configuration data](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_aeprep_data.html). - -## Step 2: Configuring Redis for App Engine (Optional) - -You can configure App Engine with Remote Dictionary Server (Redis) to provide more reliable service. See [Configuring App Engine with Redis](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_aeprep_redis.html). - -## Step 3: Implementing storage (Optional) - -You can optionally add your own persistent volume (PV) and persistent volume claim (PVC) if you want to use your own JDBC driver or you want Resource Registry to be backed up automatically. The minimum supported size is 1 GB. For instructions, see [Optional: Implementing storage](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_aeprep_storage.html). - - -## Step 4: Configuring the custom resource YAML file for your App Engine deployment - -1. Make sure that you've set the configuration parameters for the [User Management Service](../UMS/README_config.md) in your copy of the template custom resource YAML file. - -2. Edit your copy of the template custom resource YAML file and make the following updates. After completing those updates, if you need to install other components, go to [Step 5](README_config.md#step-5-completing-the-installation) and configure those components in the same YAML file. - - a. Uncomment and update the `shared_configuration` section if you haven't already done so. - - b. Update the `application_engine_configuration` and `resource_registry_configuration` sections. - * Automatic backup for Resource Registry is recommended. See [Enabling Resource Registry disaster recovery](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.managing/topics/tsk_enabling_disaster_recovery.html) for configuration information. - - * If you just want to install App Engine with the minimal required values, replace the contents of `application_engine_configuration` and `resource_registry_configuration` in your copy of the template custom resource YAML file with the values from the [sample_min_value.yaml](configuration/sample_min_value.yaml) file. - - * If you want to use the full configuration list and customize the values, update the required values in `application_engine_configuration` and `resource_registry_configuration` in your copy of the template custom resource YAML file based on your configuration. - -### Configuration -If you want to customize your custom resource YAML file, refer to the [configuration list](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_ae_params.html) for each parameter.
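
When you later apply the updated file (as part of completing the installation in Step 5), you can watch the operator pick up your configuration. This is a minimal sketch only; the file name `ibm_cp4a_cr_final.yaml` and the namespace `cp4a-project` are placeholder values, not names fixed by these instructions:

```
# Apply the edited custom resource in your project (names are examples)
oc -n cp4a-project apply -f ibm_cp4a_cr_final.yaml

# Follow the reconciliation from the operator's Ansible container logs
oc -n cp4a-project logs deployment/ibm-cp4a-operator -c ansible -f
```
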
- -## Step 5: Completing the installation - -Go back to the relevant installation or update page to configure other components and complete the deployment with the operator. - -Installation pages: - - [Managed OpenShift installation page](../platform/roks/install.md) - - [OpenShift installation page](../platform/ocp/install.md) - - [Certified Kubernetes installation page](../platform/k8s/install.md) - -Update pages: - - [Managed OpenShift installation page](../platform/roks/update.md) - - [OpenShift installation page](../platform/ocp/update.md) - - [Certified Kubernetes installation page](../platform/k8s/update.md) - -## Limitations - -* After you deploy the App Engine, you can't change the App Engine admin user. - -* Because of a Node.js server limitation, App Engine trusts only root CAs. If an external service is used and signed with another root CA, you must add that root CA as trusted instead of the service certificate. - - * The certificate can be self-signed, or signed by a well-known root CA. - * If you're using a depth zero self-signed certificate, it must be listed as a trusted certificate. - * If you're using a certificate signed by a self-signed root CA, the self-signed CA must be in the trusted list. Using a leaf certificate in the trusted list is not supported. - * If you're adding the root CA of two or more external services to the App Engine trust list, you can't use the same common name for those root CAs. - -* The App Engine supports only the IBM DB2 database. - -* Resource Registry limitation - - Because of the design of etcd, it's recommended that you don't change the replica size after you create the Resource Registry cluster, to prevent data loss. If you must set the replica size, set it to an odd number. If you reduce the replica size, the pods are destroyed one by one, slowly, to prevent data loss or the cluster getting out of sync. - * If you update the Resource Registry admin secret to change the username or password, first delete the Resource Registry (`*-dba-rr-*`) pods so that Resource Registry picks up the updates. Alternatively, you can apply the update manually with etcd commands. - * If you update the Resource Registry configurations in the icp4acluster custom resource instance, the update might not affect the Resource Registry pods directly. It will affect the newly created pods when you increase the number of replicas. diff --git a/AAE/README_migrate.md b/AAE/README_migrate.md deleted file mode 100644 index d3ebc214..00000000 --- a/AAE/README_migrate.md +++ /dev/null @@ -1,34 +0,0 @@ - -# Migrating from IBM Business Automation Application Engine (App Engine) 19.0.2 to 20.0.1 - -These instructions cover the migration of IBM Business Automation Application Engine (App Engine) from 19.0.2 to 20.0.1. - -## Introduction - -If you installed App Engine 19.0.2 and want to continue to use your 19.0.2 applications in App Engine 20.0.1, you can migrate your applications from App Engine 19.0.2 to 20.0.1. - -## Step 1: Export apps that were authored in 19.0.2 - -Log in to the admin console in your IBM Business Automation Studio 19.0.2 environment, then export your apps as IBM Business App Installation Package (.zip) files. - -## Step 2: Publish the apps to App Engine through Business Automation Navigator - -Publish your apps to App Engine through Business Automation Navigator and make sure they work without errors. - -## Step 3: Shut down the App Engine 19.0.2 environment - -Log in to your OpenShift environment to stop all the development pods.
You can scale down the number of development pods to 0 by using the OpenShift console. (Note: JMS and the Resource Registry are stateful and can't be scaled down from the OpenShift console. Keeping them won't affect your next steps.) - -## Step 4: Reuse the App Engine database from 19.0.2 - -Reuse the existing App Engine database. Update the database configuration information under `application_engine_configuration` in the custom resource YAML file. - -## Step 5: Install App Engine 20.0.1 - -[Install IBM Business Automation Application Engine](../AAE/README_config.md). - -## Step 6: Migrate IBM Business Automation Navigator from 19.0.2 to 20.0.1 to verify your apps - -Following the [IBM Business Automation Navigator migration instructions](../BAN/README_migrate.md), migrate Business Automation Navigator from 19.0.2 to 20.0.1. Then, test your apps. - - diff --git a/AAE/README_upgrade.md b/AAE/README_upgrade.md deleted file mode 100644 index 1261ba70..00000000 --- a/AAE/README_upgrade.md +++ /dev/null @@ -1,34 +0,0 @@ -# Upgrading from IBM Business Automation Application Engine (App Engine) 19.0.3 to 20.0.1 - -These instructions cover the upgrade of IBM Business Automation Application Engine (App Engine) from 19.0.3 to 20.0.1. - -## Introduction - -If you installed App Engine 19.0.3 and want to continue to use your 19.0.3 applications in App Engine 20.0.1, you can upgrade your applications from App Engine 19.0.3 to 20.0.1. - -## Step 1: Update the custom resource YAML file for your App Engine 20.0.1 deployment - -Get the custom resource YAML file that you used to deploy App Engine for 19.0.3, and edit it by following these steps: - -1. Change the release version from 19.0.3 to 20.0.1. - -2. Add `appVersion: 20.0.1` to the `spec` section. See the [sample_min_value.yaml](configuration/sample_min_value.yaml) file. - -3. Update the `application_engine_configuration` and `resource_registry_configuration` sections. - * Automatic backup for Resource Registry is recommended. See [Enabling Resource Registry disaster recovery](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.managing/topics/tsk_enabling_disaster_recovery.html) for configuration information. - - * If you just want to update App Engine with the minimal required values, use the values in the [sample_min_value.yaml](configuration/sample_min_value.yaml) file. - * Add `admin_user` to the `application_engine_configuration` sections. - * Change the image tags from 19.0.3 to 20.0.1 in all sections. - - * If you want to use the full configuration list and customize the values, update the required values in the `application_engine_configuration` and `resource_registry_configuration` sections in your custom resource YAML file based on your configuration. See the [configuration list](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_ae_params.html) for each parameter. - -## Step 2: Update the configuration sections for other deployments - -To update the configuration sections for other components, such as User Management Service and IBM Business Automation Navigator, go back to the relevant upgrade page and follow the upgrade documentation for those components to update your custom resource YAML file.
- -Upgrade pages: - - [Managed OpenShift upgrade page](../platform/roks/upgrade.md) - - [OpenShift upgrade page](../platform/ocp/upgrade.md) - - [Certified Kubernetes upgrade page](../platform/k8s/upgrade.md) - diff --git a/AAE/configuration/sample_min_value.yaml b/AAE/configuration/sample_min_value.yaml deleted file mode 100644 index fcb8d902..00000000 --- a/AAE/configuration/sample_min_value.yaml +++ /dev/null @@ -1,41 +0,0 @@ -# Minimal required values for App Engine -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. -spec: - appVersion: 20.0.1 - ## Production configuration - ## App Engine configuration - application_engine_configuration: - ## The application_engine_configuration is a list. You can deploy multiple instances of App Engine and assign different configurations for each instance. - ## For each instance, application_engine_configuration.name and application_engine_configuration.name.hostname must have different values. - - name: ae-instance1 - hostname: - port: 443 - admin_secret_name: ae-secret-credential - admin_user: - database: - host: - name: - port: - ## If you set up DB2 HADR and want to use it, you must configure alternative_host and alternative_port. Otherwise, leave them blank. - alternative_host: - alternative_port: - images: - db_job: - repository: cp.icr.io/cp/cp4a/aae/solution-server-helmjob-db - tag: 20.0.1 - solution_server: - repository: cp.icr.io/cp/cp4a/aae/solution-server - tag: 20.0.1 - - ## Resource Registry Configuration - ## Important: If you've already configured Resource Registry, you don't need to change the resource_registry_configuration section in your copy of the template custom resource YAML file. - resource_registry_configuration: - admin_secret_name: resource-registry-admin-secret - hostname: - port: 443 - images: - resource_registry: - repository: cp.icr.io/cp/cp4a/aae/dba-etcd - tag: 20.0.1 - diff --git a/ACA/README.md b/ACA/README.md deleted file mode 100644 index 3830627f..00000000 --- a/ACA/README.md +++ /dev/null @@ -1,19 +0,0 @@ -# Configuring IBM Business Automation Content Analyzer 20.0.1 - -These instructions cover the basic configuration of Business Automation Content Analyzer. - - - -### Step n Complete the installation - -Go back to the relevant install or update page to configure other components and complete the deployment with the operator. - -Install pages: - - [Managed OpenShift installation page](../platform/roks/install.md) - - [OpenShift installation page](../platform/ocp/install.md) - - [Certified Kubernetes installation page](../platform/k8s/install.md) - -Update pages: - - [Managed OpenShift installation page](../platform/roks/update.md) - - [OpenShift installation page](../platform/ocp/update.md) - - [Certified Kubernetes installation page](../platform/k8s/update.md) diff --git a/ACA/README_config.md b/ACA/README_config.md deleted file mode 100644 index 4a1e8f39..00000000 --- a/ACA/README_config.md +++ /dev/null @@ -1,125 +0,0 @@ -# IBM® Business Automation Content Analyzer - - -## Introduction - -This readme provides instructions to deploy IBM Business Automation Content Analyzer with IBM® Cloud Pak for Automation platform. IBM Business Automation Content Analyzer offers the power of intelligent capture with the flexibility of an API that enables you to extend the value of your core enterprise content management (ECM) technology stack and helps you rapidly accelerate extraction and classification of data in your documents.
- - Requirements to Prepare Your Environment ------------ - ### NOTE: -Verify the latest release of IBM Business Automation Content Analyzer with IBM® Cloud Pak for Automation platform in IBM Fix Central or Entitlement Registry and use that release for deployment. -For example: there is a new version of IBM Business Automation Content Analyzer with IBM® Cloud Pak for Automation platform, 20.0.1-ifix1, for the 20.0.1 release. For deployment, edit the CR yaml file. In the `ca_configuration` section, use `20.0.1-ifix1` as the value for the `tag` parameter. - -### Step 1 - Preparing users for Content Analyzer - -Content Analyzer users need to be configured on the LDAP server. See [Preparing users for Content Analyzer](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_prepare_bacak8s_usergroups.html) for detailed instructions. - -### Step 2 - Create DB2 databases for Content Analyzer - -For development or testing purposes, you can skip this step and move to "Step 3 - Initialize the Content Analyzer Base database" if you prefer to have the Content Analyzer scripts create the databases for you. - -See [Create the Db2 database](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_prepare_bacak8s_createdb2.html) for detailed instructions. - -### Step 3 - Initialize the Content Analyzer Base database - -If you do not have a Db2® database set up, do so now. - -See [Initializing the Content Analyzer Base database](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_prepare_bacak8s_db.html) for detailed instructions. - -### Step 4 - Initialize the Content Analyzer Tenant database(s) - -If you do not have a tenant database, set up a Db2 tenant database. - -See [Initializing the Tenant database](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_prepare_bacak8s_dbtenant.html) for detailed instructions. - -### Step 5 - Optional - DB2 High-Availability - -You can set up a Db2 High Availability Disaster Recovery (HADR) database. - -See [Setting up Db2 High-Availability](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_prepare_cadb2ha.html) for detailed instructions. - -### Step 6 - Create prerequisite resources for IBM Business Automation Content Analyzer - -Set up and configure storage to prepare for the container configuration and deployment. You set up permissions on the PVC directories, label worker nodes, create the docker secret, configure security, and enable SSL communication for LDAP if necessary. - -See [Configuring storage and the environment](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_prepare_bacak8s_storage.html) for detailed instructions. - -### Step 7 - Configuring the CR YAML file - -Update the custom resource YAML file to provide the details that are relevant to your IBM Business Automation Content Analyzer deployment and your decisions for the container deployment. - -NOTE: Review this [technote](https://www.ibm.com/support/pages/node/6178437) if you deploy Content Analyzer on ROKS. - - -See [Content Analyzer parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_k8sca_operparams.html) for detailed instructions.
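
The NOTE at the top of this readme applies here: the image tags in the `ca_configuration` section must match the release you pulled from IBM Fix Central or the Entitlement Registry. A minimal sketch for checking and bumping the tags, assuming your edited CR file is named `ibm_cp4a_cr_final.yaml` (a placeholder, not a name mandated by this readme):

```
# Show the image tags currently set in the CR file
grep -n "tag:" ibm_cp4a_cr_final.yaml

# Example only: switch the Content Analyzer tags to an interim fix release
sed -i 's/tag: 20.0.1$/tag: 20.0.1-ifix1/' ibm_cp4a_cr_final.yaml
```
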
### Step 8 - Deployment ----------- -1) Once all the required parameters have been filled out for Content Analyzer, the CR can be applied by - -``` - -oc -n <ns> apply -f <CR yaml> - -``` -where: -`<ns>` is the namespace where you want to install Content Analyzer. -`<CR yaml>` is the CR yaml file name. - -2) The Operator container will deploy Content Analyzer. For more information about the operator, refer to -https://github.com/icp4a/cert-kubernetes/tree/20.0.1/ - - -Post Deployment -------------- - -## Post Deployment steps for route (OpenShift) setup - -You can deploy IBM Business Automation Content Analyzer by using an OpenShift route as the ingress point to provide frontend and backend services through an externally reachable, unique hostname such as www.backend.example.com and www.frontend.example.com. A defined route and the endpoints, which are identified by its service, can be consumed by a router to provide named connectivity that allows external clients to reach your applications. - -See [Configuring an OpenShift route](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_postcadeploy_routeOS.html) for detailed instructions. - -## Post Deployment steps for NodePort (Non OpenShift) setup - -You can modify your LoadBalancer, such as HAProxy, in the Kubernetes cluster to route the request to a specific node port. - -See [Configuring routing to a node port](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_postcadeploy_nodeport_NOS.html) for detailed instructions. - -## Troubleshooting - -This section describes how to get various logs for Content Analyzer. - -### Installation: - -- Retrieve the operator and Ansible installation logs: - -``` -kubectl logs deployment/ibm-cp4a-operator -c operator > Operator.log - -kubectl logs deployment/ibm-cp4a-operator -c ansible > Ansible.log -``` - -### Post install: - -- Content Analyzer logs are located in the log pvc. Logs are separated into sub-folders based on the component names. - -``` -├── backend -├── callerapi -├── classifyprocess-classify -├── frontend -├── mongo -├── mongoadmin -├── ocr-extraction -├── pdfprocess -├── postprocessing -├── processing-extraction -├── setup -├── updatefiledetail -└── utf8process - -``` - diff --git a/ACA/README_uninstall.md b/ACA/README_uninstall.md deleted file mode 100644 index e16e42ee..00000000 --- a/ACA/README_uninstall.md +++ /dev/null @@ -1,20 +0,0 @@ -# IBM® Business Automation Content Analyzer -========= - -## Introduction - -This readme provides instructions to uninstall IBM Business Automation Content Analyzer with IBM® Cloud Pak for Automation platform. IBM Business Automation Content Analyzer offers the power of intelligent capture with the flexibility of an API that enables you to extend the value of your core enterprise content management (ECM) technology stack and helps you rapidly accelerate extraction and classification of data in your documents. - - -Uninstall ----------- -1. Back up your ontology. -2. In the CR yaml file: comment out the `ca_configuration` section - -3. Apply the CR. For example: `oc apply -f [PATH TO CR YAML]` - -4. Delete all the subdirectories under the CA Data PVC. - -5. Delete all the subdirectories under the CA Config PVC. - -6. Delete all the subdirectories under the CA Log PVC. (One way to clear the PVC contents is shown in the sketch below.)
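
A hedged sketch for clearing the PVC contents in steps 4 to 6; the pod name and mount path are placeholders that depend on your deployment, not values from this readme:

```
# Example only: remove the subdirectories on a PVC from a pod that mounts it
# (repeat for the CA Data, CA Config, and CA Log PVCs; the path is an assumption)
oc exec <pod that mounts the PVC> -- sh -c 'rm -rf /mnt/data/*'
```
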
\ No newline at end of file diff --git a/ACA/README_update.md b/ACA/README_update.md deleted file mode 100644 index 2651023c..00000000 --- a/ACA/README_update.md +++ /dev/null @@ -1,23 +0,0 @@ -# IBM® Business Automation Content Analyzer -========= - -## Introduction - -This readme provides instructions to update IBM Business Automation Content Analyzer with IBM® Cloud Pak for Automation platform. IBM Business Automation Content Analyzer offers the power of intelligent capture with the flexibility of an API that enables you to extend the value of your core enterprise content management (ECM) technology stack and helps you rapidly accelerate extraction and classification of data in your documents. - - - -## Redeploying Content Analyzer if changes are made to the Role Variables -If you need to make changes to the CA deployment, you must redeploy CA by doing the following: - -Please note that this process will remove any documents you have processed in Content Analyzer. Download any needed document output from Content Analyzer before performing these steps. - -1) In the CR yaml file: comment out the `ca_configuration` section - -2) Apply the CR. For example: `oc apply -f [PATH TO CR YAML]` - -3) Delete the contents under the CA Data PVC and CA Config PVC. - -4) In the CR yaml file: uncomment the `ca_configuration` section and make the desired changes. - -5) Apply the CR. For example: `oc apply -f [PATH TO CR YAML]` \ No newline at end of file diff --git a/ACA/README_upgrade.md b/ACA/README_upgrade.md deleted file mode 100644 index 5955e359..00000000 --- a/ACA/README_upgrade.md +++ /dev/null @@ -1,90 +0,0 @@ -# IBM® Business Automation Content Analyzer ------------ - - -## Introduction - -This readme provides instructions to upgrade IBM Business Automation Content Analyzer with IBM® Cloud Pak for Automation platform. IBM Business Automation Content Analyzer offers the power of intelligent capture with the flexibility of an API that enables you to extend the value of your core enterprise content management (ECM) technology stack and helps you rapidly accelerate extraction and classification of data in your documents. - - -Upgrade ----------- -## Upgrade from 19.0.2 to 20.0.1 -Upgrade from Content Analyzer 19.0.2 to 20.0.1 is not supported. - -## Upgrade from 19.0.3 to 20.0.1 - -- To upgrade Content Analyzer from 19.0.3 to 20.0.1, perform the following procedure: -- Back up your ontology through the export functionality from the Content Analyzer UI. -- Back up your Content Analyzer's base database and tenant database. -- Copy the `DB2` [folder](https://github.com/icp4a/cert-kubernetes/tree/master/ACA/configuration-ha) to the Db2 server. -- Run `UpgradeTenantDB.sh` on your database server as the `db2inst1` user. -- Set the ObjectType feature flag and change the schema version flag to 1.4 for the tenant by doing the following for your Content Analyzer's base database. 1. Start the DB2 command line by running the `db2` command. 2. On the DB2 command line, connect to your Content Analyzer base database as the base database user. 3. On the DB2 command line, run the following SQL statements (replace the values of `<TENANT_ID>` and `<ONTOLOGY>` with the actual values for your instance).
-``` -update tenantinfo set FEATUREFLAGS=(4 | (select FEATUREFLAGS from tenantinfo where TENANTID='<TENANT_ID>' and ONTOLOGY='<ONTOLOGY>')) where TENANTID='<TENANT_ID>' and ONTOLOGY='<ONTOLOGY>' -update tenantinfo set TENANTDBVERSION=1.4 where TENANTID='<TENANT_ID>' and ONTOLOGY='<ONTOLOGY>' -``` -- Fill out the CR yaml file supplied with 20.0.1 using the same values as the previous deployment. Note that you should use the same number of replicas for mongo/mongo-admin as in 19.0.3 (e.g. 3). -- Change all existing secret names to the new format by running the following commands (this creates new secrets with the same information as the original secrets): -``` - oc get secret ca-backend-secret -o yaml|sed -e s#ca-backend-secret#aca-backend-secret# |oc apply -f - - oc get secret ca-frontend-secret -o yaml|sed -e s#ca-frontend-secret#aca-frontend-secret# |oc apply -f - - for sec in {basedb,mongo,mongo-admin,rabbitmq,redis,secrets};do oc get secret baca-$sec -oyaml|sed -e s#baca-$sec#aca-$sec#|oc apply -f -;done (note that the `<namespace>` tag needs to be replaced with the appropriate namespace) -``` -Change the name of the baca-dsn configmap to aca-dsn: -``` - oc get cm baca-dsn -o yaml | sed -e s#baca-dsn#aca-dsn# | oc apply -f - -``` -- In 19.0.3, the baca-basedb secret was created using an encoded password, which is no longer used in 20.0.1. To -patch the aca-basedb secret with an unencoded password, run: - ``` - oc patch secret aca-basedb --type='json' -p='[{"op" : "replace" ,"path" : "/data/BASE_DB_PWD" ,"value" : '$(echo $(oc get secret aca-basedb -o yaml |grep BASE_DB_PWD | awk {'print $2'}) |base64 -d)'}]' -``` - - Re-label your worker nodes per step 5.2 of [README_config.md](README_config.md). For example: - ``` - oc label node <node name> celery=aca mongo=aca mongo-admin=aca --overwrite -``` -- If ACA integrates with UMS, do the next steps. - Issue the command: - ``` - oc edit cm {meta.name}-ca-config - ``` - Change the environment variable: -``` - UMS_REGISTERED: "false" -``` - - -- Deploy Content Analyzer 20.0.1 using Operator 20.0.1 per [Operator Readme](https://github.com/icp4a/cert-kubernetes/blob/master/README.md). - -- Apply the updated CR yaml file. - -NOTE: Make sure to keep the `csrf_referrer->whitelist: ""` blank if ACA is integrated with Business Automation Studio. - -- Monitor the pods and verify that the old pods terminate and new pods are created. -NOTE: You may need to delete the `redis` pods if they fail to start. For example: -``` -oc scale sts {meta.name}-redis-ha-server --replicas=0 -``` - -then - -``` -oc scale sts {meta.name}-redis-ha-server --replicas=3 - -``` - -- Log in to the Content Analyzer UI and import the ontology that was exported in the above step. - -- If any problems are encountered, see Troubleshooting in [README_config.md](README_config.md). - -## Rolling back an upgrade -- Delete the current version of Content Analyzer by following the [README_uninstall.md](README_uninstall.md). -- Restore the Content Analyzer's Base DB and Tenant DB to the previous release (a hedged `db2 restore` sketch follows at the end of this readme). For example: if you want to roll back to 19.0.2, which has been previously backed up, restore the base DB and tenant DB to 19.0.2. -- Follow the installation procedure to deploy Content Analyzer for that specific version. - -## Limitation -After upgrading from Content Analyzer v1.3 to v1.4, any existing Key Alias Patterns in the system, which are **not** assigned to any KeyClass, are **lost** after the upgrade. The Value Patterns (irrespective of being assigned to a key class) and Key Alias Patterns that are assigned to any KeyClass are migrated successfully. 
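
For the database restore during a rollback, a standard Db2 restore from the backup images you took before the upgrade is one option. This is a minimal sketch only; the backup directory and timestamp are placeholders, `CABASEDB` is the default base database name used by the DB2 scripts in this repository, and you would run it as the Db2 instance owner (for example `db2inst1`):

```
# Example only: restore the base and tenant databases from pre-upgrade backups
db2 "RESTORE DATABASE CABASEDB FROM /backups TAKEN AT 20191201120000"
db2 "RESTORE DATABASE <tenant db> FROM /backups TAKEN AT 20191201120000"
```
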
diff --git a/ACA/configuration-ha/DB2/AddOntology.sh b/ACA/configuration-ha/DB2/AddOntology.sh index 29f68640..41d43213 100755 --- a/ACA/configuration-ha/DB2/AddOntology.sh +++ b/ACA/configuration-ha/DB2/AddOntology.sh @@ -1,7 +1,11 @@ #!/bin/bash - -echo -echo "-- This script will create a new ontology for an existing tenant and load it with default data." -echo +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end +# This script will create a new ontology for an existing tenant and load it with default data. ./AddTenant.sh 1 \ No newline at end of file diff --git a/ACA/configuration-ha/DB2/AddTenant.bat b/ACA/configuration-ha/DB2/AddTenant.bat index 17bee340..8eaf05a6 100644 --- a/ACA/configuration-ha/DB2/AddTenant.bat +++ b/ACA/configuration-ha/DB2/AddTenant.bat @@ -1,210 +1,219 @@ -@echo off - -SETLOCAL - -IF NOT DEFINED skip_create_tenant_db ( - set skip_create_tenant_db=false -) - -IF "%skip_create_tenant_db%"=="true" ( - echo -- - echo This script will initialize an existing DB2 database for use as a BACA tenant database and add an ontology. - set choice="2" - echo -- -) ELSE ( - echo -- - echo Enter '1' to create an new DB2 database and initialize the database as a tenant DB and create an ontology. An existing database user must exist. - echo Enter '2' to add an ontology for an existing tenant database. - echo Enter '3' to abort. - - set /p choice="Type input: " -) - - -if /I "%choice%" EQU "3" goto :DOEXIT - -set /p tenant_id= Enter the tenant ID for the new tenant: (eg. t4900) : - -IF NOT "%skip_create_tenant_db%"=="true" ( - set /p tenant_db_name= "Enter the name of the new DB2 database to create for the BACA tenant. Please follow the DB2 naming rules :" -) ELSE ( - set /p tenant_db_name= "Enter the name of the existing DB2 database to use for the BACA tenant database (eg. t4900) :" -) -set tenant_dsn_name=%tenant_db_name% - -set /p baca_database_server_ip= "Enter the host/IP of the DB2 database server for the tenant database. :" - -set /p baca_database_port= "Enter the port of the DB2 database server for the tenant database :" - -set /p tenant_db_user= "Please enter the name of tenant database user. If no value is entered we will use the following default value 'tenantuser' :" -IF NOT DEFINED tenant_db_user SET "tenant_db_user=tenantuser" - -REM Use powershell to mask password -set "psCommand=powershell -Command "$pword = read-host 'Enter the password for the tenant database user:' -AsSecureString ; ^ - $BSTR=[System.Runtime.InteropServices.Marshal]::SecureStringToBSTR($pword); ^ - [System.Runtime.InteropServices.Marshal]::PtrToStringAuto($BSTR)"" -for /f "usebackq delims=" %%p in (`%psCommand%`) do set tenant_db_pwd=%%p -REM Alternative way to prompt for pwd without masking -REM set /p tenant_db_pwd= "Enter the password for the tenant database user:" - -set /p tenant_ontology= "Enter the tenant ontology name. If nothing is entered, the default name will be used 'default' :" -IF NOT DEFINED tenant_ontology SET "tenant_ontology=default" - -set /p base_db_name= "Enter the name of the DB2 BACA Base database with the TENANTINFO Table. If nothing is entered, we will use the following default value 'CABASEDB': " -IF NOT DEFINED base_db_name SET "base_db_name=CABASEDB" - -set /p base_db_user= "Enter the name of the database user for the Base BACA database. 
If nothing is entered, we will use the following default value 'CABASEUSER' : " -IF NOT DEFINED base_db_user SET "base_db_user=CABASEUSER" - -set /p tenant_company= "Please enter the company name for the initial BACA user :" - -set /p tenant_first_name= "Please enter the first name for the initial BACA user :" - -set /p tenant_last_name= "Please enter the last name for the initial BACA user :" - -set /p tenant_email= "Please enter a valid email address for the initial BACA user : " - -set /p tenant_user_name= "Please enter the login name for the initial BACA user (IMPORTANT: if you are using LDAP, you must use the LDAP user name):" - -IF NOT DEFINED rdbmsconnection SET "rdbmsconnection=DSN=%tenant_dsn_name%;UID=%tenant_db_user%;PWD=%tenant_db_pwd%;" -set /p ssl= "Please enter if database is enabled for SSL default is false [Y/N] :" -if /I "%ssl%" EQU "Y" ( - SET rdbmsconnection=%rdbmsconnection%Security=SSL; -) -echo "-- Please confirm these are the desired settings:" -echo " - tenant ID: %tenant_id%" -echo " - tenant database name: %tenant_db_name%" -echo " - database server hostname/IP: %baca_database_server_ip%" -echo " - database server port: %baca_database_port%" -echo " - tenant database user: %tenant_db_user%" -echo " - ontology name: %tenant_ontology%" -echo " - base database: %base_db_name%" -echo " - base database user: %base_db_user%" -echo " - tenant company name: %tenant_company%" -echo " - tenant first name: %tenant_first_name%" -echo " - tenant last name: %tenant_last_name%" -echo " - tenant email address: %tenant_email%" -echo " - tenant login name: %tenant_user_name%" -echo " - tenant ssl: %ssl%" - -set /P c=Are you sure you want to continue[Y/N]? -if /I "%c%" EQU "Y" goto :DOCREATE -if /I "%c%" EQU "N" goto :DOEXIT - -:DOCREATE - echo "Running the db script" - REM adding new teneant db need to create db first - IF "%choice%"=="1" ( - echo "Creating database" - db2 CREATE DATABASE %tenant_db_name% AUTOMATIC STORAGE YES USING CODESET UTF-8 TERRITORY DEFAULT COLLATE USING SYSTEM PAGESIZE 32768 - db2 CONNECT TO %tenant_db_name% - db2 GRANT CONNECT,DATAACCESS ON DATABASE TO USER %tenant_db_user% - db2 GRANT USE OF TABLESPACE USERSPACE1 TO USER %tenant_db_user% - db2 CONNECT RESET - ) - - REM create schema - echo -- - echo "Connecting to db and creating schema" - db2 CONNECT TO %tenant_db_name% - db2 CREATE SCHEMA %tenant_ontology% - db2 SET SCHEMA %tenant_ontology% - - REM create tables - echo -- - echo "Creating BACA tables" - db2 -stvf sql\CreateBacaTables.sql - - REM table permissions to tenant user - echo -- - echo "Giving permissions on tables" - db2 GRANT ALTER ON TABLE DOC_CLASS TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE DOC_ALIAS TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE OBJECT_TYPE TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE KEY_CLASS TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE KEY_ALIAS TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE CWORD TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE HEADING TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE HEADING_ALIAS TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE USER_DETAIL TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE INTEGRATION TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE IMPORT_ONTOLOGY TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE API_INTEGRATIONS_OBJECTSSTORE TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE SMARTPAGES_OPTIONS TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE FONTS TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE FONTS_TRANSID 
TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE DB_BACKUP TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE PATTERN TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE DOCUMENT TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE TRAINING_LOG TO USER %tenant_db_user% - db2 GRANT ALTER ON TABLE IMPLEMENTATION TO USER %tenant_db_user% - - REM load the tenant Db - echo "Loading default data into tables" - db2 load from CSVFiles\doc_class.csv of del insert into doc_class - db2 load from CSVFiles\object_type.csv of del modified by identityoverride insert into object_type - db2 load from CSVFiles\key_class.csv of del modified by identityoverride insert into key_class - db2 load from CSVFiles\doc_alias.csv of del modified by identityoverride insert into doc_alias - db2 load from CSVFiles\key_alias.csv of del modified by identityoverride insert into key_alias - db2 load from CSVFiles\cword.csv of del modified by identityoverride insert into cword - db2 load from CSVFiles\heading.csv of del modified by identityoverride insert into heading - db2 load from CSVFiles\heading_alias.csv of del modified by identityoverride insert into heading_alias - db2 load from CSVFiles\key_class_dc.csv of del modified by identityoverride insert into key_class_dc - db2 load from CSVFiles\doc_alias_dc.csv of del modified by identityoverride insert into doc_alias_dc - db2 load from CSVFiles\key_alias_dc.csv of del modified by identityoverride insert into key_alias_dc - db2 load from CSVFiles\key_alias_kc.csv of del modified by identityoverride insert into key_alias_kc - db2 load from CSVFiles\heading_dc.csv of del modified by identityoverride insert into heading_dc - db2 load from CSVFiles\heading_alias_dc.csv of del modified by identityoverride insert into heading_alias_dc - db2 load from CSVFiles\heading_alias_h.csv of del modified by identityoverride insert into heading_alias_h - db2 load from CSVFiles\cword_dc.csv of del modified by identityoverride insert into cword_dc - - echo -- - echo "SET INTEGRITY ..." - db2 set integrity for key_class immediate checked - db2 set integrity for key_class_dc immediate checked - db2 set integrity for doc_alias_dc immediate checked - db2 set integrity for key_alias_dc immediate checked - db2 set integrity for key_alias_kc immediate checked - db2 set integrity for heading_dc immediate checked - db2 set integrity for heading_alias_dc immediate checked - db2 set integrity for heading_alias_h immediate checked - db2 set integrity for cword_dc immediate checked - - echo -- - echo "ALTER TABLE ..." 
- db2 alter table doc_class alter column doc_class_id restart with 10 - db2 alter table doc_alias alter column doc_alias_id restart with 11 - db2 alter table key_class alter column key_class_id restart with 202 - db2 alter table key_alias alter column key_alias_id restart with 239 - db2 alter table cword alter column cword_id restart with 76 - db2 alter table heading alter column heading_id restart with 3 - db2 alter table heading_alias alter column heading_alias_id restart with 3 - db2 alter table object_type alter column object_type_id restart with 6 - - db2 connect reset - - REM Insert InsertTenant - echo -- - echo "Connecting to base database to insert tenant info" - db2 connect to %base_db_name% - db2 set schema %base_db_user% - db2 insert into TENANTINFO (tenantid,ontology,tenanttype,dailylimit,rdbmsengine,bacaversion,rdbmsconnection,dbname,dbuser,tenantdbversion,featureflags) values ( '%tenant_id%', '%tenant_ontology%', 0, 0, 'DB2', '1.4', encrypt('%rdbmsconnection%','AES_KEY'),'%tenant_db_name%','%tenant_db_user%','1.4',4) - db2 connect reset - - REM Insert InsertUser - echo -- - echo "Connecting to tenant database to insert initial userinfo" - db2 connect to %tenant_db_name% - db2 set schema %tenant_ontology% - db2 insert into user_detail (email,first_name,last_name,user_name,company,expire) values ('%tenant_email%','%tenant_first_name%','%tenant_last_name%','%tenant_user_name%','%tenant_company%',10080) - db2 insert into login_detail (user_id,role,status,logged_in) select user_id,'Admin','1',0 from user_detail where email='%tenant_email%' - db2 connect reset - goto END -:DOEXIT - echo "Exited on user input" - goto END -:END - SET skip_create_tenant_db= - echo "END" - -ENDLOCAL +@echo off +REM ************************************************************************************ +REM * @---lm_copyright_start +REM * 5737-I23, 5900-A30 +REM * Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +REM * U.S. Government Users Restricted Rights: +REM * Use, duplication or disclosure restricted by GSA ADP Schedule +REM * Contract with IBM Corp. +REM * @---lm_copyright_end +REM ************************************************************************************ + +SETLOCAL + +IF NOT DEFINED skip_create_tenant_db ( + set skip_create_tenant_db=false +) + +IF "%skip_create_tenant_db%"=="true" ( + echo -- + echo This script will initialize an existing DB2 database for use as a BACA tenant database and add an ontology. + set choice="2" + echo -- +) ELSE ( + echo -- + echo Enter '1' to create a new DB2 database and initialize the database as a tenant DB and create an ontology. An existing database user must exist. + echo Enter '2' to add an ontology for an existing tenant database. + echo Enter '3' to abort. + + set /p choice="Type input: " +) + + +if /I "%choice%" EQU "3" goto :DOEXIT + +set /p tenant_id= Enter the tenant ID for the new tenant: (eg. t4900) : + +IF NOT "%skip_create_tenant_db%"=="true" ( + set /p tenant_db_name= "Enter the name of the new DB2 database to create for the BACA tenant. Please follow the DB2 naming rules :" +) ELSE ( + set /p tenant_db_name= "Enter the name of the existing DB2 database to use for the BACA tenant database (eg. t4900) :" +) +set tenant_dsn_name=%tenant_db_name% + +set /p baca_database_server_ip= "Enter the host/IP of the DB2 database server for the tenant database. :" + +set /p baca_database_port= "Enter the port of the DB2 database server for the tenant database :" + +set /p tenant_db_user= "Please enter the name of tenant database user.
If no value is entered we will use the following default value 'tenantuser' :" +IF NOT DEFINED tenant_db_user SET "tenant_db_user=tenantuser" + +REM Use powershell to mask password +set "psCommand=powershell -Command "$pword = read-host 'Enter the password for the tenant database user:' -AsSecureString ; ^ + $BSTR=[System.Runtime.InteropServices.Marshal]::SecureStringToBSTR($pword); ^ + [System.Runtime.InteropServices.Marshal]::PtrToStringAuto($BSTR)"" +for /f "usebackq delims=" %%p in (`%psCommand%`) do set tenant_db_pwd=%%p +REM Alternative way to prompt for pwd without masking +REM set /p tenant_db_pwd= "Enter the password for the tenant database user:" + +set /p tenant_ontology= "Enter the tenant ontology name. If nothing is entered, the default name will be used 'default' :" +IF NOT DEFINED tenant_ontology SET "tenant_ontology=default" + +set /p base_db_name= "Enter the name of the DB2 BACA Base database with the TENANTINFO Table. If nothing is entered, we will use the following default value 'CABASEDB': " +IF NOT DEFINED base_db_name SET "base_db_name=CABASEDB" + +set /p base_db_user= "Enter the name of the database user for the Base BACA database. If nothing is entered, we will use the following default value 'CABASEUSER' : " +IF NOT DEFINED base_db_user SET "base_db_user=CABASEUSER" + +set /p tenant_company= "Please enter the company name for the initial BACA user :" + +set /p tenant_first_name= "Please enter the first name for the initial BACA user :" + +set /p tenant_last_name= "Please enter the last name for the initial BACA user :" + +set /p tenant_email= "Please enter a valid email address for the initial BACA user : " + +set /p tenant_user_name= "Please enter the login name for the initial BACA user (IMPORTANT: if you are using LDAP, you must use the LDAP user name):" + +IF NOT DEFINED rdbmsconnection SET "rdbmsconnection=DSN=%tenant_dsn_name%;UID=%tenant_db_user%;PWD=%tenant_db_pwd%;" +set /p ssl= "Please enter if database is enabled for SSL default is false [Y/N] :" +if /I "%ssl%" EQU "Y" ( + SET rdbmsconnection=%rdbmsconnection%Security=SSL; +) +echo "-- Please confirm these are the desired settings:" +echo " - tenant ID: %tenant_id%" +echo " - tenant database name: %tenant_db_name%" +echo " - database server hostname/IP: %baca_database_server_ip%" +echo " - database server port: %baca_database_port%" +echo " - tenant database user: %tenant_db_user%" +echo " - ontology name: %tenant_ontology%" +echo " - base database: %base_db_name%" +echo " - base database user: %base_db_user%" +echo " - tenant company name: %tenant_company%" +echo " - tenant first name: %tenant_first_name%" +echo " - tenant last name: %tenant_last_name%" +echo " - tenant email address: %tenant_email%" +echo " - tenant login name: %tenant_user_name%" +echo " - tenant ssl: %ssl%" + +set /P c=Are you sure you want to continue[Y/N]? 
+if /I "%c%" EQU "Y" goto :DOCREATE +if /I "%c%" EQU "N" goto :DOEXIT + +:DOCREATE + echo "Running the db script" + REM adding a new tenant db requires creating the db first + IF "%choice%"=="1" ( + echo "Creating database" + db2 CREATE DATABASE %tenant_db_name% AUTOMATIC STORAGE YES USING CODESET UTF-8 TERRITORY DEFAULT COLLATE USING SYSTEM PAGESIZE 32768 + db2 CONNECT TO %tenant_db_name% + db2 GRANT CONNECT,DATAACCESS ON DATABASE TO USER %tenant_db_user% + db2 GRANT USE OF TABLESPACE USERSPACE1 TO USER %tenant_db_user% + db2 CONNECT RESET + ) + + REM create schema + echo -- + echo "Connecting to db and creating schema" + db2 CONNECT TO %tenant_db_name% + db2 CREATE SCHEMA %tenant_ontology% + db2 SET SCHEMA %tenant_ontology% + + REM create tables + echo -- + echo "Creating BACA tables" + db2 -stvf sql\CreateBacaTables.sql + + REM table permissions to tenant user + echo -- + echo "Giving permissions on tables" + db2 GRANT ALTER ON TABLE DOC_CLASS TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE DOC_ALIAS TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE OBJECT_TYPE TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE KEY_CLASS TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE KEY_ALIAS TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE ALIAS TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE CWORD TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE HEADING TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE HEADING_ALIAS TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE USER_DETAIL TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE INTEGRATION TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE IMPORT_ONTOLOGY TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE API_INTEGRATIONS_OBJECTSSTORE TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE SMARTPAGES_OPTIONS TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE FONTS TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE FONTS_TRANSID TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE PATTERN TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE DOCUMENT TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE TRAINING_LOG TO USER %tenant_db_user% + db2 GRANT ALTER ON TABLE IMPLEMENTATION TO USER %tenant_db_user% + + REM load the tenant Db + echo "Loading default data into tables" + db2 load from CSVFiles\doc_class.csv of del insert into doc_class + db2 load from CSVFiles\object_type.csv of del modified by identityoverride insert into object_type + db2 load from CSVFiles\key_class.csv of del modified by identityoverride insert into key_class + db2 load from CSVFiles\doc_alias.csv of del modified by identityoverride insert into doc_alias + db2 load from CSVFiles\key_alias.csv of del modified by identityoverride insert into key_alias + db2 load from CSVFiles\cword.csv of del modified by identityoverride insert into cword + db2 load from CSVFiles\heading.csv of del modified by identityoverride insert into heading + db2 load from CSVFiles\heading_alias.csv of del modified by identityoverride insert into heading_alias + db2 load from CSVFiles\key_class_dc.csv of del modified by identityoverride insert into key_class_dc + db2 load from CSVFiles\doc_alias_dc.csv of del modified by identityoverride insert into doc_alias_dc + db2 load from CSVFiles\key_alias_dc.csv of del modified by identityoverride insert into key_alias_dc + db2 load from CSVFiles\key_alias_kc.csv of del modified by identityoverride insert into key_alias_kc + db2 load from CSVFiles\heading_dc.csv of del modified by identityoverride insert into heading_dc + db2 load from
CSVFiles\heading_alias_dc.csv of del modified by identityoverride insert into heading_alias_dc + db2 load from CSVFiles\heading_alias_h.csv of del modified by identityoverride insert into heading_alias_h + db2 load from CSVFiles\cword_dc.csv of del modified by identityoverride insert into cword_dc + + echo -- + echo "SET INTEGRITY ..." + db2 set integrity for key_class immediate checked + db2 set integrity for key_class_dc immediate checked + db2 set integrity for doc_alias_dc immediate checked + db2 set integrity for key_alias_dc immediate checked + db2 set integrity for key_alias_kc immediate checked + db2 set integrity for heading_dc immediate checked + db2 set integrity for heading_alias_dc immediate checked + db2 set integrity for heading_alias_h immediate checked + db2 set integrity for cword_dc immediate checked + + echo -- + echo "ALTER TABLE ..." + db2 alter table doc_class alter column doc_class_id restart with 10 + db2 alter table doc_alias alter column doc_alias_id restart with 11 + db2 alter table key_class alter column key_class_id restart with 202 + db2 alter table key_alias alter column key_alias_id restart with 239 + db2 alter table cword alter column cword_id restart with 76 + db2 alter table heading alter column heading_id restart with 3 + db2 alter table heading_alias alter column heading_alias_id restart with 3 + db2 alter table object_type alter column object_type_id restart with 6 + + db2 connect reset + + REM Insert InsertTenant + echo -- + echo "Connecting to base database to insert tenant info" + db2 connect to %base_db_name% + db2 set schema %base_db_user% + db2 insert into TENANTINFO (tenantid,ontology,tenanttype,dailylimit,rdbmsengine,bacaversion,rdbmsconnection,dbname,dbuser,tenantdbversion,featureflags) values ( '%tenant_id%', '%tenant_ontology%', 0, 0, 'DB2', '1.5', encrypt('%rdbmsconnection%','AES_KEY'),'%tenant_db_name%','%tenant_db_user%','1.5',4) + db2 connect reset + + REM Insert InsertUser + echo -- + echo "Connecting to tenant database to insert initial userinfo" + db2 connect to %tenant_db_name% + db2 set schema %tenant_ontology% + db2 insert into user_detail (email,first_name,last_name,user_name,company,expire) values ('%tenant_email%','%tenant_first_name%','%tenant_last_name%','%tenant_user_name%','%tenant_company%',10080) + db2 insert into login_detail (user_id,role,status,logged_in) select user_id,'Admin','1',0 from user_detail where email='%tenant_email%' + db2 connect reset + goto END +:DOEXIT + echo "Exited on user input" + goto END +:END + SET skip_create_tenant_db= + echo "END" + +ENDLOCAL diff --git a/ACA/configuration-ha/DB2/AddTenant.sh b/ACA/configuration-ha/DB2/AddTenant.sh index 4f012f24..a8527ccc 100755 --- a/ACA/configuration-ha/DB2/AddTenant.sh +++ b/ACA/configuration-ha/DB2/AddTenant.sh @@ -1,5 +1,11 @@ #!/bin/bash - +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end # NOTES: # This script will create a DB2 database and initialize the database for a Content Analyzer tenant and load it with default data. 
# If you prefer to create your own database, and only want the script to initialize the existing database, @@ -22,22 +28,30 @@ if [[ "$NUMARGS" -gt 0 ]]; then use_existing_tenant=$1 fi -if [[ -z "$use_existing_tenant" || $use_existing_tenant -ne 1 ]]; then - if [[ -z "$tenant_db_exists" || $tenant_db_exists != "true" ]]; then - echo - echo "==================================================" - echo - echo -e "\nThis script will create a DB2 database and initialize the database for a Content Analyzer tenant and load it with default data." - echo - echo -e "If you prefer to create your own database, and only want the script to initialize the existing database, please exit this script and run 'InitTenantDB.sh'." - echo - echo "==================================================" - echo +if [[ ! -z "$use_existing_tenant" && $use_existing_tenant -eq 1 ]]; then + tenant_db_exists="true" + user_already_defined=1 + create_new_user="n" +fi + + +echo +echo "==================================================" +echo +if [[ -z "$tenant_db_exists" || $tenant_db_exists != "true" ]]; then + echo -e "\nThis script will create a DB2 database and initialize the database for a Content Analyzer tenant and load it with default data." + echo + echo -e "If you prefer to create your own database, and only want the script to initialize the existing database, please exit this script and run 'InitTenantDB.sh'." +else + if [[ -z "$use_existing_tenant" || $use_existing_tenant -ne 1 ]]; then + echo -e "This script will initialize an existing database for a Content Analyzer tenant and load it with default data." else - echo -e "\n-- This script will initialize an existing database for a Content Analyzer tenant and load it with default data" - echo + echo -e "This script will add an ontology to an existing Content Analyzer tenant and load it with default data." fi fi +echo +echo "==================================================" +echo if [[ -z "$use_existing_tenant" || $use_existing_tenant -ne 1 ]]; then echo "Enter the tenant ID for the new tenant: (eg. 
t4900)" @@ -77,7 +91,7 @@ echo if [[ -z "$tenant_db_exists" || $tenant_db_exists != "true" ]]; then echo "Enter the name of the new Content Analyzer Tenant database to create: " else - echo "Enter the name of an existing DB2 database to be used as the Content Analyzer Tenant database: " + echo "Enter the name of an existing DB2 database for the Content Analyzer Tenant database: " fi while [[ $tenant_db_name == '' ]] do diff --git a/ACA/configuration-ha/DB2/CSVFiles/key_class.csv b/ACA/configuration-ha/DB2/CSVFiles/key_class.csv index b9574fa0..959b9ae0 100644 --- a/ACA/configuration-ha/DB2/CSVFiles/key_class.csv +++ b/ACA/configuration-ha/DB2/CSVFiles/key_class.csv @@ -1,201 +1,201 @@ -47,ExteriorColor,5,0,0,Exterior Color,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRXh0ZXJpb3IgQ29sb3IiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -48,InteriorColor,5,0,0,Interior Color,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSW50ZXJpb3IgQ29sb3IiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -49,Page Number,4,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUGFnZSAjOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -50,JobDescription,5,0,0,Job Description,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSm9iIERlc2NyaXB0aW9uIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -51,SBCess,4,0,0,Swachh Bharat Cess,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU0IgQ2VzcyBvbiBUYXhhYmxlIFZhbHVlIFtCXSIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IlNlcnZpY2UgVGF4IG9uIFRheGFibGUgVmFsdWUgW0JdIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiU0IgQ2VzcyBsZXZpZWQgYnkgVmVuZG9yIFtBXSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -52,InvoiceNumber,4,1,0,Invoice Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSW52IE5vIyIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6Ikludm9pY2UgTnVtYmVyOiIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6Ikludm9pY2UgIyIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6Ikludm9pY2UgTnVtYmVyIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiVEFYIElOVk9JQ0UgTlVNQkVSIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiSW52IE5vICM6IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiSW52IE5pICM6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -53,Fax,4,0,0,FaxNo,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRmF4IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -54,Total,4,1,0,Grand Total,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVG90YWwgQ29zdCIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IlRPVEFMIElOQyBHU1Q6IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiVG90YWwiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJJTlZPSUNFIFRPVEFMIElOQ0xVRElORyBHU1QiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJUb3RhbCBJbnZvaWNlIFZhbHVlIChScy4pIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -55,Address,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiT2ZmaWNlIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQWRkcmVzcyIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -56,WorkSite,5,0,0,Work Site,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiV29yayBTaXRlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -57,SalesPerson,5,0,0,,e30= -58,Brand,5,0,0,Brand,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQnJhbmQiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -59,Website,5,0,0,Website Address,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiV2Vic2l0ZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -60,EmailAddress,5,0,0,Email address,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQVRUT1JORVkiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJFbWFpbCIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -61,MatterNumber,5,0,0,Matter 
Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTWF0dGVyIE51bWJlciIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6Ik1hdHRlciBOdW1iZXI6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -62,RegdOffice,5,0,0,Regd Office,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmVnZC4gT2ZmaWNlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -63,Terms,5,0,0,Payment Terms,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVGVybXMiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJQYXltZW50IFRlcm1zIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -64,EstNo,4,0,0,Est No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRXN0IE5vIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiRXN0IE5pOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -65,EstDate,4,0,0,Est Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRXN0IERhdGUiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJFc3QgRGF0ZToiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -66,CampaignName,5,0,0,Campaign Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2FtcGFpZ24gTmFtZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -67,ServiceTax,4,0,0,Service Tax,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2VydmljZSBUYXggbGV2aWVkIGJ5IFZlbmRvciBbQV0iLCJMYW5ndWFnZSI6ImVuIn1dfX0= -68,AgencyCommission,4,0,0,Agency Commission,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQWdlbmN5IENvbW1pc3Npb24iLCJMYW5ndWFnZSI6ImVuIn1dfX0= -69,BeneficiaryName,5,0,0,Beneficiary Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQmVuZWZpY2lhcnkgTmFtZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -70,Sub Brand,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3ViIEJyYW5kIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -71,PANNo,4,0,0,PAN NO,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUEFOIE5POiIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -72,Credit,5,0,0,Credit,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ3JlZGl0IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -73,CINNo,4,0,0,CIN No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ0lOIE5vOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -74,SwiftCode,4,0,1,Swift Code,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3dpZnQgQ29kZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -75,CustName,5,0,1,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVG86IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQ2xpZW50IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQ3VzdG9tZXIgTmFtZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -76,Telephone,4,0,0,Telephone,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVGVsIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiVGVsZXBob25lIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -77,BankName,5,0,0,Bank Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQkFOSyBOYW1lOiIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IkJhbmsiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -78,Price,4,0,0,Price,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUHJpY2UiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -79,Qty,4,0,0,Quantity,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUXR5IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -80,Description,5,0,0,Description,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGVzY3JpcHRpb24iLCJMYW5ndWFnZSI6ImVuIn1dfX0= -81,GLCode,4,0,0,GL Code,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiR0wgQ29kZSAvIEl0ZW0iLCJMYW5ndWFnZSI6ImVuIn1dfX0= -82,SoldTo,5,0,0,Sold To,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU29sZCBUbyIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -83,ABN,5,0,0,ABN 
number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQUJOIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -84,Phone,4,0,0,Phone no,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUGhvbmUiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -85,Regarding,5,0,0,Regarding,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmVnYXJkaW5nIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiUkU6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -86,RequestingManager,5,0,0,Requesting Manager,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmVxdWVzdGluZyBNYW5hZ2VyIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -87,InvoiceDate,4,0,0,Invoice Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSW52IERhdGUiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJEYXRlIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiSU5WT0lDRSBEQVRFIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -88,DueDate,4,0,0,Due Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRFVFIERBVEUiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -89,AccNo,4,0,0,Account Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQWNjb3VudCBObzoiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJBY2NvdW50IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQWNjdCBObyIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -90,BSB,4,0,0,BSB No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQlNCIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -91,AccName,5,0,0,Account Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQWNjdCBOYW1lIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQWNjb3VudCBOYW1lIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -92,SubTotal,4,0,0,Sub Total before tax,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3ViIFRvdGFsIChScy4pIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiU1VCVE9UQUw6IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiSU5WT0lDRSBUT1RBTCBFWENMVURJTkcgR1NUIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -93,Tax,4,0,0,Tac amounts,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoic2FsZXMgdGF4IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiR1NUIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -94,PurchaseNo,4,0,0,Purchase number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUC5PLiBOdW1iZXIiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJPcmRlciBOdW1iZXIiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJPcmRlciAjIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiUHVyY2hhc2UgTmJyIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiUE8gTnVtYmVyIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -95,ShipTo,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2hpcCBUbzoiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -96,BranchOffice,5,0,0,Branch Office,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQnJhbmNoIE9mZmljZToiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -97,IFSCCode,4,0,0,IFSC Code,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSUZTQyBDb2RlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -98,CentralisedBillingAndAccOffice,5,0,0,Centralised Billing and Accounting Office,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2VudHJhbGlzZWQgQmlsbGluZyBhbmQgQWNjb3VudGluZyBPZmZpY2U6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -99,ServiceTaxCategory,5,0,0,Service Tax Category,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2VydmljZSBUYXggQ2F0ZWdvcnk6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -100,ServiceTaxRegnNo,4,0,0,Service Tax Regn No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2VydmljZSBUYXggUmVnbiBObzoiLCJMYW5ndWFnZSI6ImVuIn1dfX0= 
-101,Branch,5,0,0,Branch,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQnJhbmNoIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQnJhbmNoOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -168,StartDate,5,0,0,Start Date of Minimum payment period per service component,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RhcnQgRGF0ZSBvZiBNaW5pbXVtIHBheW1lbnQgcGVyaW9kIHBlciBzZXJ2aWNlIGNvbXBvbmVudCIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -169,ZipCode,4,0,0,Zip Code,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiWmlwIENvZGUiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJaaS4gQ29kZToiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -170,ServiceComponents,5,0,0,Service Components,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2VydmljZSBDb21wb25lbnRzIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -171,ExistingCircuitIds,4,0,0,existing circuit ids,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiZXhpc3RpbmcgY2lyY3VpdCBpZHMiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -172,SignedDate,4,0,0,Signed Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGF0ZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -173,StateProvince,5,0,0,State Province,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RhdGUvUHJvdmluY2UiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -174,Attention,5,0,0,Attention,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQXR0biIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -175,Country,5,0,0,Country,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ291bnRyeSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -176,ReferenceNo,4,0,0,Reference No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTUEgUmVmZXJlbmNlIE5vLiIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IkFUJlQgUEEgUmVmZXJlbmNlIE5vLjoiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJBVCZUIFBTIFJlZmVyZW5jZSBOby46IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiUFMvQ1NBIFJlZmVyZW5jZSBOby4iLCJMYW5ndWFnZSI6ImVuIn1dfX0= -177,PreExistingContractNo,4,0,0,Pre Existing Contract No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoicHJlLWV4aXN0aW5nIENvbnRyYWN0IG5vIChtdXN0IGJlIGluY2x1ZGVkKSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -178,AccNo,4,0,0,AccountNumber,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiYWNjb3VudCBudW1iZXIiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -179,PercMonthlyFee,5,0,0,Percentage of Monthly Fee,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2FsY3VsYXRpb24gb2YgZWFybHkgdGVybWluYXRpb24gY2hhcmdlcyoiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -180,Customer,5,0,0,Customer,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ3VzdG9tZXIiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -181,SDAcode,4,0,0,SDA code,e30= -182,ContractIDNo,4,0,0,contract id no,e30= -183,DS1No,4,0,0,ds1 no,e30= -184,PRINo,5,0,0,PRI No,e30= -185,City,5,0,0,City,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2l0eSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -186,SalesRegion,5,0,0,Sales Region,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2FsZXMgUmVnaW9uIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -187,Name,5,0,0,Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTmFtZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -188,Title,5,0,0,Title,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVGl0bGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -189,PricingTerm,5,0,0,Pricing Schedule Term,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUHJpY2luZyBTY2hlZHVsZSBUZXJtIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -190,SalesStrata,5,0,0,Sales Strata,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2FsZXMgU3RyYXRhIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 
-191,SalesBranchManager,5,0,0,Sales Branch Manager,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2FsZXMgLyBCcmFuY2ggTWFuYWdlciIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IkJyYW5jaCBNYW5hZ2VyOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -192,EmailAddress,5,0,0,Email Address,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRW1haWwiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -193,TeleFax,4,0,0,Telephone and Fax,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRmF4IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiRXhpc3RpbmcgU2VydmljZSIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IlRlbGVwaG9uZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -194,StreetAddress,5,0,0,Street Address,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RyZWV0IEFkZHJlc3MiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -195,MinPayPeriod,5,0,0,Minimum Payment Period,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoicGVyIFNlcnZpY2UgQ29tcG9uZW50IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -196,ProgramCode,4,0,0,Program Code,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUHJvZ3JhbSBDb2RlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -197,SCVPName,5,0,0,SCVP Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoic2N2cCBuYW1lIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -198,RatesForMinPayment,5,0,0,Rates following the end of minimum payment,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmF0ZXMgZm9sbG93aW5nIHRoZSBlbmQgb2YgbWluaW11bSBwYXltZW50IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -199,Branch Transit Number,4,0,1,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQnJhbmNoIFRyYW5zaXQgTm8uIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQnJhbmNoIFRyYW5zaXQgTm8uOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -200,RateStabilization,5,0,0,Rate Stabilization per service component,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmF0ZSBTdGFiaWxpemF0aW9uIHBlciBzZXJ2aWNlIGNvbXBvbmVudCIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -201,EfffectiveDate,5,0,0,Effective Date of this pricing schedule,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRWZmZWN0aXZlIERhdGUgb2YgdGhpcyBwcmljaW5nIHNjaGVkdWxlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -46,MileageIn,4,0,0,Mileage In,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTWlsZWFnZSBJbiIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -102,Attention,5,0,0,Attention,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQXR0biIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -103,DOB,4,0,0,Date of Birth,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGF0ZSBvZiBCaXJ0aCIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -104,StartDate,4,0,0,Start Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RhcnQgRGF0ZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -105,Title,5,0,0,Title,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVGl0bGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -106,PlaceOfBirth,5,0,0,Place of Birth,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUGxhY2Ugb2YgQmlydGgiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -107,Status,5,0,0,Status,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RhdHVzIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -108,Employee,5,0,0,Employee,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRW1wbG95ZWUiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -109,FullName,5,0,0,Full Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRnVsbCBOYW1lIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -110,Subject,5,0,0,Subject,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3ViamVjdCIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -111,AnnualSalary,4,0,0,Annual 
Salary,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQW5udWFsIFNhbGFyeSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -112,Citizenship,5,0,0,Citizenship,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2l0aXplbnNoaXAiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -113,ExpireDate,4,0,0,Expire Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRXhwaXJlIERhdGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -114,PassportNo,4,0,0,Passport no,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUGFzc3BvcnQgbm8uIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -115,Gender,5,0,0,Gender,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiR2VuZGVyIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -116,IssueDate,4,0,0,Issue Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSXNzdWUgRGF0ZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -117,Smoking Status,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU21va2luZyBTdGF0dXM6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -118,ServiceDept,5,0,0,Service Department,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2VydmljZSBEZXB0IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -119,PCP,5,0,0,PCP,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUENQIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -120,ProgressNotes,5,0,0,Progress Notes,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUHJvZ3Jlc3MgTm90ZXMiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -121,AppointmentFacility,5,0,0,Appointment Facility,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQXBwb2ludG1lbnQgRmFjaWxpdHkiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -122,Referring,5,0,0,Referring,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmVmZXJyaW5nIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -123,MedPrimary,5,0,0,med primary,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoibWVkIHByaW1hcnkiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -124,Prescription,5,0,0,prescription,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoicHJlc2NyaXB0aW9uIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -125,PrimaryCareProvider,5,0,0,primary care provider,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoicHJpbWFyeSBjYXJlIHByb3ZpZGVyIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -126,Telephone,4,0,0,Telephone,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVGVsZXBob25lIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiUGgiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJIb3JtZToiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJIb3JuZToiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -127,FaxNo,4,0,0,Fax Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRmF4IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -128,NPI,4,0,0,NPI,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTlBJIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -129,FollowUp,5,0,0,Follow Up,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRm9sbG93IFVwIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQ2xhaW0iLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJEaXZpc2lvbiIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6Imxsw6NvbGxvdyBVcCIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -130,Name,5,0,0,Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTmFtZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -131,Diabetes,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGlhYmV0ZXMiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -132,AppointmentDateTime,4,0,0,Appt. 
Date/Time,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQXBwdC4gRGF0ZS9UaW1lIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -133,DOB,4,0,0,Date of Birth,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRE9CIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -134,Marital status,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTWFyaXRhbCBzdGF0dXMiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -135,Alcohol intake,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQWxjb2hvbCBpbnRha2UiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -136,Hypertension,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSHlwZXJ0ZW5zaW9uIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -137,Occupation,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiT2NjdXBhdGlvbiIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -138,Kidney Stones,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiS2lkbmV5IFN0b25lcyIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -139,Celebrex,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ0VMRUJSRVg6IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQ8OLTEVCUkVYOiIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IkNFSS5FQlJpPVg6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -140,Employer,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRW1wbG95ZXIiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -141,Vitals,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVml0YWxzIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -142,ROS,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUk9TIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -143,Quantity,4,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUXR5OiIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -144,Refills,4,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmVmaWxsczoiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -145,BodyMassIndex,4,0,0,Body Mass Index (BMI),eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQk1JIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -146,Weight,4,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiV3QiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -147,EncounterDate,4,0,0,Encounter Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRW5jb3VudGVyIERhdGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -148,Provider,5,0,0,Provider,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUHJvdmlkZXIiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -149,Insurance,5,0,0,Insurance,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSW5zdXJhbmNlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -150,Client,5,0,0,Client,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2xpZW50IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQ2xpZW50IE5hbWUiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -151,InvestigatingAgency,5,0,0,Investigating Agency,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSW52ZXN0aWdhdGluZyBBZ2VuY3kiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -152,County,5,0,0,County,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ291bnR5IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -153,Parties,5,0,0,Parties,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUEFSVFkgMToiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -154,TransactionNo,4,0,0,Transaction Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVHJhbnNhY3Rpb24gIyIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -155,Date,4,0,0,Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGF0ZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -156,TimeofLoss,4,0,0,Time of Loss,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVElNRSBPRiBMT1NTOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -157,ClaimNo,4,0,0,Claim 
Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2xhaW0iLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJDbGFpbSBObyIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -158,State,5,0,0,State,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RhdGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -159,DateOfLoss,4,0,0,Date Of Loss,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGF0ZSBPZiBMb3NzIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -160,DriverLicense,4,0,0,Driver License No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRHJpdmVyIExpY2Vuc2UiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -161,Street,5,0,0,Street,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RyZWV0IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -162,Division,5,0,0,Division,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRElWSVNJT046IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiRGl2aXNpb24gQ29kZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -163,Adjuster,5,0,0,Adjuster,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQURKVVNURVI6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -164,ReportNumber,4,0,0,Report Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmVwb3J0IE51bWJlciIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -165,ReportType,5,0,0,Report Type,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmVwb3J0IFR5cGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -166,Tag,5,0,0,Tag,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVGFnIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -167,City,5,0,0,City,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2l0eSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -1,InvestmentName,5,1,0,,e30= -2,InvestorName,5,1,0,,e30= -3,CapBalance,4,1,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2FwaXRhbCBCYWxhbmNlIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiYW1vdW50IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiY2FwaXRhbCIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6ImJhbGFuY2UiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -4,FundAsOfDate,4,1,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRnVuZCBhcyBvZiBkYXRlIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoicGVyaW9kIGVuZCBkYXRlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -5,IssuedDate,4,0,0,Issued Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSXNzdWVkIERhdGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -6,IssuedAt,5,0,0,Issued At,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSXNzdWVkIEF0IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -7,Master,5,0,0,Master/Captain,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTWFzdGVyIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -8,Shipper,5,0,0,Shipper,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2hpcHBlciIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -9,BLNo,5,0,0,Bill of Lading number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQkwgTk86IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -10,Flag,5,0,0,Flag,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRmxhZyIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -11,Consignee,5,0,0,Consignee,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ29uc2lnbmVlIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQ29uc2lnbmVlOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -12,VoyageNo,4,0,0,Voyage No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVm95YWdlIE5vIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -13,NotifyParty,5,0,0,Notify Party,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTm90aWZ5IFBhcnR5IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 
-14,OnboardTanker,5,0,0,OnboardTanker,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiT24gYm9hcmQgdGhlIFRhbmtlciIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -15,LoadingPort,5,0,0,Loading Port,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTG9hZGluZyBQb3J0IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -16,DeliveryPort,5,0,0,Delivery Port,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVG8gYmUgZGVsaXZlcmVkIHRvIHRoZSBwb3J0IG9mIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -17,Adjuster,5,0,0,Adjuster,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQWRqdXN0ZXIiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -18,WrittenBy,5,0,0,Written By,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiV3JpdHRlbiBCeSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -19,ClaimNo,4,0,0,Claim No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2xhaW0gIyIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -20,GrandTotal,4,0,0,Grand Total,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiR3JhbmQgVG90YWwiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -21,VehicleOut,5,0,0,Vehicle Out,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVmVoaWNsZSBPdXQiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -22,TypeOfLoss,5,0,0,Type of Loss,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVHlwZSBvZiBMb3NzIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -23,Insured,5,0,0,Insured,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSW5zdXJlZCIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -24,PolicyNo,4,0,0,Policy no,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUG9saWN5ICMiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -25,Fax,4,0,0,Fax,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRmF4IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -26,WorkfileID,4,0,0,Workfile ID,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiV29ya2ZpbGUgSUQiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -27,Telephone,4,0,0,Telephone,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUGhvbmUiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -28,DaysToRepair,4,0,0,Days to Repair,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGF5cyB0byBSZXBhaXIiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -29,CUSTOMERPAY,4,0,0,CUSTOMER PAY,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ1VTVE9NRVIgUEFZIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -30,Subtotal,4,0,0,Subtotal,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3VidG90YWwiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -31,INSURANCEPAY,4,0,0,INSURANCE PAY,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSU5TVVJBTkNFIFBBWSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -32,Condition,5,0,0,Condition,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ29uZGl0aW9uIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -33,JobNo,4,0,0,Jon no,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSm9iICMiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -34,ProductionDate,4,0,0,Production Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUHJvZHVjdGlvbiBEYXRlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -35,State,5,0,0,State,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RhdGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -36,FederalID,4,0,0,Federal ID,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRmVkZXJhbCBJRCIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -37,MileageOut,5,0,0,Mileage Out,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTWlsZWFnZSBPdXQiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -38,RONumber,4,0,0,RO Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUk8gTnVtYmVyIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 
-39,Deductible,4,0,0,Deductible,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGVkdWN0aWJsZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -40,License,5,0,0,License,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTGljZW5zZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -41,VIN,4,0,0,VIN,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVklOIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -42,PointOfImpact,5,0,0,Point of Impact,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUG9pbnQgb2YgSW1wYWN0IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 -43,DateOfLoss,4,0,0,Date of Loss,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGF0ZSBvZiBMb3NzOiIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IkRhdGUgT2YgTG9zcyIsIkxhbmd1YWdlIjoiZW4ifV19fQ== -44,InspectionLocation,5,0,0,Inspection Location,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSW5zcGVjdGlvbiBMb2NhdGlvbjoiLCJMYW5ndWFnZSI6ImVuIn1dfX0= -45,Owner,5,0,0,Owner,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiT3duZXI6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19 +47,ExteriorColor,5,0,0,Exterior Color,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRXh0ZXJpb3IgQ29sb3IiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +48,InteriorColor,5,0,0,Interior Color,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSW50ZXJpb3IgQ29sb3IiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +49,Page Number,4,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUGFnZSAjOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +50,JobDescription,5,0,0,Job Description,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSm9iIERlc2NyaXB0aW9uIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +51,SBCess,4,0,0,Swachh Bharat Cess,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU0IgQ2VzcyBvbiBUYXhhYmxlIFZhbHVlIFtCXSIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IlNlcnZpY2UgVGF4IG9uIFRheGFibGUgVmFsdWUgW0JdIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiU0IgQ2VzcyBsZXZpZWQgYnkgVmVuZG9yIFtBXSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +52,InvoiceNumber,4,1,0,Invoice Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSW52IE5vIyIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6Ikludm9pY2UgTnVtYmVyOiIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6Ikludm9pY2UgIyIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6Ikludm9pY2UgTnVtYmVyIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiVEFYIElOVk9JQ0UgTlVNQkVSIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiSW52IE5vICM6IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiSW52IE5pICM6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +53,Fax,4,0,0,FaxNo,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRmF4IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +54,Total,4,1,0,Grand Total,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVG90YWwgQ29zdCIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IlRPVEFMIElOQyBHU1Q6IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiVG90YWwiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJJTlZPSUNFIFRPVEFMIElOQ0xVRElORyBHU1QiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJUb3RhbCBJbnZvaWNlIFZhbHVlIChScy4pIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +55,Address,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiT2ZmaWNlIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQWRkcmVzcyIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +56,WorkSite,5,0,0,Work Site,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiV29yayBTaXRlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +57,SalesPerson,5,0,0,,e30=,0,0 +58,Brand,5,0,0,Brand,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQnJhbmQiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 
+59,Website,5,0,0,Website Address,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiV2Vic2l0ZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +60,EmailAddress,5,0,0,Email address,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQVRUT1JORVkiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJFbWFpbCIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +61,MatterNumber,5,0,0,Matter Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTWF0dGVyIE51bWJlciIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6Ik1hdHRlciBOdW1iZXI6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +62,RegdOffice,5,0,0,Regd Office,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmVnZC4gT2ZmaWNlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +63,Terms,5,0,0,Payment Terms,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVGVybXMiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJQYXltZW50IFRlcm1zIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +64,EstNo,4,0,0,Est No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRXN0IE5vIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiRXN0IE5pOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +65,EstDate,4,0,0,Est Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRXN0IERhdGUiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJFc3QgRGF0ZToiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +66,CampaignName,5,0,0,Campaign Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2FtcGFpZ24gTmFtZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +67,ServiceTax,4,0,0,Service Tax,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2VydmljZSBUYXggbGV2aWVkIGJ5IFZlbmRvciBbQV0iLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +68,AgencyCommission,4,0,0,Agency Commission,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQWdlbmN5IENvbW1pc3Npb24iLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +69,BeneficiaryName,5,0,0,Beneficiary Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQmVuZWZpY2lhcnkgTmFtZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +70,Sub Brand,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3ViIEJyYW5kIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +71,PANNo,4,0,0,PAN NO,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUEFOIE5POiIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +72,Credit,5,0,0,Credit,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ3JlZGl0IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +73,CINNo,4,0,0,CIN No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ0lOIE5vOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +74,SwiftCode,4,0,1,Swift Code,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3dpZnQgQ29kZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +75,CustName,5,0,1,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVG86IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQ2xpZW50IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQ3VzdG9tZXIgTmFtZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +76,Telephone,4,0,0,Telephone,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVGVsIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiVGVsZXBob25lIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +77,BankName,5,0,0,Bank Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQkFOSyBOYW1lOiIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IkJhbmsiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +78,Price,4,0,0,Price,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUHJpY2UiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +79,Qty,4,0,0,Quantity,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUXR5IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 
+80,Description,5,0,0,Description,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGVzY3JpcHRpb24iLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +81,GLCode,4,0,0,GL Code,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiR0wgQ29kZSAvIEl0ZW0iLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +82,SoldTo,5,0,0,Sold To,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU29sZCBUbyIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +83,ABN,5,0,0,ABN number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQUJOIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +84,Phone,4,0,0,Phone no,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUGhvbmUiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +85,Regarding,5,0,0,Regarding,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmVnYXJkaW5nIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiUkU6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +86,RequestingManager,5,0,0,Requesting Manager,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmVxdWVzdGluZyBNYW5hZ2VyIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +87,InvoiceDate,4,0,0,Invoice Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSW52IERhdGUiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJEYXRlIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiSU5WT0lDRSBEQVRFIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +88,DueDate,4,0,0,Due Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRFVFIERBVEUiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +89,AccNo,4,0,0,Account Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQWNjb3VudCBObzoiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJBY2NvdW50IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQWNjdCBObyIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +90,BSB,4,0,0,BSB No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQlNCIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +91,AccName,5,0,0,Account Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQWNjdCBOYW1lIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQWNjb3VudCBOYW1lIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +92,SubTotal,4,0,0,Sub Total before tax,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3ViIFRvdGFsIChScy4pIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiU1VCVE9UQUw6IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiSU5WT0lDRSBUT1RBTCBFWENMVURJTkcgR1NUIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +93,Tax,4,0,0,Tac amounts,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoic2FsZXMgdGF4IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiR1NUIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +94,PurchaseNo,4,0,0,Purchase number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUC5PLiBOdW1iZXIiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJPcmRlciBOdW1iZXIiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJPcmRlciAjIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiUHVyY2hhc2UgTmJyIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiUE8gTnVtYmVyIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +95,ShipTo,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2hpcCBUbzoiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +96,BranchOffice,5,0,0,Branch Office,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQnJhbmNoIE9mZmljZToiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +97,IFSCCode,4,0,0,IFSC Code,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSUZTQyBDb2RlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +98,CentralisedBillingAndAccOffice,5,0,0,Centralised Billing and Accounting 
Office,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2VudHJhbGlzZWQgQmlsbGluZyBhbmQgQWNjb3VudGluZyBPZmZpY2U6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +99,ServiceTaxCategory,5,0,0,Service Tax Category,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2VydmljZSBUYXggQ2F0ZWdvcnk6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +100,ServiceTaxRegnNo,4,0,0,Service Tax Regn No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2VydmljZSBUYXggUmVnbiBObzoiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +101,Branch,5,0,0,Branch,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQnJhbmNoIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQnJhbmNoOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +168,StartDate,5,0,0,Start Date of Minimum payment period per service component,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RhcnQgRGF0ZSBvZiBNaW5pbXVtIHBheW1lbnQgcGVyaW9kIHBlciBzZXJ2aWNlIGNvbXBvbmVudCIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +169,ZipCode,4,0,0,Zip Code,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiWmlwIENvZGUiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJaaS4gQ29kZToiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +170,ServiceComponents,5,0,0,Service Components,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2VydmljZSBDb21wb25lbnRzIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +171,ExistingCircuitIds,4,0,0,existing circuit ids,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiZXhpc3RpbmcgY2lyY3VpdCBpZHMiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +172,SignedDate,4,0,0,Signed Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGF0ZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +173,StateProvince,5,0,0,State Province,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RhdGUvUHJvdmluY2UiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +174,Attention,5,0,0,Attention,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQXR0biIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +175,Country,5,0,0,Country,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ291bnRyeSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +176,ReferenceNo,4,0,0,Reference No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTUEgUmVmZXJlbmNlIE5vLiIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IkFUJlQgUEEgUmVmZXJlbmNlIE5vLjoiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJBVCZUIFBTIFJlZmVyZW5jZSBOby46IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiUFMvQ1NBIFJlZmVyZW5jZSBOby4iLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +177,PreExistingContractNo,4,0,0,Pre Existing Contract No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoicHJlLWV4aXN0aW5nIENvbnRyYWN0IG5vIChtdXN0IGJlIGluY2x1ZGVkKSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +178,AccNo,4,0,0,AccountNumber,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiYWNjb3VudCBudW1iZXIiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +179,PercMonthlyFee,5,0,0,Percentage of Monthly Fee,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2FsY3VsYXRpb24gb2YgZWFybHkgdGVybWluYXRpb24gY2hhcmdlcyoiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +180,Customer,5,0,0,Customer,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ3VzdG9tZXIiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +181,SDAcode,4,0,0,SDA code,e30=,0,0 +182,ContractIDNo,4,0,0,contract id no,e30=,0,0 +183,DS1No,4,0,0,ds1 no,e30=,0,0 +184,PRINo,5,0,0,PRI No,e30=,0,0 +185,City,5,0,0,City,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2l0eSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +186,SalesRegion,5,0,0,Sales 
Region,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2FsZXMgUmVnaW9uIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +187,Name,5,0,0,Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTmFtZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +188,Title,5,0,0,Title,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVGl0bGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +189,PricingTerm,5,0,0,Pricing Schedule Term,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUHJpY2luZyBTY2hlZHVsZSBUZXJtIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +190,SalesStrata,5,0,0,Sales Strata,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2FsZXMgU3RyYXRhIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +191,SalesBranchManager,5,0,0,Sales Branch Manager,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2FsZXMgLyBCcmFuY2ggTWFuYWdlciIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IkJyYW5jaCBNYW5hZ2VyOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +192,EmailAddress,5,0,0,Email Address,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRW1haWwiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +193,TeleFax,4,0,0,Telephone and Fax,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRmF4IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiRXhpc3RpbmcgU2VydmljZSIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IlRlbGVwaG9uZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +194,StreetAddress,5,0,0,Street Address,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RyZWV0IEFkZHJlc3MiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +195,MinPayPeriod,5,0,0,Minimum Payment Period,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoicGVyIFNlcnZpY2UgQ29tcG9uZW50IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +196,ProgramCode,4,0,0,Program Code,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUHJvZ3JhbSBDb2RlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +197,SCVPName,5,0,0,SCVP Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoic2N2cCBuYW1lIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +198,RatesForMinPayment,5,0,0,Rates following the end of minimum payment,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmF0ZXMgZm9sbG93aW5nIHRoZSBlbmQgb2YgbWluaW11bSBwYXltZW50IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +199,Branch Transit Number,4,0,1,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQnJhbmNoIFRyYW5zaXQgTm8uIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQnJhbmNoIFRyYW5zaXQgTm8uOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +200,RateStabilization,5,0,0,Rate Stabilization per service component,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmF0ZSBTdGFiaWxpemF0aW9uIHBlciBzZXJ2aWNlIGNvbXBvbmVudCIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +201,EfffectiveDate,5,0,0,Effective Date of this pricing schedule,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRWZmZWN0aXZlIERhdGUgb2YgdGhpcyBwcmljaW5nIHNjaGVkdWxlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +46,MileageIn,4,0,0,Mileage In,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTWlsZWFnZSBJbiIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +102,Attention,5,0,0,Attention,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQXR0biIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +103,DOB,4,0,0,Date of Birth,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGF0ZSBvZiBCaXJ0aCIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +104,StartDate,4,0,0,Start Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RhcnQgRGF0ZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 
+105,Title,5,0,0,Title,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVGl0bGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +106,PlaceOfBirth,5,0,0,Place of Birth,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUGxhY2Ugb2YgQmlydGgiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +107,Status,5,0,0,Status,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RhdHVzIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +108,Employee,5,0,0,Employee,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRW1wbG95ZWUiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +109,FullName,5,0,0,Full Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRnVsbCBOYW1lIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +110,Subject,5,0,0,Subject,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3ViamVjdCIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +111,AnnualSalary,4,0,0,Annual Salary,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQW5udWFsIFNhbGFyeSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +112,Citizenship,5,0,0,Citizenship,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2l0aXplbnNoaXAiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +113,ExpireDate,4,0,0,Expire Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRXhwaXJlIERhdGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +114,PassportNo,4,0,0,Passport no,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUGFzc3BvcnQgbm8uIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +115,Gender,5,0,0,Gender,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiR2VuZGVyIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +116,IssueDate,4,0,0,Issue Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSXNzdWUgRGF0ZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +117,Smoking Status,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU21va2luZyBTdGF0dXM6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +118,ServiceDept,5,0,0,Service Department,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2VydmljZSBEZXB0IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +119,PCP,5,0,0,PCP,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUENQIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +120,ProgressNotes,5,0,0,Progress Notes,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUHJvZ3Jlc3MgTm90ZXMiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +121,AppointmentFacility,5,0,0,Appointment Facility,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQXBwb2ludG1lbnQgRmFjaWxpdHkiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +122,Referring,5,0,0,Referring,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmVmZXJyaW5nIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +123,MedPrimary,5,0,0,med primary,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoibWVkIHByaW1hcnkiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +124,Prescription,5,0,0,prescription,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoicHJlc2NyaXB0aW9uIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +125,PrimaryCareProvider,5,0,0,primary care provider,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoicHJpbWFyeSBjYXJlIHByb3ZpZGVyIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +126,Telephone,4,0,0,Telephone,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVGVsZXBob25lIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiUGgiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJIb3JtZToiLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJIb3JuZToiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +127,FaxNo,4,0,0,Fax Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRmF4IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 
+128,NPI,4,0,0,NPI,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTlBJIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +129,FollowUp,5,0,0,Follow Up,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRm9sbG93IFVwIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQ2xhaW0iLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJEaXZpc2lvbiIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6Imxsw6NvbGxvdyBVcCIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +130,Name,5,0,0,Name,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTmFtZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +131,Diabetes,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGlhYmV0ZXMiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +132,AppointmentDateTime,4,0,0,Appt. Date/Time,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQXBwdC4gRGF0ZS9UaW1lIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +133,DOB,4,0,0,Date of Birth,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRE9CIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +134,Marital status,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTWFyaXRhbCBzdGF0dXMiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +135,Alcohol intake,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQWxjb2hvbCBpbnRha2UiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +136,Hypertension,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSHlwZXJ0ZW5zaW9uIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +137,Occupation,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiT2NjdXBhdGlvbiIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +138,Kidney Stones,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiS2lkbmV5IFN0b25lcyIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +139,Celebrex,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ0VMRUJSRVg6IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQ8OLTEVCUkVYOiIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IkNFSS5FQlJpPVg6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +140,Employer,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRW1wbG95ZXIiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +141,Vitals,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVml0YWxzIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +142,ROS,5,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUk9TIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +143,Quantity,4,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUXR5OiIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +144,Refills,4,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmVmaWxsczoiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +145,BodyMassIndex,4,0,0,Body Mass Index (BMI),eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQk1JIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +146,Weight,4,0,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiV3QiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +147,EncounterDate,4,0,0,Encounter Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRW5jb3VudGVyIERhdGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +148,Provider,5,0,0,Provider,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUHJvdmlkZXIiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +149,Insurance,5,0,0,Insurance,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSW5zdXJhbmNlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +150,Client,5,0,0,Client,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2xpZW50IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQ2xpZW50IE5hbWUiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +151,InvestigatingAgency,5,0,0,Investigating 
Agency,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSW52ZXN0aWdhdGluZyBBZ2VuY3kiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +152,County,5,0,0,County,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ291bnR5IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +153,Parties,5,0,0,Parties,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUEFSVFkgMToiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +154,TransactionNo,4,0,0,Transaction Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVHJhbnNhY3Rpb24gIyIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +155,Date,4,0,0,Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGF0ZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +156,TimeofLoss,4,0,0,Time of Loss,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVElNRSBPRiBMT1NTOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +157,ClaimNo,4,0,0,Claim Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2xhaW0iLCJMYW5ndWFnZSI6ImVuIn0seyJLZXlBbGlhc05hbWUiOiJDbGFpbSBObyIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +158,State,5,0,0,State,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RhdGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +159,DateOfLoss,4,0,0,Date Of Loss,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGF0ZSBPZiBMb3NzIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +160,DriverLicense,4,0,0,Driver License No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRHJpdmVyIExpY2Vuc2UiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +161,Street,5,0,0,Street,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RyZWV0IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +162,Division,5,0,0,Division,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRElWSVNJT046IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiRGl2aXNpb24gQ29kZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +163,Adjuster,5,0,0,Adjuster,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQURKVVNURVI6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +164,ReportNumber,4,0,0,Report Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmVwb3J0IE51bWJlciIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +165,ReportType,5,0,0,Report Type,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUmVwb3J0IFR5cGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +166,Tag,5,0,0,Tag,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVGFnIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +167,City,5,0,0,City,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2l0eSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +1,InvestmentName,5,1,0,,e30=,0,0 +2,InvestorName,5,1,0,,e30=,0,0 +3,CapBalance,4,1,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2FwaXRhbCBCYWxhbmNlIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiYW1vdW50IiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiY2FwaXRhbCIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6ImJhbGFuY2UiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +4,FundAsOfDate,4,1,0,,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRnVuZCBhcyBvZiBkYXRlIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoicGVyaW9kIGVuZCBkYXRlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +5,IssuedDate,4,0,0,Issued Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSXNzdWVkIERhdGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +6,IssuedAt,5,0,0,Issued At,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSXNzdWVkIEF0IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +7,Master,5,0,0,Master/Captain,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTWFzdGVyIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 
+8,Shipper,5,0,0,Shipper,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU2hpcHBlciIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +9,BLNo,5,0,0,Bill of Lading number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQkwgTk86IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +10,Flag,5,0,0,Flag,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRmxhZyIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +11,Consignee,5,0,0,Consignee,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ29uc2lnbmVlIiwiTGFuZ3VhZ2UiOiJlbiJ9LHsiS2V5QWxpYXNOYW1lIjoiQ29uc2lnbmVlOiIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +12,VoyageNo,4,0,0,Voyage No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVm95YWdlIE5vIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +13,NotifyParty,5,0,0,Notify Party,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTm90aWZ5IFBhcnR5IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +14,OnboardTanker,5,0,0,OnboardTanker,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiT24gYm9hcmQgdGhlIFRhbmtlciIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +15,LoadingPort,5,0,0,Loading Port,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTG9hZGluZyBQb3J0IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +16,DeliveryPort,5,0,0,Delivery Port,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVG8gYmUgZGVsaXZlcmVkIHRvIHRoZSBwb3J0IG9mIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +17,Adjuster,5,0,0,Adjuster,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQWRqdXN0ZXIiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +18,WrittenBy,5,0,0,Written By,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiV3JpdHRlbiBCeSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +19,ClaimNo,4,0,0,Claim No,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ2xhaW0gIyIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +20,GrandTotal,4,0,0,Grand Total,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiR3JhbmQgVG90YWwiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +21,VehicleOut,5,0,0,Vehicle Out,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVmVoaWNsZSBPdXQiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +22,TypeOfLoss,5,0,0,Type of Loss,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVHlwZSBvZiBMb3NzIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +23,Insured,5,0,0,Insured,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSW5zdXJlZCIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +24,PolicyNo,4,0,0,Policy no,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUG9saWN5ICMiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +25,Fax,4,0,0,Fax,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRmF4IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +26,WorkfileID,4,0,0,Workfile ID,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiV29ya2ZpbGUgSUQiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +27,Telephone,4,0,0,Telephone,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUGhvbmUiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +28,DaysToRepair,4,0,0,Days to Repair,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGF5cyB0byBSZXBhaXIiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +29,CUSTOMERPAY,4,0,0,CUSTOMER PAY,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ1VTVE9NRVIgUEFZIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +30,Subtotal,4,0,0,Subtotal,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3VidG90YWwiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +31,INSURANCEPAY,4,0,0,INSURANCE PAY,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSU5TVVJBTkNFIFBBWSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 
+32,Condition,5,0,0,Condition,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiQ29uZGl0aW9uIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +33,JobNo,4,0,0,Job no,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSm9iICMiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +34,ProductionDate,4,0,0,Production Date,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUHJvZHVjdGlvbiBEYXRlIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +35,State,5,0,0,State,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiU3RhdGUiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +36,FederalID,4,0,0,Federal ID,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRmVkZXJhbCBJRCIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +37,MileageOut,5,0,0,Mileage Out,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTWlsZWFnZSBPdXQiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +38,RONumber,4,0,0,RO Number,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUk8gTnVtYmVyIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +39,Deductible,4,0,0,Deductible,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGVkdWN0aWJsZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +40,License,5,0,0,License,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiTGljZW5zZSIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +41,VIN,4,0,0,VIN,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiVklOIiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +42,PointOfImpact,5,0,0,Point of Impact,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiUG9pbnQgb2YgSW1wYWN0IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 +43,DateOfLoss,4,0,0,Date of Loss,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiRGF0ZSBvZiBMb3NzOiIsIkxhbmd1YWdlIjoiZW4ifSx7IktleUFsaWFzTmFtZSI6IkRhdGUgT2YgTG9zcyIsIkxhbmd1YWdlIjoiZW4ifV19fQ==,0,0 +44,InspectionLocation,5,0,0,Inspection Location,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiSW5zcGVjdGlvbiBMb2NhdGlvbjoiLCJMYW5ndWFnZSI6ImVuIn1dfX0=,0,0 +45,Owner,5,0,0,Owner,eyJrZXlDb250ZXh0Ijp7IktleUFsaWFzTGlzdCI6W3siS2V5QWxpYXNOYW1lIjoiT3duZXI6IiwiTGFuZ3VhZ2UiOiJlbiJ9XX19,0,0 diff --git a/ACA/configuration-ha/DB2/CSVFiles/object_type.csv b/ACA/configuration-ha/DB2/CSVFiles/object_type.csv index 2c750495..138c609b 100644 --- a/ACA/configuration-ha/DB2/CSVFiles/object_type.csv +++ b/ACA/configuration-ha/DB2/CSVFiles/object_type.csv @@ -1,5 +1,5 @@ -1,Object,Object,sys,1,1,0,1,Default type - Object -2,Numeric,Numeric,sys,1,1,0,1,Default type - Numeric -3,Alphabetic,Alphabetic,sys,1,1,0,1,Default type - Alphabetic -4,ExtendedNumeric,ExtendedNumeric,sys,1,2,0,1,Default type - Extended Numeric -5,ExtendedAlphabetic,ExtendedAlphabetic,sys,1,3,0,1,Default type - Extended Alphabetic \ No newline at end of file +1,Object,Object,sys,1,1,0,1,Default type - Object,e30= +2,Numeric,Numeric,sys,1,1,0,1,Default type - Numeric,e30= +3,Alphabetic,Alphabetic,sys,1,1,0,1,Default type - Alphabetic,e30= +4,ExtendedNumeric,ExtendedNumeric,sys,1,2,0,1,Default type - Extended Numeric,e30= +5,ExtendedAlphabetic,ExtendedAlphabetic,sys,1,3,0,1,Default type - Extended Alphabetic,e30= \ No newline at end of file diff --git a/ACA/configuration-ha/DB2/CreateBaseDB.bat b/ACA/configuration-ha/DB2/CreateBaseDB.bat index 89d93e46..d31622d0 100644 --- a/ACA/configuration-ha/DB2/CreateBaseDB.bat +++ b/ACA/configuration-ha/DB2/CreateBaseDB.bat @@ -1,56 +1,66 @@ -@echo off -SETLOCAL - -IF NOT DEFINED skip_create_base_db ( - set skip_create_base_db=false -) - -IF "%skip_create_base_db%"=="true" ( - echo -- - echo This script will initialize an existing DB2 database for use as a BACA base
database. - echo -- -) ELSE ( - echo -- - echo This script will create and initialize a new DB2 database for use as a BACA base database. An existing database user must exist. - echo -- -) - - -set /p base_db_name= Enter the name of the Base BACA database. If nothing is entered, we will use the following default value 'CABASEDB': -IF NOT DEFINED base_db_name SET "base_db_name=CABASEDB" - -set /p base_db_user= Enter the name of the database user for the Base BACA database. If nothing is entered, we will use the following default value 'CABASEUSER' : -IF NOT DEFINED base_db_user SET "base_db_user=CABASEUSER" - -set /P c=Are you sure you want to continue[Y/N]? -if /I "%c%" EQU "N" goto :DOEXIT - -IF "%skip_create_base_db%"=="true" ( - goto :DOCREATETABLE -) ELSE ( - goto :DOCREATE -) - -:DOCREATE - echo "Creating a database...." - db2 CREATE DATABASE %base_db_name% AUTOMATIC STORAGE YES USING CODESET UTF-8 TERRITORY DEFAULT COLLATE USING SYSTEM PAGESIZE 32768 - db2 CONNECT TO %base_db_name% - db2 GRANT CONNECT,DATAACCESS ON DATABASE TO USER %base_db_user% - db2 GRANT USE OF TABLESPACE USERSPACE1 TO USER %base_db_user% - db2 CONNECT RESET - goto DOCREATETABLE -:DOCREATETABLE - db2 CONNECT TO %base_db_name% - db2 SET SCHEMA %base_db_user% - echo "Creating table TENANTINFO...." - db2 CREATE TABLE TENANTINFO (tenantid varchar(128) NOT NULL,ontology varchar(128) not null,tenanttype smallint not null with default,dailylimit smallint not null with default 0,rdbmsengine varchar(128) not null,dbname varchar(255) not null,dbuser varchar(255) not null,bacaversion varchar(1024) not null,rdbmsconnection varchar(1024) for bit data default null,mongoconnection varchar(1024) for bit data default null,mongoadminconnection varchar(1024) for bit data default null,featureflags bigint not null with default 0,tenantdbversion varchar(255),CONSTRAINT tenantinfo_pkey PRIMARY KEY (tenantid, ontology) ) - db2 CONNECT RESET - goto END -:DOEXIT - echo "Exited on user input" - goto END -:END - set skip_create_base_db= - echo "END" - +@echo off +REM ************************************************************************************ +REM * @---lm_copyright_start +REM * 5737-I23, 5900-A30 +REM * Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +REM * U.S. Government Users Restricted Rights: +REM * Use, duplication or disclosure restricted by GSA ADP Schedule +REM * Contract with IBM Corp. +REM * @---lm_copyright_end +REM ************************************************************************************ + +SETLOCAL + +IF NOT DEFINED skip_create_base_db ( + set skip_create_base_db=false +) + +IF "%skip_create_base_db%"=="true" ( + echo -- + echo This script will initialize an existing DB2 database for use as a BACA base database. + echo -- +) ELSE ( + echo -- + echo This script will create and initialize a new DB2 database for use as a BACA base database. An existing database user must exist. + echo -- +) + + +set /p base_db_name= Enter the name of the Base BACA database. If nothing is entered, we will use the following default value 'CABASEDB': +IF NOT DEFINED base_db_name SET "base_db_name=CABASEDB" + +set /p base_db_user= Enter the name of the database user for the Base BACA database. If nothing is entered, we will use the following default value 'CABASEUSER' : +IF NOT DEFINED base_db_user SET "base_db_user=CABASEUSER" + +set /P c=Are you sure you want to continue[Y/N]? 
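+REM NOTE: only an explicit N/n answer exits; any other response continues. +REM When skip_create_base_db=true the script jumps to :DOCREATETABLE and only initializes an existing database; otherwise :DOCREATE creates the database first and then falls through to :DOCREATETABLE.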
+if /I "%c%" EQU "N" goto :DOEXIT + +IF "%skip_create_base_db%"=="true" ( + goto :DOCREATETABLE +) ELSE ( + goto :DOCREATE +) + +:DOCREATE + echo "Creating a database...." + db2 CREATE DATABASE %base_db_name% AUTOMATIC STORAGE YES USING CODESET UTF-8 TERRITORY DEFAULT COLLATE USING SYSTEM PAGESIZE 32768 + db2 CONNECT TO %base_db_name% + db2 GRANT CONNECT,DATAACCESS ON DATABASE TO USER %base_db_user% + db2 GRANT USE OF TABLESPACE USERSPACE1 TO USER %base_db_user% + db2 CONNECT RESET + goto DOCREATETABLE +:DOCREATETABLE + db2 CONNECT TO %base_db_name% + db2 SET SCHEMA %base_db_user% + echo "Creating table TENANTINFO...." + db2 CREATE TABLE TENANTINFO (tenantid varchar(128) NOT NULL,ontology varchar(128) not null,tenanttype smallint not null with default,dailylimit smallint not null with default 0,rdbmsengine varchar(128) not null,dbname varchar(255) not null,dbuser varchar(255) not null,bacaversion varchar(1024) not null,rdbmsconnection varchar(1024) for bit data default null,mongoconnection varchar(1024) for bit data default null,mongoadminconnection varchar(1024) for bit data default null,featureflags bigint not null with default 0,tenantdbversion varchar(255),last_job_run_time BIGINT not null with default 0,dbstatus smallint not null with default 0,CONSTRAINT tenantinfo_pkey PRIMARY KEY (tenantid, ontology) ) + db2 CONNECT RESET + goto END +:DOEXIT + echo "Exited on user input" + goto END +:END + set skip_create_base_db= + echo "END" + ENDLOCAL \ No newline at end of file diff --git a/ACA/configuration-ha/DB2/CreateBaseDB.sh b/ACA/configuration-ha/DB2/CreateBaseDB.sh index b60b688e..a859e979 100755 --- a/ACA/configuration-ha/DB2/CreateBaseDB.sh +++ b/ACA/configuration-ha/DB2/CreateBaseDB.sh @@ -1,5 +1,11 @@ #!/bin/bash - +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end # NOTES: # This script will create a new DB2 database to be used as the Content Analyzer Base database and initialize the database. # If you prefer to create your own database, and only want the script to initialize the existing database, diff --git a/ACA/configuration-ha/DB2/DeleteOntology.sh b/ACA/configuration-ha/DB2/DeleteOntology.sh index b9acc0f6..195389c3 100755 --- a/ACA/configuration-ha/DB2/DeleteOntology.sh +++ b/ACA/configuration-ha/DB2/DeleteOntology.sh @@ -1,4 +1,12 @@ #!/bin/bash +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end + . ./ScriptFunctions.sh echo -e "\n-- This script will delete an existing ontology from a tenant" diff --git a/ACA/configuration-ha/DB2/DeleteTenant.sh b/ACA/configuration-ha/DB2/DeleteTenant.sh index b5f93a40..61248987 100755 --- a/ACA/configuration-ha/DB2/DeleteTenant.sh +++ b/ACA/configuration-ha/DB2/DeleteTenant.sh @@ -1,4 +1,12 @@ #!/bin/bash +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end + . 
./ScriptFunctions.sh echo -e "\n-- This script will delete an existing BACA tenant" diff --git a/ACA/configuration-ha/DB2/InitBaseDB.bat b/ACA/configuration-ha/DB2/InitBaseDB.bat index 72325aaf..40ec9d4a 100644 --- a/ACA/configuration-ha/DB2/InitBaseDB.bat +++ b/ACA/configuration-ha/DB2/InitBaseDB.bat @@ -1,4 +1,15 @@ -SET skip_create_base_db=true - -CreateBaseDB.bat - +@echo off +REM ************************************************************************************ +REM * @---lm_copyright_start +REM * 5737-I23, 5900-A30 +REM * Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +REM * U.S. Government Users Restricted Rights: +REM * Use, duplication or disclosure restricted by GSA ADP Schedule +REM * Contract with IBM Corp. +REM * @---lm_copyright_end +REM ************************************************************************************ + +SET skip_create_base_db=true + +CreateBaseDB.bat + diff --git a/ACA/configuration-ha/DB2/InitBaseDB.sh b/ACA/configuration-ha/DB2/InitBaseDB.sh index 92bccdd3..4815ded2 100755 --- a/ACA/configuration-ha/DB2/InitBaseDB.sh +++ b/ACA/configuration-ha/DB2/InitBaseDB.sh @@ -1,4 +1,11 @@ #!/bin/bash +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end echo echo "==================================================" diff --git a/ACA/configuration-ha/DB2/InitTenantDB.bat b/ACA/configuration-ha/DB2/InitTenantDB.bat index 97d83a2b..ae05a2eb 100644 --- a/ACA/configuration-ha/DB2/InitTenantDB.bat +++ b/ACA/configuration-ha/DB2/InitTenantDB.bat @@ -1,4 +1,15 @@ -SET skip_create_tenant_db=true - -AddTenant.bat - +@echo off +REM ************************************************************************************ +REM * @---lm_copyright_start +REM * 5737-I23, 5900-A30 +REM * Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +REM * U.S. Government Users Restricted Rights: +REM * Use, duplication or disclosure restricted by GSA ADP Schedule +REM * Contract with IBM Corp. +REM * @---lm_copyright_end +REM ************************************************************************************ + +SET skip_create_tenant_db=true + +AddTenant.bat + diff --git a/ACA/configuration-ha/DB2/InitTenantDB.sh b/ACA/configuration-ha/DB2/InitTenantDB.sh index 182d5ebf..b19e754d 100755 --- a/ACA/configuration-ha/DB2/InitTenantDB.sh +++ b/ACA/configuration-ha/DB2/InitTenantDB.sh @@ -1,4 +1,11 @@ #!/bin/bash +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end echo echo "==================================================" diff --git a/ACA/configuration-ha/DB2/ScriptFunctions.sh b/ACA/configuration-ha/DB2/ScriptFunctions.sh index 4d40ce59..910b4952 100755 --- a/ACA/configuration-ha/DB2/ScriptFunctions.sh +++ b/ACA/configuration-ha/DB2/ScriptFunctions.sh @@ -1,5 +1,13 @@ #!/usr/bin/env bash +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. 
+#@---lm_copyright_end + function askForConfirmation(){ while [[ $confirmation != "y" && $confirmation != "n" && $confirmation != "yes" && $confirmation != "no" ]] # While confirmation is not y or n... do diff --git a/ACA/configuration-ha/DB2/UpdateTenantInfo_in_BaseDB.bat b/ACA/configuration-ha/DB2/UpdateTenantInfo_in_BaseDB.bat new file mode 100755 index 00000000..92fefe9d --- /dev/null +++ b/ACA/configuration-ha/DB2/UpdateTenantInfo_in_BaseDB.bat @@ -0,0 +1,47 @@ +@echo off +REM ************************************************************************************ +REM * @---lm_copyright_start +REM * 5737-I23, 5900-A30 +REM * Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +REM * U.S. Government Users Restricted Rights: +REM * Use, duplication or disclosure restricted by GSA ADP Schedule +REM * Contract with IBM Corp. +REM * @---lm_copyright_end +REM ************************************************************************************ + +SETLOCAL + +set /p base_db_name= Please enter a valid value for the base database name : +set /p base_db_user= Please enter a valid value for the base database user name : +set /p tenant_id= Please enter a valid value for the tenant ID: +set /p tenant_ontology= Please enter a valid value for the tenant ontology: + +echo +echo "-- Please confirm these are the desired settings:" +echo " - Base database name: %base_db_name%" +echo " - Base database user name: %base_db_user%" +echo " - Tenant ID: %tenant_id%" +echo " - Tenant ontology: %tenant_ontology%" + + +set /P c=Are you sure you want to continue[Y/N]? +if /I "%c%" EQU "Y" goto :DOCREATE +if /I "%c%" EQU "N" goto :DOEXIT + +:DOCREATE + echo "Connecting to db and schema" + db2 CONNECT TO %base_db_name% + db2 SET SCHEMA %base_db_user% + db2 update tenantinfo set TENANTDBVERSION=1.5 where TENANTID='%tenant_id%' and ONTOLOGY='%tenant_ontology%' + db2 update tenantinfo set FEATUREFLAGS=BITOR(2, (select FEATUREFLAGS from tenantinfo where TENANTID='%tenant_id%' and ONTOLOGY='%tenant_ontology%')) where TENANTID='%tenant_id%' and ONTOLOGY='%tenant_ontology%' + db2 connect reset + goto END + +:DOEXIT + echo "Exited on user input" + goto END + +:END + echo "END" + +ENDLOCAL \ No newline at end of file diff --git a/ACA/configuration-ha/DB2/UpdateTenantInfo_in_BaseDB.sh b/ACA/configuration-ha/DB2/UpdateTenantInfo_in_BaseDB.sh new file mode 100755 index 00000000..20ddb55f --- /dev/null +++ b/ACA/configuration-ha/DB2/UpdateTenantInfo_in_BaseDB.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end + +. ./ScriptFunctions.sh + +if [[ -z $INPUT_PROPS_FILENAME ]]; then + INPUT_PROPS_FILENAME="./common_for_DB2_Tenant_Upgrade.sh" +fi + +if [ -f $INPUT_PROPS_FILENAME ]; then + echo "Found a $INPUT_PROPS_FILENAME. Reading in variables from that script." + . 
$INPUT_PROPS_FILENAME +fi + +echo -e "\n-- This script will update the tenant's info in the TENANTINFO table in the base DB" +echo + +while [[ $base_db_name == '' ]] +do + echo "Please enter a valid value for the base database name :" + read base_db_name + while [ ${#base_db_name} -gt 8 ]; + do + echo "Please enter a valid value for the base database name :" + read base_db_name; + echo ${#base_db_name}; + done +done + +while [[ -z "$base_db_user" || $base_db_user == "" ]] +do + echo "Please enter a valid value for the base database user name :" + read base_db_user +done + +while [[ -z "$tenant_id" || $tenant_id == '' ]] +do + echo "Please enter a valid value for the tenant ID:" + read tenant_id +done + +while [[ -z "$tenant_ontology" || $tenant_ontology == '' ]] +do + echo "Please enter a valid value for the tenant ontology:" + read tenant_ontology +done + +echo +echo "-- Please confirm these are the desired settings:" +echo " - Base database name: $base_db_name" +echo " - Base database user name: $base_db_user" +echo " - Tenant ID: $tenant_id" +echo " - Tenant ontology: $tenant_ontology" +askForConfirmation + +cp sql/UpdateTenantInfo_in_BaseDB_1.4_to_1.5.sql.template sql/UpdateTenantInfo_in_BaseDB_1.4_to_1.5.sql +sed -i s/\$base_db_name/"$base_db_name"/g sql/UpdateTenantInfo_in_BaseDB_1.4_to_1.5.sql +sed -i s/\$base_db_user/"$base_db_user"/g sql/UpdateTenantInfo_in_BaseDB_1.4_to_1.5.sql +sed -i s/\$tenant_id/"$tenant_id"/g sql/UpdateTenantInfo_in_BaseDB_1.4_to_1.5.sql +sed -i s/\$tenant_ontology/"$tenant_ontology"/g sql/UpdateTenantInfo_in_BaseDB_1.4_to_1.5.sql +echo +echo "Running upgrade script: sql/UpdateTenantInfo_in_BaseDB_1.4_to_1.5.sql" +db2 -stvf sql/UpdateTenantInfo_in_BaseDB_1.4_to_1.5.sql \ No newline at end of file diff --git a/ACA/configuration-ha/DB2/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.bat b/ACA/configuration-ha/DB2/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.bat new file mode 100755 index 00000000..3115c91a --- /dev/null +++ b/ACA/configuration-ha/DB2/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.bat @@ -0,0 +1,47 @@ +@echo off +REM ************************************************************************************ +REM * @---lm_copyright_start +REM * 5737-I23, 5900-A30 +REM * Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +REM * U.S. Government Users Restricted Rights: +REM * Use, duplication or disclosure restricted by GSA ADP Schedule +REM * Contract with IBM Corp. +REM * @---lm_copyright_end +REM ************************************************************************************ + +SETLOCAL + +set /p base_db_name= Please enter a valid value for the base database name : +set /p base_db_user= Please enter a valid value for the base database user name : +set /p tenant_id= Please enter a valid value for the tenant ID: +set /p tenant_ontology= Please enter a valid value for the tenant ontology: + +echo +echo "-- Please confirm these are the desired settings:" +echo " - Base database name: %base_db_name%" +echo " - Base database user name: %base_db_user%" +echo " - Tenant ID: %tenant_id%" +echo " - Tenant ontology: %tenant_ontology%" + + +set /P c=Are you sure you want to continue[Y/N]? 
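+REM NOTE: Y continues and N exits; any other response also falls through to :DOCREATE. +REM :DOCREATE stamps TENANTDBVERSION=1.4 for the given tenant/ontology and ORs the value 4 into FEATUREFLAGS via BITOR, leaving any previously set flag bits intact.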
+if /I "%c%" EQU "Y" goto :DOCREATE +if /I "%c%" EQU "N" goto :DOEXIT + +:DOCREATE + echo "Connecting to db and schema" + db2 CONNECT TO %base_db_name% + db2 SET SCHEMA %base_db_user% + db2 update tenantinfo set TENANTDBVERSION=1.4 where TENANTID='%tenant_id%' and ONTOLOGY='%tenant_ontology%' + db2 update tenantinfo set FEATUREFLAGS=BITOR(4, (select FEATUREFLAGS from tenantinfo where TENANTID='%tenant_id%' and ONTOLOGY='%tenant_ontology%')) where TENANTID='%tenant_id%' and ONTOLOGY='%tenant_ontology%' + db2 connect reset + goto END + +:DOEXIT + echo "Exited on user input" + goto END + +:END + echo "END" + +ENDLOCAL \ No newline at end of file diff --git a/ACA/configuration-ha/DB2/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.sh b/ACA/configuration-ha/DB2/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.sh new file mode 100755 index 00000000..d6e251e7 --- /dev/null +++ b/ACA/configuration-ha/DB2/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.sh @@ -0,0 +1,68 @@ +#!/usr/bin/env bash +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end +. ./ScriptFunctions.sh + +if [[ -z $INPUT_PROPS_FILENAME ]]; then + INPUT_PROPS_FILENAME="./common_for_DB2_Tenant_Upgrade.sh" +fi + +if [ -f $INPUT_PROPS_FILENAME ]; then + echo "Found a $INPUT_PROPS_FILENAME. Reading in variables from that script." + . $INPUT_PROPS_FILENAME +fi + +echo -e "\n-- This script will update the tenant's info in the TENANTINFO table in the base DB" +echo + +while [[ $base_db_name == '' ]] +do + echo "Please enter a valid value for the base database name :" + read base_db_name + while [ ${#base_db_name} -gt 8 ]; + do + echo "Please enter a valid value for the base database name :" + read base_db_name; + echo ${#base_db_name}; + done +done + +while [[ -z "$base_db_user" || $base_db_user == "" ]] +do + echo "Please enter a valid value for the base database user name :" + read base_db_user +done + +while [[ -z "$tenant_id" || $tenant_id == '' ]] +do + echo "Please enter a valid value for the tenant ID:" + read tenant_id +done + +while [[ -z "$tenant_ontology" || $tenant_ontology == '' ]] +do + echo "Please enter a valid value for the tenant ontology:" + read tenant_ontology +done + +echo +echo "-- Please confirm these are the desired settings:" +echo " - Base database name: $base_db_name" +echo " - Base database user name: $base_db_user" +echo " - Tenant ID: $tenant_id" +echo " - Tenant ontology: $tenant_ontology" +askForConfirmation + +cp sql/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.sql.template sql/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.sql +sed -i s/\$base_db_name/"$base_db_name"/g sql/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.sql +sed -i s/\$base_db_user/"$base_db_user"/g sql/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.sql +sed -i s/\$tenant_id/"$tenant_id"/g sql/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.sql +sed -i s/\$tenant_ontology/"$tenant_ontology"/g sql/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.sql +echo +echo "Running upgrade script: sql/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.sql" +db2 -stvf sql/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.sql \ No newline at end of file diff --git a/ACA/configuration-ha/DB2/UpgradeBaseDB.bat b/ACA/configuration-ha/DB2/UpgradeBaseDB.bat new file mode 100644 index 00000000..9b4b6e2a --- /dev/null +++ b/ACA/configuration-ha/DB2/UpgradeBaseDB.bat @@ -0,0 +1,42 @@ +@echo off +REM 
************************************************************************************ +REM * @---lm_copyright_start +REM * 5737-I23, 5900-A30 +REM * Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +REM * U.S. Government Users Restricted Rights: +REM * Use, duplication or disclosure restricted by GSA ADP Schedule +REM * Contract with IBM Corp. +REM * @---lm_copyright_end +REM ************************************************************************************ + +SETLOCAL + +set /p base_db_name= Please enter a valid value for the base database name : +set /p base_db_user= Please enter a valid value for the base database user name : + + +echo +echo "-- Please confirm these are the desired settings:" +echo " - Base database name: %base_db_name%" +echo " - Base database user name: %base_db_user%" + +set /P c=Are you sure you want to continue[Y/N]? +if /I "%c%" EQU "Y" goto :DOCREATE +if /I "%c%" EQU "N" goto :DOEXIT + +:DOCREATE + echo "Connecting to db and schema" + db2 CONNECT TO %base_db_name% + db2 SET SCHEMA %base_db_user% + db2 alter table tenantinfo add column last_job_run_time BIGINT not null with default 0 + db2 alter table tenantinfo add column dbstatus smallint not null with default 0 + db2 update tenantinfo set bacaversion = 1.5 + db2 connect reset + goto END +:DOEXIT + echo "Exited on user input" + goto END +:END + echo "END" + +ENDLOCAL diff --git a/ACA/configuration-ha/DB2/UpgradeBaseDB.sh b/ACA/configuration-ha/DB2/UpgradeBaseDB.sh new file mode 100755 index 00000000..eb129ebc --- /dev/null +++ b/ACA/configuration-ha/DB2/UpgradeBaseDB.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end + +. ./ScriptFunctions.sh + +INPUT_PROPS_FILENAME="./common_for_DB2_Upgrade.sh" + +if [ -f $INPUT_PROPS_FILENAME ]; then + echo "Found a $INPUT_PROPS_FILENAME. Reading in variables from that script." + . $INPUT_PROPS_FILENAME +fi + +echo -e "\n-- This script will upgrade base DB" +echo + +while [[ $base_db_name == '' ]] +do + echo "Please enter a valid value for the base database name :" + read base_db_name + while [ ${#base_db_name} -gt 8 ]; + do + echo "Please enter a valid value for the base database name :" + read base_db_name; + echo ${#base_db_name}; + done +done + +while [[ -z "$base_db_user" || $base_db_user == "" ]] +do + echo "Please enter a valid value for the base database user name :" + read base_db_user +done + +echo +echo "-- Please confirm these are the desired settings:" +echo " - Base database name: $base_db_name" +echo " - Base database user name: $base_db_user" +askForConfirmation + + +cp sql/UpgradeBaseDB_1.4_to_1.5.sql.template sql/UpgradeBaseDB_1.4_to_1.5.sql +sed -i s/\$base_db_name/"$base_db_name"/ sql/UpgradeBaseDB_1.4_to_1.5.sql +sed -i s/\$base_db_user/"$base_db_user"/ sql/UpgradeBaseDB_1.4_to_1.5.sql +echo +echo "Running upgrade script: sql/UpgradeBaseDB_1.4_to_1.5.sql" +db2 -stvf sql/UpgradeBaseDB_1.4_to_1.5.sql \ No newline at end of file diff --git a/ACA/configuration-ha/DB2/UpgradeBaseDB_1.0_to_1.2.sh b/ACA/configuration-ha/DB2/UpgradeBaseDB_1.0_to_1.2.sh index 8409eb48..abcd35dc 100755 --- a/ACA/configuration-ha/DB2/UpgradeBaseDB_1.0_to_1.2.sh +++ b/ACA/configuration-ha/DB2/UpgradeBaseDB_1.0_to_1.2.sh @@ -1,4 +1,12 @@ #!/usr/bin/env bash +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 
2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end + . ./ScriptFunctions.sh INPUT_PROPS_FILENAME="./common_for_DB2_Upgrade.sh" diff --git a/ACA/configuration-ha/DB2/UpgradeBaseDB_1_0_to_1.2.bat b/ACA/configuration-ha/DB2/UpgradeBaseDB_1_0_to_1.2.bat index f2998146..bb2b5812 100644 --- a/ACA/configuration-ha/DB2/UpgradeBaseDB_1_0_to_1.2.bat +++ b/ACA/configuration-ha/DB2/UpgradeBaseDB_1_0_to_1.2.bat @@ -1,4 +1,13 @@ @echo off +REM ************************************************************************************ +REM * @---lm_copyright_start +REM * 5737-I23, 5900-A30 +REM * Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +REM * U.S. Government Users Restricted Rights: +REM * Use, duplication or disclosure restricted by GSA ADP Schedule +REM * Contract with IBM Corp. +REM * @---lm_copyright_end +REM ************************************************************************************ SETLOCAL diff --git a/ACA/configuration-ha/DB2/UpgradeTenantDB.bat b/ACA/configuration-ha/DB2/UpgradeTenantDB.bat index 2246a226..b29569ae 100644 --- a/ACA/configuration-ha/DB2/UpgradeTenantDB.bat +++ b/ACA/configuration-ha/DB2/UpgradeTenantDB.bat @@ -20,7 +20,7 @@ if /I "%c%" EQU "N" goto :DOEXIT echo "Connecting to db and schema" db2 connect to %tenant_db_name% db2 set schema %tenant_ontology% - db2 -stvf sql\WinUpgradeTenantDB_1.3_to_1.4.sql + db2 -stvf sql\WinUpgradeTenantDB_1.4_to_1.5.sql goto END :DOEXIT echo "Exited on user input" diff --git a/ACA/configuration-ha/DB2/UpgradeTenantDB.sh b/ACA/configuration-ha/DB2/UpgradeTenantDB.sh index 18d9f6eb..f08f7b95 100755 --- a/ACA/configuration-ha/DB2/UpgradeTenantDB.sh +++ b/ACA/configuration-ha/DB2/UpgradeTenantDB.sh @@ -1,4 +1,11 @@ #!/usr/bin/env bash +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end . ./ScriptFunctions.sh if [[ -z $INPUT_PROPS_FILENAME ]]; then @@ -10,7 +17,7 @@ if [ -f $INPUT_PROPS_FILENAME ]; then . 
$INPUT_PROPS_FILENAME fi -echo -e "\n-- This script will upgrade tenant DB from v1.3 to v1.4" +echo -e "\n-- This script will upgrade tenant DB from v1.4 to v1.5" echo while [[ $tenant_db_name == '' ]] @@ -44,11 +51,11 @@ echo " - tenant database name: $tenant_db_name" echo " - tenant database user name: $tenant_db_user" askForConfirmation -echo " -- upgrade from 1.3 to 1.4 ---" -cp sql/UpgradeTenantDB_1.3_to_1.4.sql.template sql/UpgradeTenantDB_1.3_to_1.4.sql -sed -i s/\$tenant_db_name/"$tenant_db_name"/ sql/UpgradeTenantDB_1.3_to_1.4.sql -sed -i s/\$tenant_ontology/"$tenant_ontology"/ sql/UpgradeTenantDB_1.3_to_1.4.sql -sed -i s/\$tenant_db_user/"$tenant_db_user"/ sql/UpgradeTenantDB_1.3_to_1.4.sql +echo " -- upgrade from 1.4 to 1.5 ---" +cp sql/UpgradeTenantDB_1.4_to_1.5.sql.template sql/UpgradeTenantDB_1.4_to_1.5.sql +sed -i s/\$tenant_db_name/"$tenant_db_name"/ sql/UpgradeTenantDB_1.4_to_1.5.sql +sed -i s/\$tenant_ontology/"$tenant_ontology"/ sql/UpgradeTenantDB_1.4_to_1.5.sql +sed -i s/\$tenant_db_user/"$tenant_db_user"/ sql/UpgradeTenantDB_1.4_to_1.5.sql echo -echo "Running upgrade script: sql/UpgradeTenantDB_1.3_to_1.4.sql" -db2 -stvf sql/UpgradeTenantDB_1.3_to_1.4.sql +echo "Running upgrade script: sql/UpgradeTenantDB_1.4_to_1.5.sql" +db2 -stvf sql/UpgradeTenantDB_1.4_to_1.5.sql diff --git a/ACA/configuration-ha/DB2/UpgradeTenantDB_1.0_to_1.2.bat b/ACA/configuration-ha/DB2/UpgradeTenantDB_1.0_to_1.2.bat index 89366f54..56a0e85c 100644 --- a/ACA/configuration-ha/DB2/UpgradeTenantDB_1.0_to_1.2.bat +++ b/ACA/configuration-ha/DB2/UpgradeTenantDB_1.0_to_1.2.bat @@ -1,4 +1,13 @@ @echo off +REM ************************************************************************************ +REM * @---lm_copyright_start +REM * 5737-I23, 5900-A30 +REM * Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +REM * U.S. Government Users Restricted Rights: +REM * Use, duplication or disclosure restricted by GSA ADP Schedule +REM * Contract with IBM Corp. +REM * @---lm_copyright_end +REM ************************************************************************************ SETLOCAL diff --git a/ACA/configuration-ha/DB2/UpgradeTenantDB_1.0_to_1.2.sh b/ACA/configuration-ha/DB2/UpgradeTenantDB_1.0_to_1.2.sh index cc173b84..7752410c 100644 --- a/ACA/configuration-ha/DB2/UpgradeTenantDB_1.0_to_1.2.sh +++ b/ACA/configuration-ha/DB2/UpgradeTenantDB_1.0_to_1.2.sh @@ -1,4 +1,11 @@ #!/usr/bin/env bash +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end . ./ScriptFunctions.sh if [[ -z $INPUT_PROPS_FILENAME ]]; then diff --git a/ACA/configuration-ha/DB2/UpgradeTenantDB_1.2_to_1.3.sh b/ACA/configuration-ha/DB2/UpgradeTenantDB_1.2_to_1.3.sh index 6652d94c..583ec692 100644 --- a/ACA/configuration-ha/DB2/UpgradeTenantDB_1.2_to_1.3.sh +++ b/ACA/configuration-ha/DB2/UpgradeTenantDB_1.2_to_1.3.sh @@ -1,4 +1,11 @@ #!/usr/bin/env bash +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end . 
./ScriptFunctions.sh if [[ -z $INPUT_PROPS_FILENAME ]]; then diff --git a/ACA/configuration-ha/DB2/UpgradeTenantDB_1.3_to_1.4.bat b/ACA/configuration-ha/DB2/UpgradeTenantDB_1.3_to_1.4.bat new file mode 100644 index 00000000..e8290d94 --- /dev/null +++ b/ACA/configuration-ha/DB2/UpgradeTenantDB_1.3_to_1.4.bat @@ -0,0 +1,40 @@ +@echo off +REM ************************************************************************************ +REM * @---lm_copyright_start +REM * 5737-I23, 5900-A30 +REM * Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +REM * U.S. Government Users Restricted Rights: +REM * Use, duplication or disclosure restricted by GSA ADP Schedule +REM * Contract with IBM Corp. +REM * @---lm_copyright_end +REM ************************************************************************************ + +SETLOCAL + +set /p tenant_db_name= Please enter a valid value for the tenant database name : +set /p tenant_db_user= Please enter a valid value for the tenant database user name : +set /p tenant_ontology= Please enter a valid value for the tenant ontology name : + +echo +echo "-- Please confirm these are the desired settings:" +echo " - tenant database name: %tenant_db_name%" +echo " - tenant database user name: %tenant_db_user%" +echo " - ontology name: %tenant_ontology%" + +set /P c=Are you sure you want to continue[Y/N]? +if /I "%c%" EQU "Y" goto :DOCREATE +if /I "%c%" EQU "N" goto :DOEXIT + +:DOCREATE + echo "Connecting to db and schema" + db2 connect to %tenant_db_name% + db2 set schema %tenant_ontology% + db2 -stvf sql\WinUpgradeTenantDB_1.3_to_1.4.sql + goto END +:DOEXIT + echo "Exited on user input" + goto END +:END + echo "END" + +ENDLOCAL diff --git a/ACA/configuration-ha/DB2/UpgradeTenantDB_1.3_to_1.4.sh b/ACA/configuration-ha/DB2/UpgradeTenantDB_1.3_to_1.4.sh new file mode 100644 index 00000000..4c41ea41 --- /dev/null +++ b/ACA/configuration-ha/DB2/UpgradeTenantDB_1.3_to_1.4.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +# @---lm_copyright_start +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end + +. ./ScriptFunctions.sh + +if [[ -z $INPUT_PROPS_FILENAME ]]; then + INPUT_PROPS_FILENAME="./common_for_DB2_Tenant_Upgrade.sh" +fi + +if [ -f $INPUT_PROPS_FILENAME ]; then + echo "Found a $INPUT_PROPS_FILENAME. Reading in variables from that script." + . 
$INPUT_PROPS_FILENAME +fi + +echo -e "\n-- This script will upgrade tenant DB from v1.3 to v1.4" +echo + +while [[ $tenant_db_name == '' ]] +do + echo "Please enter a valid value for the tenant database name :" + read tenant_db_name + while [ ${#tenant_db_name} -gt 8 ]; + do + echo "Please enter a valid value for the tenant database name :" + read tenant_db_name; + echo ${#tenant_db_name}; + done +done + +while [[ -z "$tenant_db_user" || $tenant_db_user == "" ]] +do + echo "Please enter a valid value for the tenant database user name :" + read tenant_db_user +done + +while [[ $tenant_ontology == '' ]] +do + echo "Please enter a valid value for the tenant ontology name :" + read tenant_ontology +done + +echo +echo "-- Please confirm these are the desired settings:" +echo " - ontology: $tenant_ontology" +echo " - tenant database name: $tenant_db_name" +echo " - tenant database user name: $tenant_db_user" +askForConfirmation + +echo " -- upgrade from 1.3 to 1.4 ---" +cp sql/UpgradeTenantDB_1.3_to_1.4.sql.template sql/UpgradeTenantDB_1.3_to_1.4.sql +sed -i s/\$tenant_db_name/"$tenant_db_name"/ sql/UpgradeTenantDB_1.3_to_1.4.sql +sed -i s/\$tenant_ontology/"$tenant_ontology"/ sql/UpgradeTenantDB_1.3_to_1.4.sql +sed -i s/\$tenant_db_user/"$tenant_db_user"/ sql/UpgradeTenantDB_1.3_to_1.4.sql +echo +echo "Running upgrade script: sql/UpgradeTenantDB_1.3_to_1.4.sql" +db2 -stvf sql/UpgradeTenantDB_1.3_to_1.4.sql diff --git a/ACA/configuration-ha/DB2/sql/CreateBacaTables.sql b/ACA/configuration-ha/DB2/sql/CreateBacaTables.sql index 872b11e4..e35330f2 100644 --- a/ACA/configuration-ha/DB2/sql/CreateBacaTables.sql +++ b/ACA/configuration-ha/DB2/sql/CreateBacaTables.sql @@ -21,7 +21,7 @@ create table doc_alias CONSTRAINT doc_alias_doc_alias_name_key UNIQUE (doc_alias_name) ); --- tables for object type library - (new)object_type, implementation, implementation_kc; (modified) key_class +-- tables for object type library create table object_type ( object_type_id INTEGER NOT NULL GENERATED ALWAYS AS IDENTITY (START WITH 1 INCREMENT BY 1 NO CYCLE), @@ -33,6 +33,7 @@ create table object_type flags INTEGER, version INTEGER, description VARCHAR(1024), + config BLOB (10M) NOT NULL default BLOB('e30='), CONSTRAINT object_type_object_type_id_key UNIQUE (object_type_id), CONSTRAINT object_type_pkey PRIMARY KEY (scope, symbolic_name) @@ -43,10 +44,12 @@ create table key_class key_class_id INTEGER NOT NULL GENERATED ALWAYS AS IDENTITY (START WITH 1 INCREMENT BY 1 NO CYCLE), key_class_name VARCHAR (512) NOT NULL, datatype INTEGER NOT NULL, - mandatory BOOLEAN, + mandatory INTEGER NOT NULL, sensitive BOOLEAN, comment VARCHAR(1024), config BLOB (10M) NOT NULL default empty_blob(), + flags SMALLINT NOT NULL default 0, + parent_id INTEGER NOT NULL default 0, CONSTRAINT key_class_pkey PRIMARY KEY (key_class_id), @@ -65,6 +68,25 @@ create table key_alias CONSTRAINT key_alias_key_alias_name_key UNIQUE (key_alias_name) ); +-- table to store the aliases of attribute instances inside key class +create table alias +( + key_class_id INTEGER NOT NULL, + alias_name VARCHAR (512) NOT NULL, + language CHAR(3) NOT NULL, + parent_id INTEGER NOT NULL, + + CONSTRAINT alias_pkey PRIMARY KEY (key_class_id, alias_name), + + CONSTRAINT alias_parent_id_alias_name_key UNIQUE (parent_id, alias_name), + + CONSTRAINT alias_key_class_id_fkey FOREIGN KEY (key_class_id) REFERENCES key_class (key_class_id) + ON UPDATE RESTRICT ON DELETE CASCADE, + + CONSTRAINT alias_parent_id_fkey FOREIGN KEY (parent_id) REFERENCES key_class (key_class_id) + ON 
UPDATE RESTRICT ON DELETE CASCADE +); + create table cword ( cword_id INTEGER NOT NULL GENERATED ALWAYS AS IDENTITY (START WITH 1 INCREMENT BY 1 NO CYCLE), @@ -375,23 +397,23 @@ create table fonts_transid CONSTRAINT fonts_transid_transid_key UNIQUE (transid) ); -create table db_backup -( - id INTEGER NOT NULL GENERATED ALWAYS AS IDENTITY (START WITH 1 INCREMENT BY 1 NO CYCLE), - date BIGINT NOT NULL, - frequency CHAR(15) NOT NULL, - type VARCHAR(1024) NOT NULL, - start_time BIGINT, - end_time BIGINT, - complete BOOLEAN DEFAULT 0, - failure BOOLEAN DEFAULT 0, - obj_cred_id INTEGER NOT NULL, - - CONSTRAINT db_backup_pkey PRIMARY KEY (id) - - --CONSTRAINT db_backup_obj_cred_id_fkey FOREIGN KEY (obj_cred_id) REFERENCES api_integrations_objectsstore (obj_cred_id) - --ON UPDATE RESTRICT ON DELETE CASCADE -); +-- create table db_backup +-- ( +-- id INTEGER NOT NULL GENERATED ALWAYS AS IDENTITY (START WITH 1 INCREMENT BY 1 NO CYCLE), +-- date BIGINT NOT NULL, +-- frequency CHAR(15) NOT NULL, +-- type VARCHAR(1024) NOT NULL, +-- start_time BIGINT, +-- end_time BIGINT, +-- complete BOOLEAN DEFAULT 0, +-- failure BOOLEAN DEFAULT 0, +-- obj_cred_id INTEGER NOT NULL, +-- +-- CONSTRAINT db_backup_pkey PRIMARY KEY (id) +-- +-- --CONSTRAINT db_backup_obj_cred_id_fkey FOREIGN KEY (obj_cred_id) REFERENCES api_integrations_objectsstore (obj_cred_id) +-- --ON UPDATE RESTRICT ON DELETE CASCADE +-- ); create table key_spacing ( @@ -442,16 +464,16 @@ create table error_log ON UPDATE RESTRICT ON DELETE CASCADE ); -create table db_restore -( - id INTEGER NOT NULL GENERATED ALWAYS AS IDENTITY (START WITH 1 INCREMENT BY 1 NO CYCLE), - start_time BIGINT, - end_time BIGINT, - complete BOOLEAN DEFAULT FALSE, - failure BOOLEAN DEFAULT FALSE, - - CONSTRAINT db_restore_pkey PRIMARY KEY (id) -); +-- create table db_restore +-- ( +-- id INTEGER NOT NULL GENERATED ALWAYS AS IDENTITY (START WITH 1 INCREMENT BY 1 NO CYCLE), +-- start_time BIGINT, +-- end_time BIGINT, +-- complete BOOLEAN DEFAULT FALSE, +-- failure BOOLEAN DEFAULT FALSE, +-- +-- CONSTRAINT db_restore_pkey PRIMARY KEY (id) +-- ); --flags -0 user defined and default 1. 
will be training set detected --rank -relative importance number 0.0 to 1.0 @@ -795,30 +817,79 @@ alter table processed_file add foreign key classifier_id_fkey(classifier_id) RE reorg table processed_file; -- END kvp/ml feature changes +--- Indexes ------- +create index ix_audit_api_activity_date ON audit_api_activity(date); +create index ix_audit_integration_activity_date ON audit_integration_activity(date); +create index ix_audit_login_activity_date ON audit_login_activity(date); +create index ix_audit_ontology_date ON audit_ontology(date); +create index ix_audit_processed_files_date ON audit_processed_files(date); +create index ix_audit_system_activity_date ON audit_system_activity(date); +create index ix_audit_user_activity_date ON audit_user_activity(date); +create index ix_error_log_date ON error_log(date); +create index ix_processed_file_date on processed_file(date); +--- End Indexes ------- + --replace mongo DB2 tables create table runtime_doc ( - transaction_id VARCHAR(256) NOT NULL , - initial_upload_time bigint, - file_name VARCHAR(1024), - org_content BLOB(250M) INLINE LENGTH 5120, - utf_content BLOB(250M) INLINE LENGTH 5120, - pdf_content BLOB(250M) INLINE LENGTH 5120, - wds_content BLOB(250M) INLINE LENGTH 5120, - params BLOB(250M) INLINE LENGTH 5120, - CONSTRAINT runtime_doc_pkey PRIMARY KEY (transaction_id) -); + TRANSACTION_ID VARCHAR(256) NOT NULL, + INITIAL_START_TIME BIGINT, + FILE_NAME VARCHAR(1024), + ORG_CONTENT BLOB(250M) INLINE LENGTH 5120, + UTF_CONTENT BLOB(250M), + PDF_CONTENT BLOB(250M), + WDS_CONTENT BLOB(250M), + DOC_PARAMS BLOB(10M), + FLAGS BIGINT NOT NULL DEFAULT 0, + API SMALLINT NOT NULL DEFAULT 0, + COMPLETED SMALLINT NOT NULL DEFAULT 0, + FAILED SMALLINT NOT NULL DEFAULT 0, + DOCUMENTACCURACY INTEGER NOT NULL DEFAULT 0, + COMPLETED_OCR_PAGES INTEGER NOT NULL DEFAULT 0, + OCR_PAGES_VERIFIED SMALLINT NOT NULL DEFAULT 0, + PROGRESS DECIMAL(5,2), + PARTIAL_COMPLETE_PAGES INTEGER NOT NULL DEFAULT 0, + COMPLETED_PAGES INTEGER NOT NULL DEFAULT 0, + VERIFIED SMALLINT NOT NULL DEFAULT 0, + USER_ID INTEGER NOT NULL DEFAULT 0, + PDF SMALLINT NOT NULL DEFAULT 0, + PDF_SUCCESS SMALLINT NOT NULL DEFAULT 0, + PDF_ERROR_LIST VARCHAR(1024), + PDF_PARAMS BLOB(1M), + UTF8 SMALLINT NOT NULL DEFAULT 0, + UTF8_SUCCESS SMALLINT NOT NULL DEFAULT 0, + UTF8_ERROR_LIST VARCHAR(1024), + UTF8_PARAMS BLOB(1M), + TITLE_LIST VARCHAR(32000), + ALIAS_LIST BLOB(1M), + + CONSTRAINT runtime_doc_pkey PRIMARY KEY (TRANSACTION_ID) +); + +create index IX_INITIAL_START_TIME ON runtime_doc(INITIAL_START_TIME); create table runtime_page ( - transaction_id VARCHAR(256) NOT NULL, - page_id SMALLINT NOT NULL, - jpg_content BLOB(250M) INLINE LENGTH 5120, - params BLOB(250M) INLINE LENGTH 5120, - - CONSTRAINT runtime_page_transaction_id_fkey FOREIGN KEY (transaction_id) REFERENCES runtime_doc (transaction_id) + TRANSACTION_ID VARCHAR(256) NOT NULL, + PAGE_ID SMALLINT NOT NULL, + JPG_CONTENT BLOB(250M), + PAGE_UUID VARCHAR(256), + PAGE_PARAMS BLOB(10M), + FLATTENEDJSON BLOB(10M), + GOODLETTERS INTEGER NOT NULL DEFAULT 0, + ALLLETTERS INTEGER NOT NULL DEFAULT 0, + COMPLETE SMALLINT NOT NULL DEFAULT 0, + OCR_CONFIDENCE VARCHAR(20), + LANGUAGES VARCHAR(256), + FLAGS BIGINT NOT NULL DEFAULT 0, + BAGOFWORDS BLOB(1M), + HEADER_LIST BLOB(1M), + FOUNDKEYLIST VARCHAR(1024), + DEFINEDKEYLIST VARCHAR(1024), + + CONSTRAINT runtime_page_transaction_id_fkey FOREIGN KEY (TRANSACTION_ID) REFERENCES runtime_doc (TRANSACTION_ID) ON UPDATE RESTRICT ON DELETE CASCADE, - CONSTRAINT runtime_page_pkey PRIMARY KEY 
(transaction_id, page_id) + CONSTRAINT runtime_page_pkey PRIMARY KEY (TRANSACTION_ID, PAGE_ID) ); --End replace mongo DB2 tables diff --git a/ACA/configuration-ha/DB2/sql/CreateBaseTable.sql.template b/ACA/configuration-ha/DB2/sql/CreateBaseTable.sql.template index 08abaae0..d7228e83 100644 --- a/ACA/configuration-ha/DB2/sql/CreateBaseTable.sql.template +++ b/ACA/configuration-ha/DB2/sql/CreateBaseTable.sql.template @@ -6,7 +6,7 @@ SET SCHEMA $base_db_user ; --Going forward bacaversion is base db schema version --tenantdbversion is tenant and ontology schema version -CREATE TABLE TENANTINFO +CREATE TABLE TENANTINFO (tenantid varchar(128) NOT NULL, ontology varchar(128) not null, tenanttype smallint not null with default, @@ -20,6 +20,8 @@ CREATE TABLE TENANTINFO mongoadminconnection varchar(1024) for bit data default null, featureflags bigint not null with default 0, tenantdbversion varchar(255), + last_job_run_time BIGINT not null with default 0, + dbstatus smallint not null with default 0, CONSTRAINT tenantinfo_pkey PRIMARY KEY (tenantid, ontology) ); diff --git a/ACA/configuration-ha/DB2/sql/DropBacaTables.sql b/ACA/configuration-ha/DB2/sql/DropBacaTables.sql index 0d14b15e..bbe0d9e1 100644 --- a/ACA/configuration-ha/DB2/sql/DropBacaTables.sql +++ b/ACA/configuration-ha/DB2/sql/DropBacaTables.sql @@ -32,6 +32,7 @@ drop table cword_dc; drop table key_alias_kc; drop table key_alias_dc; drop table key_class_dc; +drop table alias; drop table implementation_kc; drop table doc_alias_dc; drop table key_alias; @@ -49,5 +50,7 @@ drop table kvp_model_detail; drop table document; drop table runtime_page; drop table runtime_doc; +drop table pageparams; +drop table docparams; drop sequence MINOR_VER_SEQ; diff --git a/ACA/configuration-ha/DB2/sql/InsertTenant.sql.template b/ACA/configuration-ha/DB2/sql/InsertTenant.sql.template index 5e531af7..56809aea 100644 --- a/ACA/configuration-ha/DB2/sql/InsertTenant.sql.template +++ b/ACA/configuration-ha/DB2/sql/InsertTenant.sql.template @@ -1,4 +1,4 @@ connect to $base_db_name ; set schema $base_db_user ; -insert into TENANTINFO (tenantid,ontology,tenanttype,dailylimit,rdbmsengine,bacaversion,rdbmsconnection,dbname,dbuser,tenantdbversion,featureflags) values ( '$tenant_id', '$tenant_ontology', $tenant_type, $daily_limit, 'DB2', '1.4', encrypt('$rdbmsconnection','AES_KEY'),'$tenant_db_name','$tenant_db_user','1.4',4) ; +insert into TENANTINFO (tenantid,ontology,tenanttype,dailylimit,rdbmsengine,bacaversion,rdbmsconnection,dbname,dbuser,tenantdbversion,featureflags) values ( '$tenant_id', '$tenant_ontology', $tenant_type, $daily_limit, 'DB2', '1.5', encrypt('$rdbmsconnection','AES_KEY'),'$tenant_db_name','$tenant_db_user','1.5',6) ; connect reset ; diff --git a/ACA/configuration-ha/DB2/sql/TablePermissions.sql.template b/ACA/configuration-ha/DB2/sql/TablePermissions.sql.template index ea446f24..bbbb49d2 100644 --- a/ACA/configuration-ha/DB2/sql/TablePermissions.sql.template +++ b/ACA/configuration-ha/DB2/sql/TablePermissions.sql.template @@ -15,10 +15,10 @@ GRANT ALTER ON TABLE $tenant_ontology.API_INTEGRATIONS_OBJECTSSTORE TO USER $ten GRANT ALTER ON TABLE $tenant_ontology.SMARTPAGES_OPTIONS TO USER $tenant_db_user ; GRANT ALTER ON TABLE $tenant_ontology.FONTS TO USER $tenant_db_user ; GRANT ALTER ON TABLE $tenant_ontology.FONTS_TRANSID TO USER $tenant_db_user ; -GRANT ALTER ON TABLE $tenant_ontology.DB_BACKUP TO USER $tenant_db_user ; GRANT ALTER ON TABLE $tenant_ontology.PATTERN TO USER $tenant_db_user ; GRANT ALTER ON TABLE $tenant_ontology.DOCUMENT TO 
USER $tenant_db_user ; GRANT ALTER ON TABLE $tenant_ontology.TRAINING_LOG TO USER $tenant_db_user ; GRANT ALTER ON TABLE $tenant_ontology.IMPLEMENTATION TO USER $tenant_db_user ; +GRANT ALTER ON TABLE $tenant_ontology.ALIAS TO USER $tenant_db_user ; CONNECT RESET; diff --git a/ACA/configuration-ha/DB2/sql/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.sql.template b/ACA/configuration-ha/DB2/sql/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.sql.template new file mode 100644 index 00000000..1d2e5dee --- /dev/null +++ b/ACA/configuration-ha/DB2/sql/UpdateTenantInfo_in_BaseDB_1.3_to_1.4.sql.template @@ -0,0 +1,8 @@ +--base DB changes +connect to $base_db_name ; +set schema $base_db_user ; + +update tenantinfo set FEATUREFLAGS=(4 | (select FEATUREFLAGS from tenantinfo where TENANTID='$tenant_id' and ONTOLOGY='$tenant_ontology')) where TENANTID='$tenant_id' and ONTOLOGY='$tenant_ontology' ; +update tenantinfo set TENANTDBVERSION=1.4 where TENANTID='$tenant_id' and ONTOLOGY='$tenant_ontology' ; + +connect reset ; \ No newline at end of file diff --git a/ACA/configuration-ha/DB2/sql/UpdateTenantInfo_in_BaseDB_1.4_to_1.5.sql.template b/ACA/configuration-ha/DB2/sql/UpdateTenantInfo_in_BaseDB_1.4_to_1.5.sql.template new file mode 100644 index 00000000..752e3aab --- /dev/null +++ b/ACA/configuration-ha/DB2/sql/UpdateTenantInfo_in_BaseDB_1.4_to_1.5.sql.template @@ -0,0 +1,7 @@ +--base DB changes +connect to $base_db_name ; +set schema $base_db_user ; + +update tenantinfo set TENANTDBVERSION=1.5 where TENANTID='$tenant_id' and ONTOLOGY='$tenant_ontology' ; +update tenantinfo set FEATUREFLAGS=(2 | (select FEATUREFLAGS from tenantinfo where TENANTID='$tenant_id' and ONTOLOGY='$tenant_ontology')) where TENANTID='$tenant_id' and ONTOLOGY='$tenant_ontology' ; +connect reset ; \ No newline at end of file diff --git a/ACA/configuration-ha/DB2/sql/UpgradeBaseDB_1.4_to_1.5.sql.template b/ACA/configuration-ha/DB2/sql/UpgradeBaseDB_1.4_to_1.5.sql.template new file mode 100644 index 00000000..fc9827e7 --- /dev/null +++ b/ACA/configuration-ha/DB2/sql/UpgradeBaseDB_1.4_to_1.5.sql.template @@ -0,0 +1,10 @@ +--base DB changes +connect to $base_db_name ; +set schema $base_db_user ; + +alter table tenantinfo add column last_job_run_time BIGINT not null with default 0; +alter table tenantinfo add column dbstatus smallint not null with default 0; +update tenantinfo set bacaversion = 1.5; +reorg table tenantinfo; + +connect reset; diff --git a/ACA/configuration-ha/DB2/sql/UpgradeTenantDB_1.3_to_1.4.sql.template b/ACA/configuration-ha/DB2/sql/UpgradeTenantDB_1.3_to_1.4.sql.template index 43ec9ff8..f090b6ce 100644 --- a/ACA/configuration-ha/DB2/sql/UpgradeTenantDB_1.3_to_1.4.sql.template +++ b/ACA/configuration-ha/DB2/sql/UpgradeTenantDB_1.3_to_1.4.sql.template @@ -140,4 +140,55 @@ create table runtime_page CONSTRAINT runtime_page_pkey PRIMARY KEY (transaction_id, page_id) ); + +create table docparams +( + TRANSACTION_ID VARCHAR(256) NOT NULL, + DOC_PARAMS BLOB(1M), + API SMALLINT NOT NULL DEFAULT 0, + COMPLETED SMALLINT NOT NULL DEFAULT 0, + FAILED SMALLINT NOT NULL DEFAULT 0, + DOCUMENTACCURACY INTEGER NOT NULL DEFAULT 0, + COMPLETED_OCR_PAGES INTEGER NOT NULL DEFAULT 0, + OCR_PAGES_VERIFIED SMALLINT NOT NULL DEFAULT 0, + PROGRESS SMALLINT NOT NULL DEFAULT 0, + PARTIAL_COMPLETE_PAGES INTEGER NOT NULL DEFAULT 0, + COMPLETED_PAGES INTEGER NOT NULL DEFAULT 0, + VERIFIED SMALLINT NOT NULL DEFAULT 0, + USER_ID INTEGER NOT NULL DEFAULT 0, + PDF SMALLINT NOT NULL DEFAULT 0, + PDF_SUCCESS SMALLINT NOT NULL DEFAULT 0, + PDF_ERROR_LIST VARCHAR(1024), 
+ PDF_PARAMS BLOB(1M), + UTF8 SMALLINT NOT NULL DEFAULT 0, + UTF8_SUCCESS SMALLINT NOT NULL DEFAULT 0, + UTF8_ERROR_LIST VARCHAR(1024), + UTF8_PARAMS BLOB(1M), + TITLE_LIST VARCHAR(32000), + ALIAS_LIST BLOB(1M), + CONSTRAINT docparams_pkey PRIMARY KEY (transaction_id) +); + +create table pageparams +( + TRANSACTION_ID VARCHAR(256) NOT NULL, + PAGE_ID INTEGER NOT NULL, + PAGE_UUID VARCHAR(256), + PAGE_PARAMS BLOB(10M), + FLATTENEDJSON BLOB(10M), + GOODLETTERS INTEGER NOT NULL DEFAULT 0, + ALLLETTERS INTEGER NOT NULL DEFAULT 0, + COMPLETE SMALLINT NOT NULL DEFAULT 0, + OCR_CONFIDENCE VARCHAR(20), + LANGUAGES VARCHAR(256), + BAGOFWORDS BLOB(1M), + HEADER_LIST BLOB(1M), + FOUNDKEYLIST BLOB(1M), + DEFINEDKEYLIST BLOB(1M), + CONSTRAINT pageparams_transaction_id_fkey FOREIGN KEY (transaction_id) REFERENCES docparams (transaction_id) + ON UPDATE RESTRICT ON DELETE CASCADE, + + CONSTRAINT pageparams_pkey PRIMARY KEY (transaction_id, page_id) +); + --End replace mongo DB2 tables diff --git a/ACA/configuration-ha/DB2/sql/UpgradeTenantDB_1.4_to_1.5.sql.template b/ACA/configuration-ha/DB2/sql/UpgradeTenantDB_1.4_to_1.5.sql.template new file mode 100644 index 00000000..eeabc7f8 --- /dev/null +++ b/ACA/configuration-ha/DB2/sql/UpgradeTenantDB_1.4_to_1.5.sql.template @@ -0,0 +1,134 @@ +connect to $tenant_db_name ; +set schema $tenant_ontology ; + +-- table to store the aliases of attribute instances inside key class +create table alias +( + key_class_id INTEGER NOT NULL, + alias_name VARCHAR (512) NOT NULL, + language CHAR(3) NOT NULL, + parent_id INTEGER NOT NULL, + + CONSTRAINT alias_pkey PRIMARY KEY (key_class_id, alias_name), + + CONSTRAINT alias_parent_id_alias_name_key UNIQUE (parent_id, alias_name), + + CONSTRAINT alias_key_class_id_fkey FOREIGN KEY (key_class_id) REFERENCES key_class (key_class_id) + ON UPDATE RESTRICT ON DELETE CASCADE, + + CONSTRAINT alias_parent_id_fkey FOREIGN KEY (parent_id) REFERENCES key_class (key_class_id) + ON UPDATE RESTRICT ON DELETE CASCADE +); + +GRANT ALTER ON TABLE $tenant_ontology.alias TO USER $tenant_db_user ; + +alter table object_type add column config BLOB(10M) NOT NULL default BLOB('e30='); +reorg table object_type; + +alter table key_class alter column mandatory set data type INTEGER; +reorg table key_class; + +alter table key_class add column flags SMALLINT NOT NULL default 0; +reorg table key_class; + +-- TODO: +-- insert into key_class row 0 for __root +-- (key_class_id, key_class_name, datatype, mandatory, sensitive, comment, config, flags, parent_id) +-- values +-- (0, '__root', 1, 0, 1, 'Reserved Key Class', BLOB('e30='), 0, 0, 0, 0); +-- reorg table key_class; + +alter table key_class add column parent_id INTEGER NOT NULL default 0; +reorg table key_class; + + +-- ************************************************************************************************** +-- delete unused tables +-- ************************************************************************************************** +drop table db_restore; +drop table db_backup; + +-- ************************************************************************************************** +-- create indexes on date fields +-- ************************************************************************************************** +create index ix_audit_api_activity_date ON audit_api_activity(date); +create index ix_audit_integration_activity_date ON audit_integration_activity(date); +create index ix_audit_login_activity_date ON audit_login_activity(date); +create index ix_audit_ontology_date ON 
audit_ontology(date); +create index ix_audit_processed_files_date ON audit_processed_files(date); +create index ix_audit_system_activity_date ON audit_system_activity(date); +create index ix_audit_user_activity_date ON audit_user_activity(date); +create index ix_error_log_date ON error_log(date); +create index ix_processed_file_date on processed_file(date); +-- ************************************************************************************************** +-- delete all db2 storage tables and recreate new two tables +-- ************************************************************************************************** +drop table pageparams; +drop table docparams; +drop table runtime_page; +drop table runtime_doc; + +--replace mongo DB2 tables +create table runtime_doc +( + TRANSACTION_ID VARCHAR(256) NOT NULL, + INITIAL_START_TIME BIGINT, + FILE_NAME VARCHAR(1024), + ORG_CONTENT BLOB(250M) INLINE LENGTH 5120, + UTF_CONTENT BLOB(250M), + PDF_CONTENT BLOB(250M), + WDS_CONTENT BLOB(250M), + DOC_PARAMS BLOB(10M), + FLAGS BIGINT NOT NULL DEFAULT 0, + API SMALLINT NOT NULL DEFAULT 0, + COMPLETED SMALLINT NOT NULL DEFAULT 0, + FAILED SMALLINT NOT NULL DEFAULT 0, + DOCUMENTACCURACY INTEGER NOT NULL DEFAULT 0, + COMPLETED_OCR_PAGES INTEGER NOT NULL DEFAULT 0, + OCR_PAGES_VERIFIED SMALLINT NOT NULL DEFAULT 0, + PROGRESS DECIMAL(5,2), + PARTIAL_COMPLETE_PAGES INTEGER NOT NULL DEFAULT 0, + COMPLETED_PAGES INTEGER NOT NULL DEFAULT 0, + VERIFIED SMALLINT NOT NULL DEFAULT 0, + USER_ID INTEGER NOT NULL DEFAULT 0, + PDF SMALLINT NOT NULL DEFAULT 0, + PDF_SUCCESS SMALLINT NOT NULL DEFAULT 0, + PDF_ERROR_LIST VARCHAR(1024), + PDF_PARAMS BLOB(1M), + UTF8 SMALLINT NOT NULL DEFAULT 0, + UTF8_SUCCESS SMALLINT NOT NULL DEFAULT 0, + UTF8_ERROR_LIST VARCHAR(1024), + UTF8_PARAMS BLOB(1M), + TITLE_LIST VARCHAR(32000), + ALIAS_LIST BLOB(1M), + + CONSTRAINT runtime_doc_pkey PRIMARY KEY (TRANSACTION_ID) +); + +create index IX_INITIAL_START_TIME ON runtime_doc(INITIAL_START_TIME); + +create table runtime_page +( + TRANSACTION_ID VARCHAR(256) NOT NULL, + PAGE_ID SMALLINT NOT NULL, + JPG_CONTENT BLOB(250M), + PAGE_UUID VARCHAR(256), + PAGE_PARAMS BLOB(10M), + FLATTENEDJSON BLOB(10M), + GOODLETTERS INTEGER NOT NULL DEFAULT 0, + ALLLETTERS INTEGER NOT NULL DEFAULT 0, + COMPLETE SMALLINT NOT NULL DEFAULT 0, + OCR_CONFIDENCE VARCHAR(20), + LANGUAGES VARCHAR(256), + FLAGS BIGINT NOT NULL DEFAULT 0, + BAGOFWORDS BLOB(1M), + HEADER_LIST BLOB(1M), + FOUNDKEYLIST VARCHAR(1024), + DEFINEDKEYLIST VARCHAR(1024), + + CONSTRAINT runtime_page_transaction_id_fkey FOREIGN KEY (TRANSACTION_ID) REFERENCES runtime_doc (TRANSACTION_ID) + ON UPDATE RESTRICT ON DELETE CASCADE, + + CONSTRAINT runtime_page_pkey PRIMARY KEY (TRANSACTION_ID, PAGE_ID) +); +--End replace mongo DB2 tables diff --git a/ACA/configuration-ha/DB2/sql/WinUpgradeTenantDB_1.3_to_1.4.sql b/ACA/configuration-ha/DB2/sql/WinUpgradeTenantDB_1.3_to_1.4.sql index 97bddccc..3f0f61fb 100644 --- a/ACA/configuration-ha/DB2/sql/WinUpgradeTenantDB_1.3_to_1.4.sql +++ b/ACA/configuration-ha/DB2/sql/WinUpgradeTenantDB_1.3_to_1.4.sql @@ -130,4 +130,55 @@ create table runtime_page CONSTRAINT runtime_page_pkey PRIMARY KEY (transaction_id, page_id) ); + +create table docparams +( + TRANSACTION_ID VARCHAR(256) NOT NULL, + DOC_PARAMS BLOB(1M), + API SMALLINT NOT NULL DEFAULT 0, + COMPLETED SMALLINT NOT NULL DEFAULT 0, + FAILED SMALLINT NOT NULL DEFAULT 0, + DOCUMENTACCURACY INTEGER NOT NULL DEFAULT 0, + COMPLETED_OCR_PAGES INTEGER NOT NULL DEFAULT 0, + OCR_PAGES_VERIFIED SMALLINT NOT NULL 
DEFAULT 0, + PROGRESS SMALLINT NOT NULL DEFAULT 0, + PARTIAL_COMPLETE_PAGES INTEGER NOT NULL DEFAULT 0, + COMPLETED_PAGES INTEGER NOT NULL DEFAULT 0, + VERIFIED SMALLINT NOT NULL DEFAULT 0, + USER_ID INTEGER NOT NULL DEFAULT 0, + PDF SMALLINT NOT NULL DEFAULT 0, + PDF_SUCCESS SMALLINT NOT NULL DEFAULT 0, + PDF_ERROR_LIST VARCHAR(1024), + PDF_PARAMS BLOB(1M), + UTF8 SMALLINT NOT NULL DEFAULT 0, + UTF8_SUCCESS SMALLINT NOT NULL DEFAULT 0, + UTF8_ERROR_LIST VARCHAR(1024), + UTF8_PARAMS BLOB(1M), + TITLE_LIST VARCHAR(32000), + ALIAS_LIST BLOB(1M), + CONSTRAINT docparams_pkey PRIMARY KEY (transaction_id) +); + +create table pageparams +( + TRANSACTION_ID VARCHAR(256) NOT NULL, + PAGE_ID INTEGER NOT NULL, + PAGE_UUID VARCHAR(256), + PAGE_PARAMS BLOB(10M), + FLATTENEDJSON BLOB(10M), + GOODLETTERS INTEGER NOT NULL DEFAULT 0, + ALLLETTERS INTEGER NOT NULL DEFAULT 0, + COMPLETE SMALLINT NOT NULL DEFAULT 0, + OCR_CONFIDENCE VARCHAR(20), + LANGUAGES VARCHAR(256), + BAGOFWORDS BLOB(1M), + HEADER_LIST BLOB(1M), + FOUNDKEYLIST BLOB(1M), + DEFINEDKEYLIST BLOB(1M), + CONSTRAINT pageparams_transaction_id_fkey FOREIGN KEY (transaction_id) REFERENCES docparams (transaction_id) + ON UPDATE RESTRICT ON DELETE CASCADE, + + CONSTRAINT pageparams_pkey PRIMARY KEY (transaction_id, page_id) +); + --End replace mongo DB2 tables \ No newline at end of file diff --git a/ACA/configuration-ha/DB2/sql/WinUpgradeTenantDB_1.4_to_1.5.sql b/ACA/configuration-ha/DB2/sql/WinUpgradeTenantDB_1.4_to_1.5.sql new file mode 100644 index 00000000..9d537693 --- /dev/null +++ b/ACA/configuration-ha/DB2/sql/WinUpgradeTenantDB_1.4_to_1.5.sql @@ -0,0 +1,139 @@ +connect to $tenant_db_name ; +set schema $tenant_ontology ; + +-- table to store the aliases of attribute instances inside key class +create table alias +( + key_class_id INTEGER NOT NULL, + alias_name VARCHAR (512) NOT NULL, + language CHAR(3) NOT NULL, + parent_id INTEGER NOT NULL, + + CONSTRAINT alias_pkey PRIMARY KEY (key_class_id, alias_name), + + CONSTRAINT alias_parent_id_alias_name_key UNIQUE (parent_id, alias_name), + + CONSTRAINT alias_key_class_id_fkey FOREIGN KEY (key_class_id) REFERENCES key_class (key_class_id) + ON UPDATE RESTRICT ON DELETE CASCADE, + + CONSTRAINT alias_parent_id_fkey FOREIGN KEY (parent_id) REFERENCES key_class (key_class_id) + ON UPDATE RESTRICT ON DELETE CASCADE +); + +GRANT ALTER ON TABLE $tenant_ontology.alias TO USER $tenant_db_user ; + + + +alter table object_type add column config BLOB(10M) NOT NULL default BLOB('e30='); +reorg table object_type; + + + +alter table key_class alter column mandatory set data type INTEGER; +reorg table key_class; + +alter table key_class add column flags SMALLINT NOT NULL default 0; +reorg table key_class; + +-- TODO: +-- insert into key_class row 0 for __root +-- (key_class_id, key_class_name, datatype, mandatory, sensitive, comment, config, flags, parent_id) +-- values +-- (0, '__root', 1, 0, 1, 'Reserved Key Class', BLOB('e30='), 0, 0, 0, 0); +-- reorg table key_class; + +alter table key_class add column parent_id INTEGER NOT NULL default 0; +reorg table key_class; + + +-- ************************************************************************************************** +-- delete unused tables +-- ************************************************************************************************** +drop table db_restore; +drop table db_backup; + +-- ************************************************************************************************** +-- create indexes on date fields +-- 
************************************************************************************************** +create index ix_audit_api_activity_date ON audit_api_activity(date); +create index ix_audit_integration_activity_date ON audit_integration_activity(date); +create index ix_audit_login_activity_date ON audit_login_activity(date); +create index ix_audit_ontology_date ON audit_ontology(date); +create index ix_audit_processed_files_date ON audit_processed_files(date); +create index ix_audit_system_activity_date ON audit_system_activity(date); +create index ix_audit_user_activity_date ON audit_user_activity(date); +create index ix_error_log_date ON error_log(date); +create index ix_processed_file_date on processed_file(date); +-- ************************************************************************************************** +-- delete all db2 storage tables and recreate new two tables +-- ************************************************************************************************** +drop table pageparams; +drop table docparams; +drop table runtime_page; +drop table runtime_doc; + +--replace mongo DB2 tables +create table runtime_doc +( + TRANSACTION_ID VARCHAR(256) NOT NULL, + INITIAL_START_TIME BIGINT, + FILE_NAME VARCHAR(1024), + ORG_CONTENT BLOB(250M) INLINE LENGTH 5120, + UTF_CONTENT BLOB(250M), + PDF_CONTENT BLOB(250M), + WDS_CONTENT BLOB(250M), + DOC_PARAMS BLOB(10M), + FLAGS BIGINT NOT NULL DEFAULT 0, + API SMALLINT NOT NULL DEFAULT 0, + COMPLETED SMALLINT NOT NULL DEFAULT 0, + FAILED SMALLINT NOT NULL DEFAULT 0, + DOCUMENTACCURACY INTEGER NOT NULL DEFAULT 0, + COMPLETED_OCR_PAGES INTEGER NOT NULL DEFAULT 0, + OCR_PAGES_VERIFIED SMALLINT NOT NULL DEFAULT 0, + PROGRESS DECIMAL(5,2), + PARTIAL_COMPLETE_PAGES INTEGER NOT NULL DEFAULT 0, + COMPLETED_PAGES INTEGER NOT NULL DEFAULT 0, + VERIFIED SMALLINT NOT NULL DEFAULT 0, + USER_ID INTEGER NOT NULL DEFAULT 0, + PDF SMALLINT NOT NULL DEFAULT 0, + PDF_SUCCESS SMALLINT NOT NULL DEFAULT 0, + PDF_ERROR_LIST VARCHAR(1024), + PDF_PARAMS BLOB(1M), + UTF8 SMALLINT NOT NULL DEFAULT 0, + UTF8_SUCCESS SMALLINT NOT NULL DEFAULT 0, + UTF8_ERROR_LIST VARCHAR(1024), + UTF8_PARAMS BLOB(1M), + TITLE_LIST VARCHAR(32000), + ALIAS_LIST BLOB(1M), + + CONSTRAINT runtime_doc_pkey PRIMARY KEY (TRANSACTION_ID) +); + +create index IX_INITIAL_START_TIME ON runtime_doc(INITIAL_START_TIME); + +create table runtime_page +( + TRANSACTION_ID VARCHAR(256) NOT NULL, + PAGE_ID SMALLINT NOT NULL, + JPG_CONTENT BLOB(250M), + PAGE_UUID VARCHAR(256), + PAGE_PARAMS BLOB(10M), + FLATTENEDJSON BLOB(10M), + GOODLETTERS INTEGER NOT NULL DEFAULT 0, + ALLLETTERS INTEGER NOT NULL DEFAULT 0, + COMPLETE SMALLINT NOT NULL DEFAULT 0, + OCR_CONFIDENCE VARCHAR(20), + LANGUAGES VARCHAR(256), + FLAGS BIGINT NOT NULL DEFAULT 0, + BAGOFWORDS BLOB(1M), + HEADER_LIST BLOB(1M), + FOUNDKEYLIST VARCHAR(1024), + DEFINEDKEYLIST VARCHAR(1024), + + CONSTRAINT runtime_page_transaction_id_fkey FOREIGN KEY (TRANSACTION_ID) REFERENCES runtime_doc (TRANSACTION_ID) + ON UPDATE RESTRICT ON DELETE CASCADE, + + CONSTRAINT runtime_page_pkey PRIMARY KEY (TRANSACTION_ID, PAGE_ID) +); + +-- End of new tables for db2 storage
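The upgrade scripts above take the tenant database name, ontology schema, and database user as `$`-style placeholders. A minimal sketch of expanding and running one of the templates with the DB2 command line, assuming a Linux DB2 client with `envsubst` available and illustrative names such as `TENANTDB`:

```bash
# Expand the $tenant_* placeholders and run the result against DB2.
# The profile path, database, schema, and user names are assumptions.
source /home/db2inst1/sqllib/db2profile
export tenant_db_name=TENANTDB
export tenant_ontology=ONT1
export tenant_db_user=db2inst1
envsubst < UpgradeTenantDB_1.4_to_1.5.sql.template > UpgradeTenantDB_1.4_to_1.5.sql
db2 -stvf UpgradeTenantDB_1.4_to_1.5.sql
```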
diff --git a/ACA/configuration-ha/security/aca-netpol.yaml b/ACA/configuration-ha/security/aca-netpol.yaml deleted file mode 100644 index 106f8b85..00000000 --- a/ACA/configuration-ha/security/aca-netpol.yaml +++ /dev/null @@ -1,13 +0,0 @@ -kind: NetworkPolicy -apiVersion: networking.k8s.io/v1 -metadata: - namespace: $KUBE_NAME_SPACE - name: aca-netpol -spec: - ingress: - - {} - podSelector: - matchLabels: - productID: ibm-dba-aca-prod - policyTypes: - - Ingress \ No newline at end of file diff --git a/ACA/configuration-ha/utils/aca_logs_collection.sh b/ACA/configuration-ha/utils/aca_logs_collection.sh new file mode 100644 index 00000000..450f0026 --- /dev/null +++ b/ACA/configuration-ha/utils/aca_logs_collection.sh @@ -0,0 +1,74 @@ +#!/usr/bin/env bash + +############################################################################### +# @---lm_copyright_start +# Licensed Materials - Property of IBM +# 5737-I23, 5900-A30 +# Copyright IBM Corp. 2018 - 2020. All Rights Reserved. +# U.S. Government Users Restricted Rights: +# Use, duplication or disclosure restricted by GSA ADP Schedule +# Contract with IBM Corp. +#@---lm_copyright_end +############################################################################### + + +#This script is used to collect logs for IBM Business Automation Content Analyzer. + +export CA_CONTAINERS="frontend,backend,callerapi,ocr-extraction,pdfprocess,setup,classifyprocess-classify,processing-extraction,postprocessing,updatefiledetail,utf8process" +export TMP_DIR="/tmp/aca" +echo "=======================================" + +echo -e "\x1B[1;31mThis is a utility script to collect the logs for all ACA pods. The logs will be saved in the $TMP_DIR directory. You must log on to your cluster and switch to the namespace where ACA is deployed before running this script. \x1B[0m" + +echo "=======================================" + +while [[ $confirm != "n" && $confirm != "y" && $confirm != "yes" && $confirm != "no" ]] +do + echo -e "\x1B[1;31mWould you like to continue (y/n):\x1B[0m" + read confirm + confirm=$(echo "$confirm" | tr '[:upper:]' '[:lower:]') +done + +if [[ $confirm == "n" || $confirm == "no" ]] +then + echo "Exiting...." + exit 1 +fi + +if [[ ! -d $TMP_DIR ]]; then + echo "Cannot find $TMP_DIR. Creating $TMP_DIR...." + mkdir -p $TMP_DIR +fi + +if [[ $? -ne 0 ]]; then + echo -e "\x1B[1;31mFailed to create $TMP_DIR. Please make sure you have permission to create sub-directories in /tmp\x1B[0m" + echo "Exiting...." + exit 1 +fi + +echo "About to get ACA logs from $(kubectl config current-context | awk -F '/' {'print $1'} ) namespace" + + +for c in $(echo $CA_CONTAINERS | sed "s/,/ /g") +do + if [[ $c == "frontend" || $c == "backend" ]]; then + echo "=======================================" + echo "Get the first pod for $c" + aca=$(kubectl get po |grep $c | head -1 | awk {'print $1'}) + echo "Tar up logs in $aca" + kubectl exec $aca -- tar -cf /var/www/app/current/$c.tar /var/log/$c + echo "Copy log from $aca to $TMP_DIR/$c" + kubectl cp $aca:/var/www/app/current/$c.tar $TMP_DIR/$c.tar + else + echo "=======================================" + echo "Get the first pod for $c" + aca=$(kubectl get po |grep $c | head -1 | awk {'print $1'}) + echo "Tar up logs in $aca" + kubectl exec $aca -- tar -cf /app/$c.tar /var/log/$c + echo "Copy log from $aca to $TMP_DIR/$c" + kubectl cp $aca:/app/$c.tar $TMP_DIR/$c.tar + fi + +done + +echo -e "\x1B[1;31mThe logs are located at $TMP_DIR \x1B[0m" \ No newline at end of file
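A typical invocation of the collection script, assuming you have already logged in to the cluster; the project name here is illustrative only:

```bash
# Select the project that hosts ACA, then run the collector;
# the per-pod archives end up under /tmp/aca.
oc project my-aca-namespace
bash ACA/configuration-ha/utils/aca_logs_collection.sh
```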
diff --git a/ADW/README_config.md b/ADW/README_config.md deleted file mode 100644 index 3f597eee..00000000 --- a/ADW/README_config.md +++ /dev/null @@ -1,106 +0,0 @@ -# Configuring IBM Automation Digital Worker - -The following instructions cover the basic configuration of IBM Automation Digital Worker. - - -## Prerequisites - -Digital Worker requires: -- A [User Management Service](../UMS/README_config.md) instance in order to protect access to Digital Worker designer and APIs -- An [IBM Business Automation Insights](../BAI/README_config.md) instance (optional but recommended) in order to collect Digital Worker task events and monitor them -- An [IBM Business Automation Studio Resource Registry](../BAS/README_config.md) instance (optional but recommended) in order to integrate with some other components in the pack - -Digital Worker includes 5 pods corresponding to the following services: - - Digital Worker Designer - - Digital Worker Tasks Runtime - - Digital Worker Management Server - - MongoDB - - NPM registry - -The services require CPU and memory resources. The following table lists the minimum requirements that are used as default values. - -| Component | CPU Minimum (m) | Memory Minimum (Mi) | -| ----------------------------------------| --------------- | -------------------- | -| Digital Worker Designer | 100 | 128 | -| Digital Worker Tasks Runtime | 100 | 128 | -| Digital Worker Management Server | 100 | 512 | -| MongoDB | 100 | 128 | -| NPM registry | 100 | 128 | - - -In addition to these 5 services, there are 2 jobs: - - Setup - - Registry - -## Preparing for Installation - -Before you configure, make sure that you have prepared your environment. For more information, see [Preparing to install IBM Automation Digital Worker](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.install/op_topics/tsk_prepare_adwk8s.html). - -### Step 1: Configure the custom resource YAML file for your Automation Digital Worker deployment - -In your `my_icp4a_cr.yaml` file, update the `adw_configuration` section with the configuration parameters. See [IBM Automation Digital Worker parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_adw_K8s_parameters.html) to find the default values for each ADW parameter and customize these values in your file. - -> **Note**: The [configuration](configuration) folder provides sample configuration files that you might find useful. Download the files and edit them for your own customizations. - -#### Managed OpenShift on IBM Cloud Public -When installing ADW on Managed OpenShift on IBM Cloud Public, the MongoDB service should use Block Storage. -In your custom resource YAML file, the `adw_configuration.mongodb.persistence.storageClassName` parameter should be the name of a storage class that uses the **ibm.io/ibmc-block** provisioner (for instance **ibmc-block-bronze**). - -### Step 2: Applying Pod Security Policy - -Digital Worker requires a pod security policy to be bound to the target namespace prior to installation. To meet this requirement, cluster-scoped and namespace-scoped actions might be needed before and after installation. - -The predefined pod security policy [`ibm-restricted-psp`](https://ibm.biz/cpkspec-psp) has been verified for this chart. If your target namespace is bound to it, no further action is needed for pod security policy. - -This chart also defines a custom PodSecurityPolicy which can be used to finely control the permissions/capabilities needed to deploy this chart. You can enable this custom PodSecurityPolicy using the OCP user interface or via the OCP CLI.
- -Using the CLI, you can apply the following YAML file to enable the custom pod security policy: -- [Custom PodSecurityPolicy definition](./configuration/adw-psp.yaml) - -After creating the policy, replace all occurrences of `< NAMESPACE >` with the name of the namespace the operator is deployed in. Then apply it using the following command: - -```bash -kubectl apply -f adw-psp.yaml -``` - -For the custom PodSecurityPolicy to take effect, you must bind the ServiceAccount to a ClusterRole. This can be done via the command line using the following command: - -```bash -kubectl create clusterrolebinding adw-clusterrolebinding --clusterrole=cluster-admin --serviceaccount=: -``` - -### Step 3: Prepare and Apply the Secret - -Using the [Preparing to install IBM Automation Digital Worker](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.install/op_topics/tsk_prepare_adwk8s.html) and [IBM Automation Digital Worker parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_adw_K8s_parameters.html) pages, create `adw-secret.yaml`, then apply it to your instance using the following command. - - -```bash -kubectl apply -f adw-secret.yaml -``` -> **Note**: An empty secret has been provided: [adw-secret.yaml](configuration/adw-secret.yaml) - -## Complete the installation - -When you have finished editing the configuration file, go back to the relevant install or update page to configure other components and complete the deployment with the operator. - -Install pages: - - [Managed OpenShift installation page](../platform/roks/install.md#step-6-configure-the-software-that-you-want-to-install) - - [OpenShift installation page](../platform/ocp/install.md#step-6-configure-the-software-that-you-want-to-install) - - [Certified Kubernetes installation page](../platform/k8s/install.md#step-6-configure-the-software-that-you-want-to-install) - -Update pages: - - [Managed OpenShift installation page](../platform/roks/update.md) - - [OpenShift installation page](../platform/ocp/update.md#step-1-modify-the-software-that-is-installed) - - [Certified Kubernetes installation page](../platform/k8s/update.md) - - -## Troubleshooting -### Management pod not going into a ready state -If you are using dynamically provisioned storage, ensure that the following line is present and set to true in your custom resource file. If it is not set, the management pod may fail because it needs to be able to write to the volume: - -```yaml -grantWritePermissionOnMountedVolumes: true -``` -### Digital Worker tile not present in Business Automation Studio - -When integrating with Resource Registry, the management service must be exposed, either directly or through a route, so that it is reachable from Resource Registry. If you are using SSL, the certificate requires a CN matching the pod name `< DEPLOYMENT NAME >-management` when the management service is exposed directly, or the route hostname when the management service is exposed through a route.
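For reference, one possible way to expose the management service through a route on OpenShift; the route name, service name, and hostname below are assumptions, not values from this repository:

```bash
# Create a passthrough route so Resource Registry can reach the management
# service; TLS terminates at the pod, so the certificate CN must match.
oc create route passthrough adw-management \
  --service=adw-demo-management \
  --hostname=adw-management.apps.example.com
```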
diff --git a/ADW/configuration/adw-cr.yaml b/ADW/configuration/adw-cr.yaml deleted file mode 100644 index 86a54aca..00000000 --- a/ADW/configuration/adw-cr.yaml +++ /dev/null @@ -1,98 +0,0 @@ -apiVersion: icp4a.ibm.com/v1 -kind: ICP4ACluster -metadata: - name: adw-cr - labels: - app.kubernetes.io/instance: ibm-dba - app.kubernetes.io/managed-by: ibm-dba - app.kubernetes.io/name: ibm-dba - release: 20.0.1 -spec: - appVersion: 20.0.1 - adw_configuration: - global: - imagePullSecret: < IMAGE SECRET > - kubernetes: - serviceAccountName: "ibm-cp4a-operator" - - adwSecret: < SECRET > - - grantWritePermissionOnMountedVolumes: true - - logLevel: "error" - - networkPolicy: - enabled: true - - restartPolicy: Never - - registry: - endpoint: "" - - npmRegistry: - persistence: - enabled: true - useDynamicProvisioning: true - storageClassName: "< STORAGE CLASS NAME >" - - mongodb: - persistence: - enabled: true - useDynamicProvisioning: true - storageClassName: "< STORAGE CLASS NAME >" - - designer: - image: - repository: "< REGISTRY >/adw-designer" - tag: "20.0.1" - pullPolicy: "Always" - externalPort: 30708 - externalUrl: "" - - runtime: - image: - repository: "< REGISTRY >/adw-runtime" - tag: "20.0.1" - pullPolicy: "Always" - persistence: - useDynamicProvisioning: true - storageClassName: "< STORAGE CLASS NAME >" - service: - type: "NodePort" - externalPort: 30709 - runLogLevel: "warn" - externalUrl: "" - - management: - image: - repository: "< REGISTRY >/adw-management" - tag: "20.0.1" - pullPolicy: "Always" - persistence: - useDynamicProvisioning: true - storageClassName: "< STORAGE CLASS NAME >" - externalPort: 30710 - externalUrl: "" - - setup: - image: - repository: "< REGISTRY >/adw-setup" - tag: "20.0.1" - pullPolicy: "Always" - - init: - image: - repository: "< REGISTRY >/dba/adw-init" - tag: "20.0.1" - pullPolicy: "Always" - - baiKafka: - topic: "BAITOPICFORODM" - bootstrapServers: "" - securityProtocol: "SASL_SSL" - - baiElasticsearch: - url: "" - - oidc: - endpoint: "" diff --git a/BAI/README_config.md b/BAI/README_config.md deleted file mode 100644 index 895cdd45..00000000 --- a/BAI/README_config.md +++ /dev/null @@ -1,276 +0,0 @@ -# Configuring IBM® Business Automation Insights - -These instructions cover the basic configuration of IBM Business Automation Insights. - -In order to use Business Automation Insights with other components in the IBM Cloud Pak for Automation you also need to configure them to emit events. - -For more information on the IBM Cloud Pak for Automation, see the [IBM Cloud Pak for Automation Knowledge Center](https://www.ibm.com/support/knowledgecenter/en/SSYHZ8_20.0.x/welcome/kc_welcome_dba_distrib.html). - -## Before you start - -If you have not done so, go to the [IBM Cloud Pak for Automation 20.0.x](https://www.ibm.com/support/knowledgecenter/en/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_preparing_baik8s_prereq.html) Knowledge Center and follow the steps to prepare your environment for Business Automation Insights. - -This README will summarize a number of the preparation steps found in the Knowledge Center. For more information at each stage refer to the Knowledge Center links provided. - -## Step 1: Make a copy of the sample custom resource - -The IBM Cloud Pak for Automation operator uses a single custom resource to install the required Cloud Pak products. These instructions provide an example ICP4ACluster custom resource [`configuration/bai-sample-cr.yaml`](configuration/bai-sample-cr.yaml). 
You can use this yaml file to customize your Business Automation Insights installation, then copy the `bai_configuration` section of the CR yaml to the single ICP4ACluster CR yaml for all Cloud Pak products. - -To begin customizing a basic installation, first clone this repository and then copy the [`configuration/bai-sample-cr.yaml`](configuration/bai-sample-cr.yaml) configuration file into a working directory. - -## Step 2: Edit the custom resource - -Open the `bai-sample-cr.yaml` ICP4ACluster custom resource file in a text/code editor. - -There are a number of values you need to customize: - -* Change all occurrences of `` to the location of the registry hosting the Business Automation Insights Docker images - -* Change all occurrences of `` to the name of the Docker pull secret created above, for example `icp4apull` - -* Ensure the `tag` value for all configurations matches the Docker tag used for the Docker images in your repository - -### Step 2.1: Install and customize the Apache Kafka Configuration - -This section is part of the prerequisites described in [Before you start](https://github.ibm.com/dba/cert-kubernetes/blob/master/BAI/README_config.md#before-you-start). If you already have a running Kafka, skip Step 2.1.1. - -#### Step 2.1.1: Install Apache Kafka with IBM Event Streams - -To install IBM Event Streams on OpenShift, see https://ibm.github.io/event-streams/installing/installing-openshift/. - -#### Step 2.1.2: Apache Kafka connection configuration - -To configure Business Automation Insights to interact with your installation of Apache Kafka, you need to customize the `bai_configuration.kafka` section of the custom resource. - -Below is an example of a Kafka configuration: - -```yaml - - kafka: - bootstrapServers: "" - securityProtocol: "SASL_SSL" - username: "" - password: "" - serverCertificate: "" -``` - -For advanced Apache Kafka configuration, including security options, refer to the [IBM Business Automation Insights Knowledge Center - Apache Kafka parameters](https://www.ibm.com/support/knowledgecenter/en/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_bai_k8s_kafka_params.html). - -#### Step 2.1.3: Apache Kafka topic configuration - -Business Automation Insights uses a number of Apache Kafka topics. To customize the names of these topics, uncomment and alter the settings below: - -```yaml - settings: - egress: true - ingressTopic: ibm-bai-ingress - egressTopic: ibm-bai-egress - serviceTopic: ibm-bai-service -``` - -More information about this can be found in the [IBM Business Automation Insights Knowledge Center - Apache Kafka parameters](https://www.ibm.com/support/knowledgecenter/en/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_bai_k8s_kafka_params.html), including an explanation of egress functionality.
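If your Kafka administrator requires topics to exist before deployment, they can be pre-created with the standard Kafka CLI; a sketch, assuming an illustrative bootstrap address and sizing values:

```bash
# Pre-create the default ingress topic; the broker address, partition count,
# and replication factor are assumptions to adapt to your cluster.
kafka-topics.sh --bootstrap-server kafka.example.com:9092 \
  --create --topic ibm-bai-ingress --partitions 4 --replication-factor 3
```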
- -### Step 2.2 Persistent Storage -When configuring Business Automation Insights, you have a number of options regarding persistent storage. - -Below is a summary of the persistent storage used by Business Automation Insights: - -| Volume | Default volume name | Default Storage | Required | Access Mode | Number of volumes | -| --------------------------------- | ------------------------------------------ | --------------- | -------- | ------------- | ----------------- | -| Flink volume | -bai-pvc | 20Gi | Yes | ReadWriteMany | 1 | -| ElasticSearch Master | data--ibm-dba-ek-master-_replica_ | 10Gi | No | ReadWriteOnce | 1 per replica | -| ElasticSearch Data | data--ibm-dba-ek-data-_replica_ | 10Gi | No | ReadWriteOnce | 1 per replica | -| ElasticSearch Snapshot Storage | -es-snapshot-storage-pvc | 30Gi | No | ReadWriteMany | 1 | - -The Flink volume is used by multiple pods for normal operation of Business Automation Insights. For more information on the Business Automation Insights persistent volume configuration, see [IBM Business Automation Insights Knowledge Center - Apache Flink parameters](https://www.ibm.com/support/knowledgecenter/en/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_bai_k8s_flink_params.html). - -If you are using the embedded ElasticSearch stack, you can choose to enable persistence for the ElasticSearch nodes (with a volume for each replica of the master and data nodes), and for snapshot storage. For more information on the embedded ElasticSearch volume configuration, see [IBM Business Automation Insights Knowledge Center - Elasticsearch parameters](https://www.ibm.com/support/knowledgecenter/en/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_bai_k8s_es_params.html). - -#### Example configuration using dynamic provisioning - -For more information, see [Setting up dynamic provisioning](https://www.ibm.com/support/knowledgecenter/en/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_preparing_baik8s_dyn_prov.html). - -If your cluster has dynamic volume provisioning, the following example shows a storage configuration (as found in the `bai-sample-cr.yaml` file) with persistence enabled: - -```yaml - persistence: - useDynamicProvisioning: true - - flinkPv: - storageClassName: "" - - ibm-dba-ek: - elasticsearch: - data: - storage: - persistent: true - useDynamicProvisioning: true - storageClass: "" - snapshotStorage: - enabled: true - useDynamicProvisioning: true - storageClassName: "" -``` - -This configuration creates the four `PersistentVolumeClaim` resources listed with the default configuration. To use dynamic provisioning, change all occurrences of `` and `` to the name of the storage classes appropriate for your deployment platform. - -> Note: The `bai_configuration.flinkPv.storageClassName` and `bai_configuration.ibm-dba-ek.elasticsearch.data.snapshotStorage.storageClassName` storage classes must be capable of access mode `ReadWriteMany`. Additional configuration may be required on some platforms to create a `ReadWriteMany` capable storage class. `bai_configuration.ibm-dba-ek.elasticsearch.data.storage.storageClass` requires a `ReadWriteOnce` access mode capable storage class, available by default on many cloud platforms. - -#### Example configuration using static provisioning - -For more information, see [Setting up manual provisioning](https://www.ibm.com/support/knowledgecenter/en/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_preparing_baik8s_manual_prov.html).
- -If you want to manually create `PersistentVolume` and `PersistentVolumeClaim` resources, use the following template for an example configuration: - -```yaml - persistence: - useDynamicProvisioning: false - - flinkPv: - existingClaimName: "" - - ibm-dba-ek: - elasticsearch: - data: - storage: - persistent: true - useDynamicProvisioning: false - storageClass: "" - snapshotStorage: - enabled: true - useDynamicProvisioning: false - existingClaimName: "" -``` - -### Step 2.3 Product event processors - -By default, no event processor setup pods are started when Business Automation Insights is installed. The event processor setup pods are required in order to configure Business Automation Insights to be able to ingest events from other products in the IBM Cloud Pak for Automation. - -Each product has an `install` parameter in the `bai_configuration` custom resource section, as shown below: - -```yaml - ingestion: - install: false - image: - repository: /bai-ingestion - - adw: - install: false - image: - repository: /bai-adw - - bpmn: - install: false - image: - repository: /bai-bpmn - - bawadv: - install: false - image: - repository: /bai-bawadv - - icm: - install: false - image: - repository: /bai-icm - - odm: - install: false - image: - repository: /bai-odm - - content: - install: false - image: - repository: /bai-content -``` - -For each product that you want to process events from, change the `install` parameter to `true`. For example, to process events from IBM Operational Decision Manager, set `spec.bai_configuration.odm.install` to `true`. - -## Step 3: Security configuration - -Business Automation Insights requires some additional security configuration. - -### Step 3.1: Create security configuration - -Use the following template to create a [`BAI/configuration/bai-psp.yaml`](configuration/bai-psp.yaml) file containing the required `PodSecurityPolicy`, `Role`, `RoleBinding`, and `ServiceAccount` resources needed by BAI. - -**Example bai-psp.yaml** - -```yaml -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - annotations: - kubernetes.io/description: "This policy is required to allow ibm-dba-ek pods running Elasticsearch to use privileged containers." - name: -bai-psp -spec: - privileged: true - runAsUser: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - fsGroup: - rule: RunAsAny - volumes: - - '*' ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: -bai-role -rules: -- apiGroups: - - extensions - resourceNames: - - -bai-psp - resources: - - podsecuritypolicies - verbs: - - use ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: -bai-psp-sa ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: -bai-rolebinding -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: -bai-role -subjects: -- kind: ServiceAccount - name: -bai-psp-sa -``` - -After creating the file, replace all occurrences of `` with the name of your ICP4ACluster custom resource created in Step 1. - -### Step 3.2: Apply the security configuration - -To apply the configuration, you can use the `kubectl` command line utility: - -```bash -kubectl apply -f bai-psp.yaml -``` - -For Red Hat OpenShift, additional policies may be required to enable the `Pod` resources to start containers using the required UIDs.
To ensure these containers can start, use the `oc` command to add the service accounts to the required `privileged` SCC: - -```bash -oc adm policy add-scc-to-user privileged -z -bai-psp-sa -oc adm policy add-scc-to-user privileged -z default -``` - -## Step 4: Complete the installation - -Go back to the relevant install or update page to configure other components and complete the deployment with the operator. - -Install pages: - - [Managed OpenShift installation page](../platform/roks/install.md) - - [OpenShift installation page](../platform/ocp/install.md) - - [Certified Kubernetes installation page](../platform/k8s/install.md) diff --git a/BAI/README_uninstall.md b/BAI/README_uninstall.md deleted file mode 100644 index 64238dd6..00000000 --- a/BAI/README_uninstall.md +++ /dev/null @@ -1,55 +0,0 @@ -# Uninstall - -> **WARNING** If you have used dynamic provisioning to provision the snapshot storage that is used by the embedded Elasticsearch, the persistent volume claim (PVC) will be deleted as part of the uninstallation process. Before you follow these instructions, back up any snapshots. - -## Step 1: Uninstalling custom resources - -You can find detailed instructions on the uninstallation page for your platform. - [Managed OpenShift uninstall page](../platform/roks/uninstall.md) - [OpenShift uninstall page](../platform/ocp/uninstall.md) - [Certified Kubernetes uninstall page](../platform/k8s/uninstall.md) - -## Step 2: Deallocating storage - -To clean up storage used by Business Automation Insights, follow these instructions. - -### Statically provisioned storage - -If you chose to statically provision storage for Flink or snapshot storage, the persistent volume claims (PVCs) and persistent volumes (PVs) that you manually created are not deleted when the custom resource is removed. To completely remove all data, delete this storage manually. - -### Embedded Elasticsearch volumes - -If you installed with embedded Elasticsearch enabled, the volumes that were created for the *master* and *data* replicas of the Elasticsearch pods are not deleted by the uninstallation process. To remove an installation completely, delete the relevant PVCs and PVs by using this command. - -```bash -kubectl delete pvc/ -``` - -For example: - -```bash -kubectl delete pvc/data-bai-ibm-dba-ek-data-0 -``` - -To get a list of all PVCs, run this command. - -```bash -kubectl get pvc -``` - -If you are working on a Red Hat OpenShift platform, use the `oc` command instead of `kubectl`. - -## Step 3: Removing the security configuration - -If you used the `bai-psp.yaml` file that is referenced in [README_config.md](README_config.md) to install the required `PodSecurityPolicy`, `Role`, `RoleBinding`, and `ServiceAccount` resources that are needed by Business Automation Insights, remove this configuration by using this command.
- -```bash -kubectl delete -f bai-psp.yaml -``` - -If you are working on a Red Hat OpenShift platform, it is advised that you also remove the default service account and the Business Automation Insights service account (defined in the `bai-psp.yaml` file) from the privileged SCC: - -```bash -oc adm policy remove-scc-from-user privileged -z -bai-psp-sa -oc adm policy remove-scc-from-user privileged -z default -``` diff --git a/BAI/README_upgrade.md b/BAI/README_upgrade.md deleted file mode 100644 index 362fb93c..00000000 --- a/BAI/README_upgrade.md +++ /dev/null @@ -1,74 +0,0 @@ -# Upgrading IBM Business Automation Insights - -This document describes how to upgrade IBM Business Automation Insights. - -## Upgrading from IBM Business Automation Insights version 19.0.3 to 20.0.1 - -### Important note about Elasticsearch snapshot storage - -If dynamic provisioning was used to create the Elasticsearch snapshot storage PersistentVolumeClaim for Business Automation Insights version 19.0.3, deleting this release deletes this PersistentVolumeClaim. It is recommended that you back up the data in the PersistentVolume before uninstalling this release. - -If static provisioning was used to provision the snapshot storage PersistentVolumeClaim, this storage can be used for 20.0.1. The value for `ibm-dba-ek.elasticsearch.data.snapshotStorage.existingClaimName` can be used for the `spec.bai_configuration.ibm-dba-ek.elasticsearch.data.snapshotStorage.existingClaimName` value in the new ICP4ACluster custom resource. - -### Important note about restarting from a Flink processor checkpoint or savepoint - -If you need to ensure that, after upgrading, the Flink event processing resumes from its state before the upgrade, you must create a checkpoint or savepoint before the upgrade and you must restart the event processing from the specific checkpoint or savepoint after the upgrade. For further information, refer to [Upgrading Business Automation Insights from a specific checkpoint or savepoint](./README_upgrade_savepoint.md) - - -### Updating the shared configuration parameters - -In the `spec` element of your custom resource, make sure you have defined the following: - -| Custom Resource parameter | Comment | -| ------------------------------------------------------------------------------ | ------------------| -| shared_configuration.sc_deployment_type | `production` or `non-production` | -| appVersion | 20.0.1 | - -For information about shared configuration parameters, refer to [shared configuration parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_shared_config_params.html) - -### Updating the Business Automation Insights configuration parameters - -Make sure that the custom resource YAML code does not contain any tag elements that point to Business Automation Insights 19.0.3 Docker images. Simply removing the tags ensures that the 20.0.1 Docker images are used when the updated custom resource is deployed with the 20.0.1 image of the operator. - -Example: -``` Example my_icp4a_cr.yaml -bai_configuration: - setup: - image: - repository: /bai-setup - admin: - image: - repository: /bai-admin - ... 
-``` - -### Scale down the operator deployment - -Retrieve the initial number of replicas (`initialReplicas`) with the following command: - -`oc get deployment ibm-cp4a-operator -o jsonpath='{.spec.replicas}'` - -Scale down the operator deployment to `0` replicas by running the following command: - -`kubectl scale --replicas=0 deployment ibm-cp4a-operator` - - -### Prune the Business Automation Insights 19.0.3 installation -Prune the Business Automation Insights 19.0.3 installation by running the delete command as follows. - -`kubectl delete PodDisruptionBudget,StatefulSet,Deployment,Job -l release= --namespace ` - - -### Scale up the operator deployment -Scale up the operator deployment by using the following command: - -`kubectl scale --replicas= deployment ibm-cp4a-operator` - -## Completing the upgrade - -Return to the appropriate update page to configure other components and complete the deployment using the operator. - -Update pages: - - [Managed OpenShift update page](../platform/roks/update.md) - - [OpenShift update page](../platform/ocp/update.md) - - [Certified Kubernetes update page](../platform/k8s/update.md) diff --git a/BAI/README_upgrade_savepoint.md b/BAI/README_upgrade_savepoint.md deleted file mode 100644 index 4d8df4a3..00000000 --- a/BAI/README_upgrade_savepoint.md +++ /dev/null @@ -1,64 +0,0 @@ -# Upgrading Business Automation Insights from a specific checkpoint or savepoint - -This document describes how to restart event processing from a specific checkpoint or savepoint during an IBM Business Automation Insights upgrade. - -### Prerequisites - -Make sure you have the **jq** command-line JSON processor installed. The **jq** tool is available from this page: https://stedolan.github.io/jq/. - -### Procedure - -1. Retrieve the name of the job manager pod. - -``` -JOBMANAGER=`kubectl get pods --selector=release= --namespace | grep bai-flink-jobmanager | awk '{print $1}'` -``` - -2. Create savepoints for all the running processing jobs by using the script provided in the job manager pod. The processing is stopped right after the creation of the savepoints. - -``` -kubectl exec -it $JOBMANAGER --namespace -- scripts/create-savepoints.sh -s -``` - -This command stops the jobs and returns the path to the created savepoints. -Savepoint stored in `file:/mnt/pv/savepoints/dba/bai-/savepoint-`. - - -3. If the `create-savepoints.sh` script returns an error while creating savepoints, **and only in this case**, use the latest successful checkpoint. -The `create-savepoints.sh` script returns the names and identifiers of the jobs that failed to create savepoints.
-Not able to create savepoint for job 'dba/bai-``' with ID: `` - - a. Cancel the jobs to prevent the creation of new checkpoints. - - ``` - kubectl exec -it $JOBMANAGER --namespace -- flink cancel - ``` - - b. Retrieve the latest successful checkpoint. - - ``` - kubectl exec -it $JOBMANAGER --namespace -- curl -sk https://localhost:8081/jobs//checkpoints | jq ".latest.completed.external_path" - ``` - - -4. To ensure that, after upgrading, the processing restarts from the saved checkpoints, specify the `.recoveryPath` parameter of each job submitter in the custom resource YAML file. - -For this purpose, in the `spec.bai_configuration` element of your custom resource, make sure you have defined the path to the previously saved savepoints or checkpoints from which each job must recover. To use the default workflow of the job, leave this option empty. - -| Job name | Custom Resource parameter | -| ------------------------------------------------------------------------|-------------------------| -| **bai-bpmn** | `bpmn.recoveryPath` | -| **bai-bawadv** | `bawadv.recoveryPath` | -| **bai-icm** | `icm.recoveryPath` | -| **bai-odm** | `odm.recoveryPath` | -| **bai-content** | `content.recoveryPath` | -| **bai-ingestion** | `ingestion.recoveryPath` | -| **bai-adw** | `adw.recoveryPath` | - - -By default, you can restart a job from the same checkpoint or savepoint only once. This is a safety mechanism in case you forget to remove the value of the `.recoveryPath` parameter. If you try to restart more than once, the job submitter falls into an error state and returns a message such as **Error: The savepoint was already used. The Job won't be run from there.** - -### Completing the Business Automation Insights upgrade - -Go back to the upgrade page to continue the Business Automation Insights upgrade procedure. - * [Upgrading IBM Business Automation Insights](./README_upgrade.md). diff --git a/BAI/configuration/bai_sample_cr.yaml b/BAI/configuration/bai_sample_cr.yaml deleted file mode 100644 index 1902e12e..00000000 --- a/BAI/configuration/bai_sample_cr.yaml +++ /dev/null @@ -1,130 +0,0 @@ -############################################################################### -# -# Licensed Materials - Property of IBM -# -# (C) Copyright IBM Corp. 2020. All Rights Reserved. -# -# US Government Users Restricted Rights - Use, duplication or -# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
-# -############################################################################### -apiVersion: icp4a.ibm.com/v1 -kind: ICP4ACluster -metadata: - name: bai-demo - labels: - app.kubernetes.io/instance: ibm-dba - app.kubernetes.io/managed-by: ibm-dba - app.kubernetes.io/name: ibm-dba - release: 20.0.1 -spec: - appVersion: 20.0.1 - - shared_configuration: - sc_deployment_type: "production" ### Possible values are "production" , "non-production" - - bai_configuration: - imageCredentials: - imagePullSecret: - - persistence: - useDynamicProvisioning: true - - flinkPv: - storageClassName: "" - - kafka: - bootstrapServers: "kafka.bootstrapserver1.hostname:9092,kafka.bootstrapserver2.hostname:9092,kafka.bootstrapserver3.hostname:9092" - securityProtocol: "PLAINTEXT" - - # settings: - # egress: true - # ingressTopic: ibm-bai-ingress - # egressTopic: ibm-bai-egress - # serviceTopic: ibm-bai-service - - setup: - image: - repository: /bai-setup - - admin: - image: - repository: /bai-admin - - flink: - initStorageDirectory: true - image: - repository: /bai-flink - zookeeper: - image: - repository: /bai-flink-zookeeper - - ingestion: - install: false - image: - repository: /bai-ingestion - - adw: - install: false - image: - repository: /bai-adw - - bpmn: - install: false - image: - repository: /bai-bpmn - - bawadv: - install: false - image: - repository: /bai-bawadv - - icm: - install: false - image: - repository: /bai-icm - - odm: - install: false - image: - repository: /bai-odm - - content: - install: false - image: - repository: /bai-content - - initImage: - image: - repository: /bai-init - - elasticsearch: - install: true - - ibm-dba-ek: - image: - imagePullPolicy: Always - imagePullSecret: - - elasticsearch: - image: - repository: /bai-elasticsearch - init: - image: - repository: /bai-init - data: - storage: - persistent: true - useDynamicProvisioning: true - storageClass: "" - snapshotStorage: - enabled: true - useDynamicProvisioning: true - storageClassName: "" - - kibana: - image: - repository: /bai-kibana - init: - image: - repository: /bai-init diff --git a/BAN/README_config.md b/BAN/README_config.md deleted file mode 100644 index c2e976e2..00000000 --- a/BAN/README_config.md +++ /dev/null @@ -1,137 +0,0 @@ -# Configuring IBM Business Automation Navigator 3.0.7 - -IBM Business Automation Navigator configuration settings are recorded and stored in the shared YAML file for operator deployment. After you prepare your environment, you add the values for your configuration settings to the YAML so that the operator can deploy your containers to match your environment. - -## Requirements and prerequisites - -Confirm that you have completed the following tasks to prepare to deploy your Business Automation Navigator images: - -- Prepare your Business Automation Navigator environment. These procedures include setting up databases, LDAP, storage, and configuration files that are required for use and operation. You must complete all of the [preparation steps for Business Automation Navigator](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.install/op_topics/tsk_prepare_bank8s.html) before you are ready to deploy the container images. Collect the values for these environment components; you use them to configure your Business Automation Navigator container deployment. - -- Prepare your container environment. 
See [Preparing to install automation containers on Kubernetes](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/welcome/com.ibm.dba.install/op_topics/tsk_prepare_env_k8s.html) - -> **Note**: If you plan to use UMS integration with Business Automation Navigator, note that you might encounter registration failure errors during deployment. This can happen if the UMS deployment is not ready by the time the other containers come up. The situation resolves in the next operator loop, so the errors can be ignored. - -## Prepare your security environment - -You must also create a secret for the security details of the LDAP directory and datasources that you configured in preparation for use with IBM Business Automation Navigator. Collect the usernames and passwords to add to the secret. Using your values, run the following command: - - ``` -kubectl create secret generic ibm-ban-secret \ - --from-literal=navigatorDBUsername="user_name" \ - --from-literal=navigatorDBPassword="xxxxxxx" \ - --from-literal=ldapUsername="CN=CEAdmin,OU=Shared,OU=Engineering,OU=FileNet,DC=dockerdom,DC=ecm,DC=ibm,DC=com" \ - --from-literal=ldapPassword="xxxxxxx" \ - --from-literal=externalLdapUsername="cn=exUser1,ou=test1OU,dc=fncmad,dc=com" \ - --from-literal=externalLdapPassword="xxxxxxx=" \ - --from-literal=keystorePassword="xxxxxxx" \ - --from-literal=ltpaPassword="xxxxxxx" \ - --from-literal=appLoginUsername="user_name" \ - --from-literal=appLoginPassword="xxxxxxx" - ``` -The secret you create is the value for the parameter `ban_secret_name`. - -### Root CA and trusted certificate list - - The custom YAML file also requires values for the `root_ca_secret` and `trusted_certificate_list` parameters. The TLS secret contains the root CA's key value pair. You have the following choices for the root CA: - - You can generate a self-signed root CA - - You can allow the operator (or ROOTCA ansible role) to generate the secret with a self-signed root CA (by not specifying one) - - You can use a signed root CA. In this case, you create a secret that contains the root CA's key value pair in advance. - - The list of the trusted certificate secrets can be a TLS secret or an opaque secret. An opaque secret must contain a tls.crt file for the trusted certificate. The TLS secret has a tls.key file as the private key. - -## Customize the YAML file for your deployment - -All of the configuration values for the components that you want to deploy are included in the [ibm_cp4a_cr_template.yaml](../descriptors/ibm_cp4a_cr_template.yaml) file. Create a copy of this file on the system that you prepared for your container environment, for example `my_ibm_cp4a_cr_template.yaml`. - -The custom YAML file includes the following sections that apply for all of the components: -- shared_configuration - Specify your deployment and your overall security information. -- ldap_configuration - Specify the directory service provider information for all components in this common section. -- datasource_configuration - Specify the database information for all components in this common section. -- monitoring_configuration - Optional for deployments where you want to enable monitoring. -- logging_configuration - Optional for deployments where you want to enable logging. - -After the shared section, the YAML includes a section of parameters for each of the available components. If you plan to include a component in your deployment, you un-comment the parameters for that component and update the values. For some parameters, the default values are sufficient.
For other parameters, you must supply values that correspond to your specific environment or deployment needs. - -The optional initialize_configuration and verify_configuration sections include values for a set of automatic setup steps for your IBM Business Automation Navigator deployment. - -If you want to exclude any components from your deployment, leave the section for that component and all related parameters commented out in the YAML file. - -For a more focused YAML file that contains the default value for each Business Automation Navigator parameter, see the [fncm_ban_sample_cr.yaml](../FNCM/configuration/fncm_ban_sample_cr.yaml). You can use this shorter sample resource file to compile all the values you need for your Business Automation Navigator environment, then copy the sections into the [ibm_cp4a_cr_template.yaml](../descriptors/ibm_cp4a_cr_template.yaml) file before you deploy. - -A description of the configuration parameters is available in [Configuration reference for operators](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_ban_opparams.html). - -Use the information in the following sections to record the configuration settings for the components that you want to deploy. - -- [Shared configuration settings](README_config.md#shared-configuration-settings) -- [Business Automation Navigator settings](README_config.md#business-automation-navigator-settings) -- [Initialization settings](README_config.md#initialization-settings) -- [Verification settings](README_config.md#verification-settings) - -### Shared configuration settings - -Un-comment and update the values for the shared configuration, LDAP, datasource, monitoring, and logging parameters, as applicable. - - > **Reminder**: Set `shared_configuration.sc_deployment_platform` to a blank value if you are deploying on a non-OpenShift certified Kubernetes platform. - -Use the secrets that you created in Preparing your security environment for the `root_ca_secret` and `trusted_certificate_list` values. - -> **Reminder**: If you plan to use External Share with the 2 LDAP model for configuring external users, update the LDAP values in the `ext_ldap_configuration` section of the YAML file with the information about the directory server that you configured for external users. If you are not using the 2 LDAP model of external share, leave this section commented out. - -For more information about the shared parameters, see the following topics: - -- [Shared parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_opsharedparams.html) -- [LDAP parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_k8s_ldap.html) -- [Datasource parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_dbparams.html) -- [Monitoring parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_opmonparams.html) - - -### Business Automation Navigator settings - -Use the `navigator_configuration` section of the custom YAML to provide values for the configuration of Business Automation Navigator. You provide details for configuration settings that you have already created, like the names of your persistent volume claims. You also provide names for pieces of your Business Automation Navigator environment and make tuning decisions for your runtime environment.
- -In the Business Automation Navigator section, leave the `enable_appcues` setting with the default value, false. - -For more information about the settings, see [Business Automation Navigator parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_ban_opparams.html) - -### Initialization settings - -Use the `initialize_configuration` section of the custom YAML to provide values for the automatic initialization and setup of Content Platform Engine and Business Automation Navigator. The initialization container creates the required configuration for IBM Business Automation Navigator. You also make decisions for your runtime environment. - -You can edit the YAML to configure more than one of the available pieces in your automatically initialized environment. For example, if you want to create an additional Business Automation Navigator repository, you copy the stanza for the repository settings, paste it below the original, and add the new values for your additional repository: - - ``` -# icn_repos: - # - add_repo_id: "demo_repo1" - # add_repo_ce_wsi_url: "http://{{ meta.name }}-cpe-svc:9080/wsi/FNCEWS40MTOM/" - # add_repo_os_sym_name: "OS01" - # add_repo_os_dis_name: "OS01" - # add_repo_workflow_enable: false - # add_repo_work_conn_pnt: "pe_conn_os1:1" - # add_repo_protocol: "FileNetP8WSI" - - ``` - -You can create additional object stores, Content Search Services indexes, IBM Content Navigator repositories, and IBM Content Navigator desktops. - -For more information about the settings, see [Initialization parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_opinitiparams.html) - -### Verification settings - -Use the `verify_configuration` section of the custom YAML to provide values for the automatic verification of your Content Platform Engine and IBM Content Navigator. The verify container works in conjunction with the automatic setup of the initialize container. You can accept most of the default settings for the verification. However, compare the settings with the values that you supply for the initialization settings. Specific settings like object store names and the Content Platform Engine connection point must match between these two configuration sections. - -For more information about the settings, see [Verify parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_opverifyparams.html) - -## Complete the installation - -After you have set all of the parameters for the relevant components, return to the install or update page for your platform to configure other components and complete the deployment with the operator.
- -Install pages: - - [Installing on Managed Red Hat OpenShift on IBM Cloud Public](../platform/roks/install.md) - - [Installing on Red Hat OpenShift](../platform/ocp/install.md) - - [Installing on Certified Kubernetes](../platform/k8s/install.md) - -Update pages: - - [Updating on Managed Red Hat OpenShift on IBM Cloud Public](../platform/roks/update.md) - - [Updating on Red Hat OpenShift](../platform/ocp/update.md) - - [Updating on Certified Kubernetes](../platform/k8s/update.md) diff --git a/BAN/README_migrate.md b/BAN/README_migrate.md deleted file mode 100644 index f563a6ed..00000000 --- a/BAN/README_migrate.md +++ /dev/null @@ -1,22 +0,0 @@ -# Migrating Business Automation Navigator 3.0.x to V3.0.7 - -Because of the change in the container deployment method, there is no upgrade path from previous versions of Business Automation Navigator to V3.0.7. - -To move a V3.0.x installation to V3.0.7, you prepare your environment and deploy the operator the same way you would for a new installation. The difference is that you use the configuration values for your previously configured environment, including datasource, LDAP, storage volumes, etc., when you customize your deployment YAML file. - -Optionally, to protect your production deployment, you can create a replica of your data and use that datasource information for the operator deployment to test your migration. In this option, you follow the instructions for a new deployment. - - -## Step 1: Collect parameter values from your existing deployment - -You can use the reference topics in the [Cloud Pak for Automation Knowledge Center](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_ban_opparams.html) to see the parameters that apply for your components and shared configuration. - -You will use the values for your existing deployment to update the custom YAML file for the new operator deployment. For more information, see [Configure Business Automation Navigator](README_config.md). - -> **Note**: When you are ready to deploy the V3.0.7 version of your Business Automation Navigator container, stop your previous container. - -## Step 2: Return to the platform readme to migrate other components - -- [Managed OpenShift migrate page](../platform/roks/migrate.md) -- [OpenShift migrate page](../platform/ocp/migrate.md) -- [Kubernetes migrate page](../platform/k8s/migrate.md) diff --git a/BAN/README_upgrade.md b/BAN/README_upgrade.md deleted file mode 100644 index 9a22568e..00000000 --- a/BAN/README_upgrade.md +++ /dev/null @@ -1,34 +0,0 @@ -# Upgrading Business Automation Navigator from 19.0.3 to 20.0.1 - -These instructions cover the upgrade of Business Automation Navigator from 19.0.3 to 20.0.1. - -## Introduction - -You can upgrade your Business Automation Navigator 19.0.3 deployments to apply the updates that are associated with Business Automation Navigator 20.0.1. - -## Step 1: Update the custom resource YAML file for your Business Automation Navigator 20.0.1 deployment - -Get the custom resource YAML file that you used to deploy Business Automation Navigator 19.0.3, and edit it by following these steps: - -1. Change the release version from 19.0.3 to 20.0.1. - -2. Add `appVersion: 20.0.1` to the spec section that appears at the beginning of the file. - -``` -spec: - appVersion: 20.0.1 -``` - -3. 
In the `ban` section, update the tag values for the new version: - - * navigator:ga-307-icn-if002 - * navigator-sso:ga-307-icn-if002 - -## Step 2: Update the configuration sections for other deployments - -To update the configuration sections for other components, such as FileNet Content Manager, go back to the relevant upgrade page to follow their upgrade documents to update your custom resource YAML file. - -Upgrade pages: - - [Managed OpenShift upgrade page](../platform/roks/upgrade.md) - - [OpenShift upgrade page](../platform/ocp/upgrade.md) - - [Certified Kubernetes upgrade page](../platform/k8s/upgrade.md) diff --git a/BAN/configuration/ICN/configDropins/overrides/ESAPIWafPolicy.xml b/BAN/configuration/ICN/configDropins/overrides/ESAPIWafPolicy.xml new file mode 100644 index 00000000..5cbaf292 --- /dev/null +++ b/BAN/configuration/ICN/configDropins/overrides/ESAPIWafPolicy.xml @@ -0,0 +1,41 @@ + + + + + + block + + /error.jsp + 500 + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/BAS/README_config.md b/BAS/README_config.md deleted file mode 100644 index 6823d31a..00000000 --- a/BAS/README_config.md +++ /dev/null @@ -1,147 +0,0 @@ -# Configuring IBM Business Automation Studio 20.0.1 - -These instructions cover the basic installation and configuration of IBM Business Automation Studio. - -## Table of contents - - [Business Automation Studio Component Details](#Business-Automation-Studio-Component-Details) - - [Prerequisites](#Prerequisites) - - [Resources Required](#Resources-Required) - - [Step 1: Preparing to install Business Automation Studio for Production](#Step-1-Preparing-to-install-Business-Automation-Studio-for-Production) - - [Step 2: Configuring Redis for App Engine Playback Server (Optional)](#Step-2-Configuring-Redis-for-App-Engine-Playback-Server-Optional) - - [Step 3: Implementing storage (Optional)](#Step-3-implementing-storage-optional) - - [Step 4: Configuring the custom resource YAML file for your Business Automation Studio deployment](#Step-4-Configuring-the-custom-resource-YAML-file-for-your-Business-Automation-Studio-deployment) - - [Step 5: Completing the installation](#Step-5-Completing-the-installation) - - [Limitations](#Limitations) - -## Introduction - -This installation deploys a Business Automation Studio environment, the single authoring and development environment for the IBM Cloud Pak for Automation platform, where you can go to author business services, applications, and digital workers. - -## Business Automation Studio Component Details - -This component deploys several services and components. - -In the standard configuration, it includes these components: - -* IBM Business Automation Studio (BAStudio) component -* IBM Resource Registry component -* IBM Business Automation Application Engine (App Engine) playback server component - -Notes: - - The IBM Business Automation Application Engine (App Engine) playback server component is designed to provide a playback environment for application development use. The App Engine installed as a playback server doesn't contain all the features needed by the App Engine in a production environment and can't be used as a production App Engine server. - - For a production environment, deploy the App Engine following the instructions in [Application Engine Configuration](../AAE/README_config.md).
- -To support those components, a standard installation generates: - - * 5 ConfigMaps that manage the configuration of Business Automation Studio server - * 2 deployments running the Business Automation Studio server and App Engine playback server - * 1 StatefulSet running JMS - * 4 or more jobs for Business Automation Studio and Resource Registry - * 5 secrets to get access - * 5 services to route the traffic to Business Automation Studio server - -## Prerequisites - - * [User Management Service](../UMS/README_config.md) - * Resource Registry, which is included in the Business Automation Studio configuration. If you already configured Resource Registry through another component, you need not install it again. - -## Resources Required - -Follow the OpenShift instructions in [Planning Your Installation 3.11](https://docs.openshift.com/container-platform/3.11/install/index.html#single-master-single-box) or [Planning your Installation 4.2](https://docs.openshift.com/container-platform/4.2/welcome/index.html). Then check the required resources in [System and Environment Requirements on OCP 3.11](https://docs.openshift.com/container-platform/3.11/install/prerequisites.html) or [System and Environment Requirements on OCP 4.2](https://docs.openshift.com/container-platform/4.2/architecture/architecture.html) and set up your environment. - -| Component name | Container | CPU | Memory | -| --- | --- | --- | --- | -| BAStudio | BAStudio container | 2 | 2Gi | -| BAStudio | Init containers | 200m | 256Mi | -| BAStudio | JMS containers | 500m | 512Mi | -| Resource Registry | Resource Registry container | 200m | 256Mi | -| Resource Registry | Init containers | 100m | 128Mi | -| App Engine Playback Server | App Engine container | 1 | 1Gi | -| App Engine Playback Server | Init containers | 200m | 128Mi | - -## Step 1: Preparing to install Business Automation Studio for Production - -Besides the common steps to set up the operator environment, you must do the following steps before you install Business Automation Studio. - -* Create the Business Automation Studio and App Engine playback server databases. See [Creating databases](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_basprep_db.html). -* Create the required secrets. See [Protecting sensitive configuration data](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_basprep_data.html). - -## Step 2: Configuring Redis for App Engine Playback Server (Optional) - -The default replica size of the App Engine playback server is 1. You can have only one App Engine pod because it's a playback server for application development use. If you need the replica size to be more than 1 or you enabled the Horizontal Pod Autoscaler for the playback server, you must configure the App Engine playback server with Remote Dictionary Server (Redis). For instructions, see [Optional: Configuring App Engine playback server with Redis](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_basprep_redis.html). - -## Step 3: Implementing storage (Optional) - -You can optionally add your own persistent volume (PV) and persistent volume claim (PVC) if you want to use your own JDBC driver or you want Resource Registry to be backed up automatically. The minimum supported size is 1 GB. For instructions see [Optional: Implementing storage](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_basprep_storage.html). 
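As an illustration only (the names, path, server, and storage class below are placeholders; the linked instructions define what the operator actually expects), a PV and PVC pair for this purpose generally looks like:

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: bas-custom-pv              # placeholder name
spec:
  storageClassName: "bas-custom"
  capacity:
    storage: 1Gi                   # minimum supported size
  accessModes:
    - ReadWriteMany
  nfs:
    path: <storage_folder>/bas
    server: <NFS_server_IP>
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: bas-custom-pvc             # placeholder name
spec:
  storageClassName: "bas-custom"
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
```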
- - -## Step 4: Configuring the custom resource YAML file for your Business Automation Studio deployment - - 1. Make sure that you've set the configuration parameters for [User Management Service](../UMS/README_config.md) in your copy of the template custom resource YAML file. - 2. Edit your copy of the template custom resource YAML file and make the following updates. After completing those updates, if you need to install other components, go to [Step 5](README_config.md#step-5-Completing-the-installation) and do the configuration for those components, using the same YAML file. - - a. Uncomment and update the shared_configuration section if you haven't done it already. - - b. Update the `bastudio_configuration` and `resource_registry_configuration` sections. - * Automatic backup for Resource Registry is recommended. See [Enabling Resource Registry disaster recovery](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.managing/topics/tsk_enabling_disaster_recovery.html) for configuration information. - * If you just want to install BAStudio with the minimal required values, replace the contents of `bastudio_configuration` and `resource_registry_configuration` in your copy of the template custom resource YAML file with the values from the [sample_min_value.yaml](configuration/sample_min_value.yaml) file. - * If you want to use the full configuration list and customize the values, update the required values in `bastudio_configuration` and `resource_registry_configuration` in your copy of the template custom resource YAML file based on your configuration. - -Note: The hostname must be less than 64 characters. Use a wildcard DNS service (https://nip.io/) if the hostname is too long. For example, instead of: - -``` - resource_registry_configuration: - admin_secret_name: op-bas-rr-admin-secret - hostname: rr-{{ meta.namespace }}.I-have-a-very-long-hostname-which-exceeds-64-characters.cloud.com -``` - -the hostname can use a wildcard: - -``` - resource_registry_configuration: - admin_secret_name: op-bas-rr-admin-secret - hostname: rr-{{ meta.namespace }}.<IP_address>.nip.io -``` - -### Configuration - -If you want to customize your custom resource YAML file, refer to the [configuration list](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_bas_params.html) for each parameter. - -## Step 5: Completing the installation - -Go back to the relevant installation or update page to configure other components and complete the deployment with the operator. - -Installation pages: - - [Managed OpenShift installation page](../platform/roks/install.md) - - [OpenShift installation page](../platform/ocp/install.md) - - [Certified Kubernetes installation page](../platform/k8s/install.md) - -Update pages: - - [Managed OpenShift update page](../platform/roks/update.md) - - [OpenShift update page](../platform/ocp/update.md) - - [Certified Kubernetes update page](../platform/k8s/update.md) - - -## Limitations - -* After you deploy Business Automation Studio, you can't change the Business Automation Studio or App Engine playback server admin user. - -* Because of a node.js server limitation, the App Engine playback server image trusts only root CAs. If an external service is used and signed with another root CA, you must add that root CA as trusted instead of the service certificate. - - * The certificate can be self-signed, or signed by a well-known root CA. - * If you're using a depth zero self-signed certificate, it must be listed as a trusted certificate.
- * If you're using a certificate signed by a self-signed root CA, the self-signed root CA must be in the trusted list. Using a leaf certificate in the trusted list is not supported. - * If you're adding the root CA of two or more external services to the App Engine trust list, you can't use the same common name for those root CAs. - -* The Business Automation Studio components support only the IBM DB2 database. - -* The App Engine playback server supports only the IBM DB2 database. - -* The JMS statefulset doesn't support scaling. You must keep the replica size of the JMS statefulset at 1. - -* Resource Registry limitation - - Because of the design of etcd, it's recommended that you don't change the replica size after you create the Resource Registry cluster, to prevent data loss. If you must set the replica size, set it to an odd number. If you reduce the replica size, the pods are destroyed one by one, slowly, to prevent data loss or the cluster getting out of sync. - - * If you update the Resource Registry admin secret to change the username or password, first delete the -dba-rr- pods so that Resource Registry applies the updates. Alternatively, you can apply the updates manually with etcd commands. - * If you update the Resource Registry configurations in the icp4acluster custom resource instance, the update might not affect the Resource Registry pod directly. It will affect the newly created pods when you increase the number of replicas. diff --git a/BAS/README_migrate.md b/BAS/README_migrate.md deleted file mode 100644 index 20896964..00000000 --- a/BAS/README_migrate.md +++ /dev/null @@ -1,15 +0,0 @@ -# Migrating from IBM Business Automation Studio 19.0.2 to 20.0.1 - -These instructions cover the migration of IBM Business Automation Studio from 19.0.2 to 20.0.1. - -## Introduction - -If you installed IBM Business Automation Studio 19.0.2 and want to continue to use your 19.0.2 applications in Business Automation Studio 20.0.1, you can migrate your applications from Business Automation Studio 19.0.2 to 20.0.1. - -## Step 1: Export apps that were authored in 19.0.2 - -Log in to the admin console in your Business Automation Studio 19.0.2 environment, then export your apps as .twx files. - -## Step 2: Import the apps to 20.0.1 - -Install [IBM Business Automation Studio 20.0.1](../BAS/README_config.md), then import the apps that you exported. diff --git a/BAS/README_upgrade.md b/BAS/README_upgrade.md deleted file mode 100644 index a4f96abb..00000000 --- a/BAS/README_upgrade.md +++ /dev/null @@ -1,36 +0,0 @@ -# Upgrading from IBM Business Automation Studio 19.0.3 to 20.0.1 - -These instructions cover the upgrade of IBM Business Automation Studio from 19.0.3 to 20.0.1. - -## Introduction - -If you installed Business Automation Studio 19.0.3 and want to continue to use your 19.0.3 applications in Business Automation Studio 20.0.1, you can upgrade your applications from Business Automation Studio 19.0.3 to 20.0.1. - -## Step 1: Update the custom resource YAML file for your Business Automation Studio 20.0.1 deployment - -Get the custom resource YAML file that you used to deploy Business Automation Studio 19.0.3, and edit it by following these steps: - -1. Change the release version from 19.0.3 to 20.0.1. - -2. Add `appVersion: 20.0.1` to the `spec` section. See the [sample_min_value.yaml](configuration/sample_min_value.yaml) file. - -3. Update the `bastudio_configuration` and `resource_registry_configuration` sections. - - * Automatic backup for Resource Registry is recommended.
See [Enabling Resource Registry disaster recovery](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.managing/topics/tsk_enabling_disaster_recovery.html) for configuration information. - - * If you just want to update Business Automation Studio with the minimal required values, use the values in the [sample_min_value.yaml](configuration/sample_min_value.yaml) file. - * Add `admin_user` to the `bastudio_configuration` section. - * Add `admin_user` to the `playback_server` in the `bastudio_configuration` section. - * Change the image tags from 19.0.3 to 20.0.1 in all sections. - - * If you want to use the full configuration list and customize the values, update the required values in the `bastudio_configuration` and `resource_registry_configuration` sections in your custom resource YAML file based on your configuration. See the [configuration list](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_bas_params.html) for each parameter. - -## Step 2: Update the configuration sections for other deployments - -To update the configuration sections for other components, such as User Management Service and IBM Business Automation Navigator, go back to the relevant upgrade page to follow their upgrade documents to update your custom resource YAML file. - -Upgrade pages: - - [Managed OpenShift upgrade page](../platform/roks/upgrade.md) - - [OpenShift upgrade page](../platform/ocp/upgrade.md) - - [Certified Kubernetes upgrade page](../platform/k8s/upgrade.md) - diff --git a/BAS/configuration/sample_min_value.yaml b/BAS/configuration/sample_min_value.yaml deleted file mode 100644 index dbd97b38..00000000 --- a/BAS/configuration/sample_min_value.yaml +++ /dev/null @@ -1,71 +0,0 @@ -apiVersion: icp4a.ibm.com/v1 -kind: ICP4ACluster -metadata: - name: demo-template - labels: - app.kubernetes.io/instance: ibm-dba - app.kubernetes.io/managed-by: ibm-dba - app.kubernetes.io/name: ibm-dba - release: 20.0.1 -spec: - appVersion: 20.0.1 - ##################################################################### - ## IBM Business Automation Studio 20.0.1 configuration ## - ##################################################################### - bastudio_configuration: - admin_secret_name: bastudio-admin-secret - admin_user: - images: - bastudio: - repository: cp.icr.io/cp/cp4a/bas/bastudio - tag: 20.0.1 - hostname: - port: 443 - database: - host: - # The database provided should be created by the BAStudio SQL script template. - name: - port: - # If you want to enable database automatic client reroute (ACR) for HADR, you must configure alternative_host and alternative_port. Otherwise, leave them blank. - alternative_host: - alternative_port: - type: db2 - jms_server: - image: - repository: cp.icr.io/cp/cp4a/bas/jms - tag: 20.0.1 - #----------------------------------------------------------------------- - # App Engine Playback Server (playback_server) can be only one instance. This is different from App Engine (where application_engine_configuration is a list and you can deploy multiple instances). 
- #----------------------------------------------------------------------- - playback_server: - admin_secret_name: playback-server-admin-secret - admin_user: - images: - db_job: - repository: cp.icr.io/cp/cp4a/bas/solution-server-helmjob-db - tag: 20.0.1 - solution_server: - repository: cp.icr.io/cp/cp4a/bas/solution-server - tag: 20.0.1 - hostname: - port: 443 - database: - host: - # The database provided should be created by the App Engine Playback Server SQL script template. - name: - port: - # If you want to enable database ACR for HADR, configure alternative_host and alternative_port. Otherwise, leave them blank. - alternative_host: - alternative_port: - type: db2 - - ## Resource Registry Configuration - ## Important: if you've already configured Resource Registry, you don't need to change the resource_registry_configuration section in your copy of the template custom resource YAML file. - resource_registry_configuration: - admin_secret_name: resource-registry-admin-secret - images: - resource_registry: - repository: cp.icr.io/cp/cp4a/bas/dba-etcd - tag: 20.0.1 - hostname: - port: 443 diff --git a/FNCM/README_config.md b/FNCM/README_config.md deleted file mode 100644 index 9b033c32..00000000 --- a/FNCM/README_config.md +++ /dev/null @@ -1,194 +0,0 @@ -# Configuring IBM FileNet Content Manager 5.5.4 - -IBM FileNet Content Manager provides numerous containerized components for use in your container environment. The configuration settings for the components are recorded and stored in the shared YAML file for operator deployment. After you prepare your environment, you add the values for your configuration settings to the YAML so that the operator can deploy your containers to match your environment. - -## Requirements and prerequisites - -Confirm that you have completed the following tasks to prepare to deploy your FileNet Content Manager images: - -- Prepare your FileNet Content Manager environment. These procedures include setting up databases, LDAP, storage, and configuration files that are required for use and operation. You must complete all of the [preparation steps for FileNet Content Manager](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.install/op_topics/tsk_prepare_ecmk8s.html) before you are ready to deploy the container images. Collect the values for these environment components; you use them to configure your FileNet Content Manager container deployment. - -- Prepare your container environment. See [Preparing to install automation containers on Kubernetes](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/welcome/com.ibm.dba.install/op_topics/tsk_prepare_env_k8s.html) - -- If you want to deploy additional optional containers, prepare the requirements that are specific to those containers. 
For details, see the following information: - - [Preparing for External Share](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.install/op_topics/tsk_cm_externalshareop.html) - - [Preparing volumes and folders for the Content Services GraphQL API](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.install/op_topics/tsk_gqlvolumesop.html) - -If you plan to use external key management in your environment, review the following preparation information before you deploy: [Preparing for external key management](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.install/op_topics/tsk_prepare_ecm_externalkeyk8s.html) - -> **Note**: If you plan to use UMS integration with any of the FileNet Content Manager components, note that you might encounter registration failure errors during deployment. This can happen if the UMS deployment is not ready by the time the other containers come up. The situation resolves in the next operator loop, so the errors can be ignored. - - -## Prepare your security environment - -Before you deploy, you must create a secret for the security details of the LDAP directory and datasources that you configured in preparation for use with FileNet Content Manager. Collect the user names, passwords, and namespace to add to the secret. Using your values, run the following command: - - ``` -kubectl create secret generic ibm-fncm-secret \ ---from-literal=gcdDBUsername="db2inst1" --from-literal=gcdDBPassword="xxxxxxxx" \ ---from-literal=osDBUsername="db2inst1" --from-literal=osDBPassword="xxxxxxxx" \ ---from-literal=ldapUsername="cn=root" --from-literal=ldapPassword="xxxxxxxxxx" \ ---from-literal=externalLdapUsername="cn=User1,ou=test,dc=external,dc=com" --from-literal=externalLdapPassword="xxxxxxx" \ ---from-literal=appLoginUsername="filenet_admin" --from-literal=appLoginPassword="xxxxxxxx" \ ---from-literal=keystorePassword="xxxxx" \ ---from-literal=ltpaPassword="xxxxxx" - ``` -The secret you create is the value for the parameter `fncm_secret_name`. - - -### Root CA and trusted certificate list - - The custom YAML file also requires values for the `root_ca_secret` and `trusted_certificate_list` parameters. The TLS secret contains the root CA's key value pair. You have the following choices for the root CA: - - You can generate a self-signed root CA - - You can allow the operator (or ROOTCA ansible role) to generate the secret with a self-signed root CA (by not specifying one) - - You can use a signed root CA. In this case, you create a secret that contains the root CA's key value pair in advance. - - The list of the trusted certificate secrets can be a TLS secret or an opaque secret. An opaque secret must contain a tls.crt file for the trusted certificate. The TLS secret has a tls.key file as the private key. - - Note that if you plan to use the external Content Platform Engine tools, you must use either the root CA and trusted certificate list or the Ingress configuration.
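As a sketch only — the secret names and certificate file paths here are placeholders, not required values — these secrets can be created with standard `kubectl` commands:

```sh
# TLS secret that holds a signed root CA key pair (candidate for root_ca_secret)
kubectl create secret tls root-ca-secret --cert=./rootCA.crt --key=./rootCA.key

# Opaque secret that carries only a certificate to trust (candidate for trusted_certificate_list)
kubectl create secret generic trusted-cert-1 --from-file=tls.crt=./external-service-ca.crt
```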
-## Customize the YAML file for your deployment - -All of the configuration values for the components that you want to deploy are included in the [ibm_cp4a_cr_template.yaml](../descriptors/ibm_cp4a_cr_template.yaml) file. Create a copy of this file on the system that you prepared for your container environment, for example `my_ibm_cp4a_cr_template.yaml`. - -The custom YAML file includes the following sections that apply for all of the components: -- shared_configuration - Specify your deployment and your overall security information. -- ldap_configuration - Specify the directory service provider information for all components in this common section. -- datasource_configuration - Specify the database information for all components in this common section. -- monitoring_configuration - Optional for deployments where you want to enable monitoring. -- logging_configuration - Optional for deployments where you want to enable logging. - -After the shared section, the YAML includes a section of parameters for each of the available components. If you plan to include a component in your deployment, you un-comment the parameters for that component and update the values. For some parameters, the default values are sufficient. For other parameters, you must supply values that correspond to your specific environment or deployment needs. - -The optional initialize_configuration and verify_configuration sections include values for a set of automatic setup steps for your FileNet P8 domain and IBM Business Automation Navigator deployment. - -If you want to exclude any components from your deployment, leave the section for that component and all related parameters commented out in the YAML file. - -All FileNet Content Manager components require that you deploy the Content Platform Engine container. For that reason, you must complete the values for that section in all deployment use cases. - -For a more focused YAML file that contains the default value for each FileNet Content Manager parameter, see the [fncm_ban_sample_cr.yaml](configuration/fncm_ban_sample_cr.yaml). You can use this shorter sample resource file to compile all the values you need for your FileNet Content Manager environment, then copy the sections into the [ibm_cp4a_cr_template.yaml](../descriptors/ibm_cp4a_cr_template.yaml) file before you deploy. - -A description of the configuration parameters is available in [Configuration reference for operators](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_paramsop.html) - -Use the information in the following sections to record the configuration settings for the components that you want to deploy. - -- [Shared configuration settings](README_config.md#shared-configuration-settings) -- [Content Platform Engine settings](README_config.md#content-platform-engine-settings) -- [Content Search Services settings](README_config.md#content-search-services-settings) -- [Content Management Interoperability Services settings](README_config.md#content-management-interoperability-services-settings) -- [Content Services GraphQL settings](README_config.md#content-services-graphql-settings) -- [External Share settings](README_config.md#external-share-settings) -- [Task Manager settings](README_config.md#task-manager-settings) -- [Initialization settings](README_config.md#initialization-settings) -- [Verification settings](README_config.md#verification-settings) - -### Shared configuration settings - -Un-comment and update the values for the shared configuration, LDAP, datasource, monitoring, and logging parameters, as applicable. - - > **Reminder**: Set `shared_configuration.sc_deployment_platform` to a blank value if you are deploying on a non-OpenShift certified Kubernetes platform. - - -Use the secrets that you created in Preparing your security environment for the `root_ca_secret` and `trusted_certificate_list` values.
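For instance, a minimal sketch of those two parameters, reusing the example secret names from the commands shown earlier:

```yaml
shared_configuration:
  # TLS secret that holds the root CA key pair
  root_ca_secret: root-ca-secret
  # TLS or opaque secrets whose certificates should be trusted
  trusted_certificate_list: [trusted-cert-1]
```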
- -> **Reminder**: If you plan to use External Share with the 2 LDAP model for configuring external users, update the LDAP values in the `ext_ldap_configuration` section of the YAML file with the information about the directory server that you configured for external users. If you are not using the 2 LDAP model of external share, leave this section commented out. - -For more information about the shared parameters, see the following topics: - -- [Shared parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_opsharedparams.html) -- [LDAP parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_k8s_ldap.html) -- [Datasource parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_dbparams.html) -- [Monitoring parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_opmonparams.html) - -### Content Platform Engine settings - -Use the `cpe` section of the custom YAML to provide values for the configuration of Content Platform Engine. You provide details for configuration settings that you have already created, like the names of your persistent volume claims. You also provide names for pieces of your Content Platform Engine environment, and tuning decisions for your runtime environment. - -For more information about the settings, see [Content Platform Engine parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_opcpeparams.html) - -### Content Search Services settings - -Use the `css` section of the custom YAML to provide values for the configuration of Content Search Services. You provide details for configuration settings that you have already created, like the names of your persistent volume claims. You also provide names for pieces of your Content Search Services environment, and tuning decisions for your runtime environment. - -For more information about the settings, see [Content Search Services parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_opcssparams.html) - -### Content Management Interoperability Services settings - -Use the `cmis` section of the custom YAML to provide values for the configuration of Content Management Interoperability Services. You provide details for configuration settings that you have already created, like the names of your persistent volume claims. You also provide names for pieces of your Content Management Interoperability Services environment, and tuning decisions for your runtime environment. - -For more information about the settings, see [Content Management Interoperability Services parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_opcmisparams.html) - -### Content Services GraphQL settings - -Use the `graphql` section of the custom YAML to provide values for the configuration of the Content Services GraphQL API. You provide details for configuration settings that you have already created, like the names of your persistent volume claims. You also provide names for pieces of your Content Services GraphQL environment, and tuning decisions for your runtime environment. - -The section includes a parameter for enabling the GraphiQL development interface. Note the following considerations for including GraphiQL in your environment: - -- If you are deploying the GraphQL container as part of a test or development environment and you want to use GraphiQL with the API, set the enable_graph_iql parameter to true. -- If you are deploying the GraphQL container as part of a production environment, it is recommended to set the enable_graph_iql parameter to false.
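For example, a minimal sketch of that setting (the nesting under `ecm_configuration`, alongside `cpe` and `css`, is assumed from the upgrade instructions later in this document):

```yaml
ecm_configuration:
  graphql:
    # Keep the GraphiQL development interface disabled in production
    enable_graph_iql: false
```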
-For more information about the settings, see [Content Services GraphQL parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_opgqlparams.html) - -### External share settings - -Use the `es` section of the custom YAML to provide values for the configuration of External Share. You provide details for configuration settings that you have already created, like the names of your persistent volume claims. You also provide names for pieces of your External Share environment, and tuning decisions for your runtime environment. - -> **Reminder**: If you are using the 2 LDAP approach for managing your external users for external share, you must configure the ext_ldap_configuration section in the shared parameters with information about your external user LDAP directory service. - -> **Note**: If you are deploying the External Share container as an update instead of as part of the initial container deployment, note that both the Content Platform Engine and the Business Automation Navigator containers will undergo a rolling update to accommodate the External Share configuration. - -For more information about the settings, see [External Share parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_opesparams.html) - -### Task Manager settings - -Use the `tm` section of the custom YAML to provide values for the configuration of Task Manager. You provide details for configuration settings that you have already created, like the names of your persistent volume claims. You also provide names for pieces of your Task Manager environment, and tuning decisions for your runtime environment. - -If you want to deploy Task Manager, you must also deploy IBM Business Automation Navigator. The Task Manager uses the same database as IBM Business Automation Navigator. Database settings must match between these two components. - -For Task Manager, pay particular attention to any relevant values in the `jvm_customize_options` parameter. - -For more information about the settings, see [Task Manager parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_optmparams.html) - -### Initialization settings - -Use the `initialize_configuration` section of the custom YAML to provide values for the automatic initialization and setup of Content Platform Engine and IBM Business Automation Navigator. The initialization container creates initial instances of your FileNet Content Manager components, such as the P8 domain, one or more object stores, and configuration of IBM Business Automation Navigator. You also provide names for pieces of your FileNet Content Manager environment, and make decisions for your runtime environment. - -You can edit the YAML to configure more than one of the available pieces in your automatically initialized environment.
For example, if you want to create an additional Content Search Services server, you copy the stanza for the server settings, paste it below the original, and add the new values for your additional server: - - ``` -ic_css_creation: - # - css_site_name: "Initial Site" - # css_text_search_server_name: "{{ meta.name }}-css-1" - # affinity_group_name: "aff_group" - # css_text_search_server_status: 0 - # css_text_search_server_mode: 0 - # css_text_search_server_ssl_enable: "true" - # css_text_search_server_credential: "RNUNEWc=" - # css_text_search_server_host: "{{ meta.name }}-css-svc-1" - # css_text_search_server_port: 8199 - - ``` - -You can create additional object stores, Content Search Services indexes, IBM Business Automation Navigator repositories, and IBM Business Automation Navigator desktops. - -For more information about the settings, see [Initialization parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_opinitiparams.html) - -### Verification settings - -Use the `verify_configuration` section of the custom YAML to provide values for the automatic verification of your Content Platform Engine and IBM Business Automation Navigator. The verify container works in conjunction with the automatic setup of the initialize container. You can accept most of the default settings for the verification. However, compare the settings with the values that you supply for the initialization settings. Specific settings like object store names and the Content Platform Engine connection point must match between these two configuration sections. - -For more information about the settings, see [Verify parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_opverifyparams.html) - -## Complete the installation - -After you have set all of the parameters for the relevant components, return to the install or update page for your platform to configure other components and complete the deployment with the operator. - -Install pages: - - [Installing on Managed Red Hat OpenShift on IBM Cloud Public](../platform/roks/install.md) - - [Installing on Red Hat OpenShift](../platform/ocp/install.md) - - [Installing on Certified Kubernetes](../platform/k8s/install.md) - -Update pages: - - [Updating on Managed Red Hat OpenShift on IBM Cloud Public](../platform/roks/update.md) - - [Updating on Red Hat OpenShift](../platform/ocp/update.md) - - [Updating on Certified Kubernetes](../platform/k8s/update.md) diff --git a/FNCM/README_migrate.md b/FNCM/README_migrate.md deleted file mode 100644 index ccadd7e7..00000000 --- a/FNCM/README_migrate.md +++ /dev/null @@ -1,22 +0,0 @@ -# Migrating IBM FileNet Content Manager 5.5.x persisted data to V5.5.4 - -Because of the change in the container deployment method, there is no upgrade path for previous versions of FileNet Content Manager to V5.5.4. - -To move a V5.5.x installation to V5.5.4, you prepare your environment and deploy the operator the same way you would for a new installation. The difference is that you use the configuration values for your previously configured environment, including datasource, LDAP, storage volumes, and so on, when you customize your deployment YAML file. - -Optionally, to protect your production deployment, you can create a replica of your data and use that datasource information for the operator deployment to test your migration. In this option, you follow the instructions for a new deployment.
- - -## Step 1: Collect parameter values from your existing deployment - -You can use the reference topics in the [Cloud Pak for Automation Knowledge Center](https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_paramsop.html) to see the parameters that apply for your components and shared configuration. - -You will use the values for your existing deployment to update the custom YAML file for the new operator deployment. For more information, see [Configure IBM FileNet Content Manager](README_config.md). - -> **Note**: When you are ready to deploy the V5.5.4 version of your FileNet Content Manager containers, stop your previous containers. - -## Step 2: Return to the platform readme to migrate other components - -- [Managed OpenShift migrate page](../platform/roks/migrate.md) -- [OpenShift migrate page](../platform/ocp/migrate.md) -- [Kubernetes migrate page](../platform/k8s/migrate.md) diff --git a/FNCM/README_upgrade.md b/FNCM/README_upgrade.md deleted file mode 100644 index 59b95ce3..00000000 --- a/FNCM/README_upgrade.md +++ /dev/null @@ -1,38 +0,0 @@ -# Upgrading from FileNet Content Manager 19.0.3 to 20.0.1 - -These instructions cover the upgrade of FileNet Content Manager from 19.0.3 to 20.0.1. - -## Introduction - -You can upgrade your FileNet Content Manager for IBM Cloud Pak for Automation 19.0.3 deployments to apply the updates that are associated with FileNet Content Manager for IBM Cloud Pak for Automation 20.0.1. - -## Step 1: Update the custom resource YAML file for your FileNet Content Manager for Cloud Pak for Automation 19.0.3 deployment - -Get the custom resource YAML file that you used to deploy FileNet Content Manager in 19.0.3, and edit it by following these steps: - -1. Change the release version from 19.0.3 to 20.0.1. - -2. Add `appVersion: 20.0.1` to the spec section that appears at the beginning of the file. - -``` -spec: - appVersion: 20.0.1 -``` - -3. In the sections for each of the components that are included in your FileNet Content Manager deployment in the `ecm_configuration` section, for example `cpe`, `css`, and so on, update the tag values for the new versions: - - * cpe:ga-554-p8cpe-if001 - * css:ga-554-p8css-if001 - * graphql:ga-554-p8cgql-if001 - * cmis:ga-304-cmis-if010 - * extshare:ga-307-es-if002 - * taskmgr:ga-307-tm-if002 - -## Step 2: Update the configuration sections for other deployments - -To update the configuration sections for other components, such as User Management Service and IBM Business Automation Navigator, go back to the relevant upgrade page to follow their upgrade documents to update your custom resource YAML file.
- -Upgrade pages: - - [Managed OpenShift upgrade page](../platform/roks/upgrade.md) - - [OpenShift upgrade page](../platform/ocp/upgrade.md) - - [Certified Kubernetes upgrade page](../platform/k8s/upgrade.md) diff --git a/FNCM/configuration/fncm_ban_sample_cr.yaml b/FNCM/configuration/fncm_ban_sample_cr.yaml index 9bd500bb..9c7ddf15 100644 --- a/FNCM/configuration/fncm_ban_sample_cr.yaml +++ b/FNCM/configuration/fncm_ban_sample_cr.yaml @@ -37,8 +37,8 @@ spec: ad: lc_ad_gc_host: "" lc_ad_gc_port: "" - lc_user_filter: "(&(cn=%v)(objectclass=person))" - lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))" + lc_user_filter: "(&(samAccountName=%v)(objectClass=user))" + lc_group_filter: "(&(samAccountName=%v)(objectclass=group))" tds: lc_user_filter: "(&(cn=%v)(objectclass=person))" lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))" @@ -64,8 +64,8 @@ spec: ad: lc_ad_gc_host: "" lc_ad_gc_port: "" - lc_user_filter: "(&(cn=%v)(objectclass=person))" - lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))" + lc_user_filter: "(&(samAccountName=%v)(objectClass=user))" + lc_group_filter: "(&(samAccountName=%v)(objectclass=group))" tds: lc_user_filter: "(&(cn=%v)(objectclass=person))" lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))" @@ -161,6 +161,9 @@ spec: arch: amd64: "3 - Most preferred" replica_count: 1 + # Set UID within value of openshift.io/sa.scc.uid-range in namespace's specs, + # If comment it, will use UID auto assigned by OCP + # run_as_user: 1000110001 image: repository: cp.icr.io/cp/cp4a/fncm/cpe tag: ga-554-p8cpe-if001 @@ -222,6 +225,9 @@ spec: arch: amd64: "3 - Most preferred" replica_count: 1 + # Set UID within value of openshift.io/sa.scc.uid-range in namespace's specs, + # If comment it, will use UID auto assigned by OCP + # run_as_user: 1000110001 image: repository: cp.icr.io/cp/cp4a/fncm/css tag: ga-554-p8css-if001 @@ -270,6 +276,9 @@ spec: arch: amd64: "3 - Most preferred" replica_count: 1 + # Set UID within value of openshift.io/sa.scc.uid-range in namespace's specs, + # If comment it, will use UID auto assigned by OCP + # run_as_user: 1000110001 image: repository: cp.icr.io/cp/cp4a/fncm/cmis tag: ga-304-cmis-if010 @@ -345,6 +354,9 @@ spec: arch: amd64: "3 - Most preferred" replica_count: 1 + # Set UID within value of openshift.io/sa.scc.uid-range in namespace's specs, + # If comment it, will use UID auto assigned by OCP + # run_as_user: 1000110001 image: repository: cp.icr.io/cp/cp4a/fncm/graphql tag: ga-554-p8cgql-if001 @@ -400,6 +412,9 @@ spec: arch: amd64: "3 - Most preferred" replica_count: 1 + # Set UID within value of openshift.io/sa.scc.uid-range in namespace's specs, + # If comment it, will use UID auto assigned by OCP + # run_as_user: 1000110001 image: repository: cp.icr.io/cp/cp4a/fncm/extshare tag: ga-307-es-if002 @@ -459,6 +474,9 @@ spec: arch: amd64: "3 - Most preferred" replica_count: 1 + # Set UID within value of openshift.io/sa.scc.uid-range in namespace's specs, + # If comment it, will use UID auto assigned by OCP + # run_as_user: 1000110001 image: repository: cp.icr.io/cp/cp4a/fncm/taskmgr tag: ga-307-tm-if002 @@ -526,6 +544,9 @@ spec: arch: amd64: "3 - Most preferred" replica_count: 1 + # Set UID within value of openshift.io/sa.scc.uid-range in namespace's specs, + # If comment it, will use UID auto assigned 
by OCP + # run_as_user: 1000110001 image: repository: cp.icr.io/cp/cp4a/ban/navigator tag: ga-307-icn-if002 diff --git a/IAWS/README_config.md b/IAWS/README_config.md deleted file mode 100644 index bbf6fa75..00000000 --- a/IAWS/README_config.md +++ /dev/null @@ -1,1152 +0,0 @@ -# Configuring IBM Automation Workstream Services 20.0.1 -Learn how to configure IBM Automation Workstream Services. - - -## Table of contents -- [Introduction](#Introduction) -- [Automation Workstream Services component details](#Automation-Workstream-Services-component-details) -- [Resources required](#Resources-required) -- [Prerequisites](#Prerequisites) -- [Step 1: Preparing to install Automation Workstream Services for production](#Step-1-Preparing-to-install-Automation-Workstream-Services-for-production) - - [Setting up an OpenShift environment](#Setting-up-an-OpenShift-environment) - - [Preparing SecurityContextConstraints](#Preparing-SecurityContextConstraints) -- [Step 2: Preparing databases for Automation Workstream Services](#Step-2-Preparing-databases-for-Automation-Workstream-Services) - - [Creating the database for Automation Workstream Services](#Creating-the-database-for-Automation-Workstream-Services) - - [(Optional) Db2 SSL Configuration](#Optional-Db2-SSL-Configuration) - - [(Optional) Db2 HADR Configuration](#Optional-Db2-HADR-Configuration) -- [Step 3: Preparing to configure LDAP](#Step-3-Preparing-to-configure-LDAP) -- [Step 4: Preparing storage](#Step-4-Preparing-storage) - - [Preparing storage for Process Federation Server](#Preparing-storage-for-Process-Federation-Server) - - [Preparing storage for Java Messaging Service](#Preparing-storage-for-Java-Messaging-Service) -- [Step 5: Protecting sensitive configuration data](#Step-5-Protecting-sensitive-configuration-data) - - [Creating required secrets for Automation Workstream Services](#Creating-required-secrets-for-Automation-Workstream-Services) - - [Creating the Lombardi custom secret](#Creating-the-lombardi-custom-secret) -- [Step 6: Configuring the Custom Resource YAML file to deploy Automation Workstream Services](#Step-6-Configuring-the-Custom-Resource-YAML-file-to-deploy-Automation-Workstream-Services) - - [Accepting the dba license in the operator.yaml file](#accepting-the-dba-license-in-the-operatoryaml-file) - - [Adding the prerequisite configuration sections](#Adding-the-prerequisite-configuration-sections) - - [Adding the required Automation Workstream Services configuration sections](#Adding-the-required-Automation-Workstream-Services-configuration-sections) - - [Custom configuration](#Custom-configuration) -- [Step 7: Completing the installation](#Step-7-Completing-the-installation) -- [Step 8: Verifying Automation Workstream Services](#Step-8-Verifying-Automation-Workstream-Services) -- [Limitations](#Limitations) -- [Troubleshooting](#Troubleshooting) - - - -## Introduction -The IBM Automation Workstream Services operator deploys the Workstream server, a server engine that runs workstreams that are configured and launched in IBM Workplace. 
- - -## Automation Workstream Services component details -The standard configuration includes these components: - -- IBM Business Automation Workstream Server component -- IBM Java Messaging Service component -- IBM Process Federation Server component - -To support those components, a standard installation generates the following content: - -- 7 ConfigMaps that manage the configuration -- 1 StatefulSet running Java Messaging Service -- 1 StatefulSet running Workstream server -- 1 StatefulSet running Process Federation Server -- 1 deployment for Process Federation Server -- 7 or more jobs for Workstream server -- 4 service accounts with related role and role binding -- 20 or more secrets to gain access during installation -- 7 services and a Route to route the traffic to the IBM Business Automation Application Engine (App Engine) - - -## Resources required -Follow the instructions in [Planning your installation](https://docs.openshift.com/container-platform/3.11/install/index.html#single-master-single-box). Then, based on your environment, check the required resources in [System and environment requirements](https://docs.openshift.com/container-platform/3.11/install/prerequisites.html) and set up your environment. - -| Component name | Container | CPU | Memory | -| --- | --- | --- | --- | -| IBM Automation Workstream Services | Workstream container | 2 | 3Gi | -| IBM Automation Workstream Services | Init containers | 200m | 128Mi | -| IBM Automation Workstream Services | IBM Java Messaging Service containers | 500m | 512Mi | -| IBM Automation Workstream Services | IBM Process Federation Server containers | 1500m | 2560Mi | - -You will need the following storage space: -- 5 GB for Process Federation Server log data -- 10 GB for Process Federation Server Elasticsearch data -- 1 GB for Java Messaging Service data - - -## Prerequisites -- [OpenShift 3.11 or later](https://docs.openshift.com/container-platform/3.11/welcome/index.html) -- [IBM Db2 11.5](https://www.ibm.com/products/db2-database) -- [User Management Service](../UMS/README_config.md) -- [IBM Business Automation Application Engine](../AAE/README_config.md) -- [IBM Business Automation Navigator](../BAN/README_config.md) -- [IBM FileNet Content Manager](../FNCM/README_config.md) - -## Step 1: Preparing to install Automation Workstream Services for production -In addition to performing the steps required to set up the operator environment, complete the following steps before you install Automation Workstream Services. - -### Setting up an OpenShift environment -Before you can prepare to install Automation Workstream Services, complete [Step 1 to Step 5](../platform/ocp/install.md) in "Installing Cloud Pak for Automation on Red Hat OpenShift." - - -### Preparing SecurityContextConstraints - -#### Creating a SecurityContextConstraint for Process Federation Server - -For Process Federation Server, the pods running Elasticsearch require the hosting worker nodes to be configured to: -- [Disable memory swapping](https://www.elastic.co/guide/en/elasticsearch/reference/6.8/setup-configuration-memory.html) by setting the sysctl value `vm.swappiness` to 1. - -- [Increase the limit on the number of open file descriptors](https://www.elastic.co/guide/en/elasticsearch/reference/6.8/file-descriptors.html) for the user running Elasticsearch to 65,536 or higher, and set the sysctl value `vm.max_map_count` to at least 262,144.
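To verify the current values on a worker node before changing anything (assuming direct shell access to the node), you can run:

```sh
sysctl vm.max_map_count vm.swappiness   # kernel settings checked by Elasticsearch
ulimit -n                               # open file descriptor limit for the current user
```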
- -If [privileged container](https://kubernetes.io/docs/concepts/workloads/pods/pod/#privileged-mode-for-pod-containers) is not allowed and the `pfs_configuration.elasticsearch.privileged` property is set to `false` in the Custom Resource configuration, you must ask the cluster administrator to execute the following sample command to change the swappiness and max_map_count values: - -``` -sysctl -w vm.max_map_count=262144 && sed -i '/^vm.max_map_count /d' /etc/sysctl.conf && echo 'vm.max_map_count = 262144' >> /etc/sysctl.conf && sysctl -w vm.swappiness=1 && sed -i '/^vm.swappiness /d' /etc/sysctl.conf && echo 'vm.swappiness=1' >> /etc/sysctl.conf -``` - -If you are allowed to run [privileged container](https://kubernetes.io/docs/concepts/workloads/pods/pod/#privileged-mode-for-pod-containers), then setting the `pfs_configuration.elasticsearch.privileged` value to `true` will take care of updating the node configuration using a privileged init container, which will execute the appropriate `sysctl` commands. You must create a SecurityContextConstraint (SCC) for Process Federation Server that contains the following content and save it to the `ibm-pfs-privileged-scc.yaml` file. Then, add this `ibm-pfs-privileged-scc` SCC to the `-ibm-pfs-es-service-account` Process Federation Server Elasticsearch default service account in the current namespace. - -PFS Privileged Security Context Constraint (SCC) definition: - -```yaml -apiVersion: security.openshift.io/v1 -kind: SecurityContextConstraints -metadata: - name: ibm-pfs-privileged-scc -allowHostDirVolumePlugin: true -allowHostIPC: true -allowHostNetwork: true -allowHostPID: true -allowHostPorts: true -allowPrivilegedContainer: true -allowPrivilegeEscalation: true -allowedCapabilities: -- '*' -allowedFlexVolumes: [] -allowedUnsafeSysctls: -- '*' -defaultAddCapabilities: [] -defaultAllowPrivilegeEscalation: true -forbiddenSysctls: [] -fsGroup: - type: RunAsAny -readOnlyRootFilesystem: false -requiredDropCapabilities: [] -runAsUser: - type: RunAsAny -seccompProfiles: -- '*' -seLinuxContext: - type: RunAsAny -supplementalGroups: - type: RunAsAny -volumes: -- '*' -priority: 2 -``` - -Run the following commands: - -```sh -$ oc create serviceaccount ibm-pfs-es-service-account -$ oc apply -f ibm-pfs-privileged-scc.yaml -$ oc adm policy add-scc-to-user ibm-pfs-privileged-scc -z ibm-pfs-es-service-account -``` - -**Tip:** You can use the [`getSCCs.sh`](https://github.com/IBM/cloud-pak/tree/master/samples/utilities) bash script, which displays all the SecurityContextConstraints resources that are mapped to each of the ServiceAccount users in the specified namespace (or project). - -**Note:** Specify the value of the `pfs_configuration.elasticsearch.service_account` property for the newly created service account `ibm-pfs-es-service-account` in your Custom Resource configuration file. Don't set the value of the `pfs_configuration.pfs.service_account` property to this service account. - - -## Step 2: Preparing databases for Automation Workstream Services -### Creating the database for Automation Workstream Services -Create the database for Automation Workstream Services by running the following script on the Db2 server: -```sql -create database <database_name> automatic storage yes using codeset UTF-8 territory US pagesize 32768; --- connect to the created database: -connect to <database_name>; --- A user temporary tablespace is required to support stored procedures in BPM.
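-- The statements that follow create a user temporary tablespace and enlarge the
-- transaction log (LOGFILSIZ, LOGSECOND) to support the Workstream server workload.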
-CREATE USER TEMPORARY TABLESPACE USRTMPSPC1; -UPDATE DB CFG FOR <database_name> USING LOGFILSIZ 16384 DEFERRED; -UPDATE DB CFG FOR <database_name> USING LOGSECOND 64 IMMEDIATE; --- The following grant is used for databases without enhanced security. --- For more information, review the IBM Knowledge Center for Enhancing Security for DB2. -grant dbadm on database to user <user_name>; -connect reset; -``` - -**Notes:** -- Replace `<database_name>` with the IBM Automation Workstream Services database name you want, for example, BPMDB. -- Replace `<user_name>` with the user you will use for the database. - - -### (Optional) Db2 SSL Configuration -To ensure that all communications between the Business Automation Workstream server and Db2 are encrypted, you must import the database CA Certificate to the Business Automation Workstream server. To do so, you must create a secret to store the certificate: -``` -kubectl create secret generic ibm-dba-baw-db2-cacert --from-file=cacert.crt=<path_to_db2_ca_certificate> -``` - -**Note:** You must modify the `<path_to_db2_ca_certificate>` part so that it points to your certificate file. Do not change the `--from-file=cacert.crt=` part. - -You can then use the resulting secret to set the `iaws_configuration[x].iaws_server.database.sslsecretname: ibm-dba-baw-db2-cacert`, while setting `iaws_configuration[x].iaws_server.database.ssl` to `true`. - -### (Optional) Db2 HADR Configuration -If you use Db2 as your database, you can configure high availability by setting up HADR for the Workstream server database. This configuration ensures that the Workstream server automatically retrieves the necessary failover server information when it first connects to the database. As part of the setup, you must provide a comma-separated list of failover servers and failover ports. - -For example, if there are two failover servers: - - server1.db2.customer.com on port 50443 - server2.db2.customer.com on port 51443 - -you can specify these hosts and ports in the Custom Resource configuration YAML file as follows: -```yaml -database: - ... ... - hadr: - standbydb_host: server1.db2.customer.com, server2.db2.customer.com - standbydb_port: 50443,51443 - retryinterval: - maxretries: - ... ... -``` - - - -## Step 3: Preparing to configure LDAP -An LDAP server is required before you install Automation Workstream Services. You can create the LDAP server secret by referring to [LDAP configuration parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_k8s_ldap.html).
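As an illustration only, an LDAP secret generally carries the bind user and password; the secret name and key names below are hypothetical (they echo the `ldapUsername`/`ldapPassword` keys used elsewhere in this document) and must match what your LDAP configuration expects:

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: ldap-bind-secret            # hypothetical name
type: Opaque
stringData:                         # stringData is encoded for you; data requires base64
  ldapUsername: "cn=root"           # bind DN
  ldapPassword: "<bind_password>"   # replace with the real password
```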
-## Step 4: Preparing storage - -### Preparing storage for Process Federation Server - -#### Using existing storage classes - -If you have existing storage classes for Process Federation Server logs, Process Federation Server outputs, and Elasticsearch storage, you can use dynamic provisioning by making the following updates in the Custom Resource configuration file: - -```yaml -pfs_configuration: - pfs: - output: - storage: - use_dynamic_provisioning: true - storage_class: "<storage_class_name>" - ... - logs: - storage: - use_dynamic_provisioning: true - storage_class: "<storage_class_name>" - ... - elasticsearch: - storage: - persistent: true - use_dynamic_provisioning: true - storage_class: "<storage_class_name>" - ... -``` - -#### Preparing your own storage classes - -If you don’t have existing storage classes, the following example illustrates the procedure using Network File System (NFS) to create your own storage classes. An existing NFS server is required before you can create persistent volumes (PVs), persistent volume claims (PVCs), and related folders. The deployment process uses these volumes and folders during the deployment. - -- Create the required folders on an NFS server. For the NFS server, you must grant minimal privileges. In the `/etc/exports` configuration file, add the following line at the end: -``` -<storage_folder> *(rw,sync,no_subtree_check) -``` - -**Notes:** -- `<storage_folder>` should be an individual directory and not shared with other components. -- **Restart the NFS service** after editing and saving the `/etc/exports` configuration file. - - -Give the least privilege to the mounted directories using the following commands: -```bash -sudo mkdir <storage_folder>/pfs-es-0 -sudo mkdir <storage_folder>/pfs-es-1 -sudo mkdir <storage_folder>/pfs-logs-0 -sudo mkdir <storage_folder>/pfs-logs-1 -sudo mkdir <storage_folder>/pfs-output-0 -sudo mkdir <storage_folder>/pfs-output-1 - -chown -R :65534 <storage_folder>/pfs-* -chmod g+rw <storage_folder>/pfs-* -``` - -- Create the PVs required for the Process Federation Server. - -Save the following YAML files on the OpenShift master node and run the `oc apply -f <file_name>` commands in the following order. - -1. pfs-pv-pfs-es-0.yaml -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - name: pfs-es-0 -spec: - storageClassName: "pfs-es" - accessModes: - - ReadWriteOnce - capacity: - storage: 10Gi - nfs: - path: <storage_folder>/pfs-es-0 - server: <NFS_server_IP> - persistentVolumeReclaimPolicy: Recycle -``` - -2. pfs-pv-pfs-es-1.yaml -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - name: pfs-es-1 -spec: - storageClassName: "pfs-es" - accessModes: - - ReadWriteOnce - capacity: - storage: 10Gi - nfs: - path: <storage_folder>/pfs-es-1 - server: <NFS_server_IP> - persistentVolumeReclaimPolicy: Recycle -``` - -3. pfs-pv-pfs-logs-0.yaml -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - name: pfs-logs-0 -spec: - storageClassName: "pfs-logs" - accessModes: - - ReadWriteOnce - capacity: - storage: 5Gi - nfs: - path: <storage_folder>/pfs-logs-0 - server: <NFS_server_IP> - persistentVolumeReclaimPolicy: Recycle -``` - -4. pfs-pv-pfs-logs-1.yaml -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - name: pfs-logs-1 -spec: - storageClassName: "pfs-logs" - accessModes: - - ReadWriteOnce - capacity: - storage: 5Gi - nfs: - path: <storage_folder>/pfs-logs-1 - server: <NFS_server_IP> - persistentVolumeReclaimPolicy: Recycle -``` - -5. pfs-pv-pfs-output-0.yaml -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - name: pfs-output-0 -spec: - storageClassName: "pfs-output" - accessModes: - - ReadWriteOnce - capacity: - storage: 5Gi - nfs: - path: <storage_folder>/pfs-output-0 - server: <NFS_server_IP> - persistentVolumeReclaimPolicy: Recycle -``` - -6. pfs-pv-pfs-output-1.yaml -```yaml -apiVersion: v1 -kind: PersistentVolume -metadata: - name: pfs-output-1 -spec: - storageClassName: "pfs-output" - accessModes: - - ReadWriteOnce - capacity: - storage: 5Gi - nfs: - path: <storage_folder>/pfs-output-1 - server: <NFS_server_IP> - persistentVolumeReclaimPolicy: Recycle -``` - -**Notes:** -- Replace `<storage_folder>` with the Process Federation Server storage folder on your NFS server. -- Replace `<NFS_server_IP>` with your NFS server IP address. - -Make the following changes to the Custom Resource configuration file: - -```yaml -pfs_configuration: - pfs: - output: - storage: - use_dynamic_provisioning: false - storage_class: "pfs-output" - ... - logs: - storage: - use_dynamic_provisioning: false - storage_class: "pfs-logs" - ... - elasticsearch: - storage: - persistent: true - use_dynamic_provisioning: false - storage_class: "pfs-es" - ...
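      # Note: the storage_class values in this snippet must match the
      # storageClassName fields of the PVs created above (pfs-output, pfs-logs, pfs-es).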
-```
-
-### Preparing storage for Java Messaging Service
-
-#### Using existing storage classes
-
-If you have existing storage classes for Java Messaging Service (JMS), you can use dynamic provisioning by making the following updates in the Custom Resource configuration file:
-
-```yaml
-iaws_configuration:
-  - name: instance1
-    iaws_server:
-      jms:
-        storage:
-          persistent: true
-          use_dynamic_provisioning: true
-          access_modes:
-          - ReadWriteOnce
-          storage_class: "<storage_class_name>"
-          ...
-```
-
-#### Preparing your own storage classes
-
-If you don't have existing storage classes for JMS, the following example illustrates the procedure using NFS to create your own storage classes. An existing NFS server is required before you can create the PV and related folders.
-
-- Create the required folder on an NFS server. For the NFS server, you must grant minimal privileges. In the `/etc/exports` configuration file, add the following line at the end:
-```
-<JMS_STORAGE_FOLDER> *(rw,sync,no_subtree_check)
-```
-
-**Notes:**
-- `<JMS_STORAGE_FOLDER>` should be an individual directory and not shared with other components.
-- **Restart the NFS service** after editing and saving the `/etc/exports` configuration file.
-
-Create the folder and grant it the minimum required privileges by using the following commands:
-```bash
-sudo mkdir <JMS_STORAGE_FOLDER>/jms
-chown -R :65534 <JMS_STORAGE_FOLDER>/jms
-chmod g+rw <JMS_STORAGE_FOLDER>/jms
-```
-
-- Create the PV required for JMS.
-
-Save the following YAML file on the OpenShift master node and run the `oc apply -f jms-pv.yaml` command.
-
-jms-pv.yaml
-```yaml
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: jms-pv
-spec:
-  storageClassName: "jms-storage-class"
-  accessModes:
-  - ReadWriteOnce
-  capacity:
-    storage: 1Gi
-  nfs:
-    path: <JMS_STORAGE_FOLDER>/jms
-    server: <NFS_SERVER_IP>
-  persistentVolumeReclaimPolicy: Recycle
-```
-
-**Notes:**
-- Replace `<JMS_STORAGE_FOLDER>` with the JMS storage folder on your NFS server.
-- `accessModes` should be set to the same value as the `iaws_configuration[x].iaws_server.jms.storage.access_modes` property in the Custom Resource configuration file.
-- Replace `<NFS_SERVER_IP>` with your NFS server IP address.
-
-Make the following changes to the Custom Resource configuration file:
-
-```yaml
-iaws_configuration:
-  - name: instance1
-    iaws_server:
-      jms:
-        storage:
-          persistent: true
-          use_dynamic_provisioning: false
-          access_modes:
-          - ReadWriteOnce
-          storage_class: "jms-storage-class"
-          ...
-```
-
-
-## Step 5: Protecting sensitive configuration data
-### Creating required secrets for Automation Workstream Services
-A secret is an object that contains a small amount of sensitive data such as a password, a token, or a key. Before you install Automation Workstream Services, you must create the following secrets manually by saving the content in a YAML file and running the `oc apply -f <YAML_FILE>` command on the OpenShift master node.
-
-Shared encryption key secret:
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: icp4a-shared-key-secret
-type: Opaque
-data:
-  encryptionKey: <ENCRYPTION_KEY>
-```
-**Notes:**
-- The `encryptionKey` is used to encrypt confidential information in the Resource Registry (RR), so that the information is shared only between the components that hold the key.
-- Ensure the `encryptionKey` value is **base64** encoded.
-
-Business Automation Workstream server database secret:
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: ibm-baw-wfs-server-db-secret
-type: Opaque
-data:
-  dbUser: <DB_USER>
-  password: <DB_USER_PASSWORD>
-```
-**Notes:**
-- `dbUser` and `password` are the database user name and password.
-- Ensure all values under `data` are **base64** encoded.
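-
-For example, you can produce the base64-encoded values in a Linux shell as follows (a sketch; `-n` keeps the trailing newline out of the encoded value):
-```
-echo -n '<DB_USER>' | base64
-echo -n '<DB_USER_PASSWORD>' | base64
-```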
-
-Process Federation Server secret:
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: ibm-pfs-admin-secret
-type: Opaque
-data:
-  ltpaPassword: <LTPA_PASSWORD>
-  oidcClientPassword: <OIDC_CLIENT_PASSWORD>
-  sslKeyPassword: <SSL_KEY_PASSWORD>
-```
-
-**Notes:**
-- `sslKeyPassword` is used as the keystore and truststore password.
-- `oidcClientPassword` is registered with User Management Service (UMS) as the OIDC client password.
-- Ensure all values under `data` are **base64** encoded.
-
-### Creating the Lombardi custom secret
-#### 1. Save the Lombardi settings that you want to override in a file named '100Custom.xml'.
-The file uses the standard Lombardi merge format. The following sketch shows the overall structure; the `<custom-setting>` element is a placeholder for the actual setting you want to override:
-```xml
-<?xml version="1.0" encoding="UTF-8"?>
-<properties>
-  <server merge="mergeChildren">
-    <common merge="mergeChildren">
-      <!-- Placeholder element: replace with the Lombardi setting you want to override -->
-      <custom-setting merge="replace">true</custom-setting>
-    </common>
-  </server>
-</properties>
-```
-
-#### 2. Create the Lombardi custom secret.
-Run the following command on the OpenShift master node:
-```
-kubectl create secret generic wfs-lombardi-custom-xml-secret --from-file=sensitiveCustomConfig=./100Custom.xml
-```
-
-**Note:** To overwrite the Lombardi configuration settings, specify the value of the `iaws_configuration[x].iaws_server.lombardi_custom_xml_secret_name` property as the newly created secret name `wfs-lombardi-custom-xml-secret` in the Custom Resource configuration file.
-
-
-
-## Step 6: Configuring the Custom Resource YAML file to deploy Automation Workstream Services
-### Accepting the dba license in the operator.yaml file
-Make sure that you accept the license in the `operator.yaml` file by setting the value of the `dba_license` property to `"accept"`.
-
-### Adding the prerequisite configuration sections
-Make sure that you've set the configuration parameters for the following components in your copy of the template Custom Resource YAML file:
-
-- [User Management Service](../UMS/README_config.md)
-- [Business Automation Application Engine](../AAE/README_config.md)
-- [Business Automation Navigator](../BAN/README_config.md)
-- [FileNet Content Manager](../FNCM/README_config.md)
-
-**Note:**
-Check the values of `spec.initialize_configuration`. See [IBM FileNet Content Manager initialization settings](../FNCM/README_config.md#initialization-settings) and [Initialization parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_cm_opinitiparams.html) for the correct settings.
-
-### Adding the required Automation Workstream Services configuration sections
-Edit your copy of the template Custom Resource YAML file and make the following updates:
-- Uncomment and update the `shared_configuration` section if you haven't done it already.
-
-- Update the `iaws_configuration` and `pfs_configuration` sections.
-  To install Automation Workstream Services, replace the contents of the `iaws_configuration` and `pfs_configuration` sections in your copy of the template Custom Resource YAML file with the values from the [sample_min_value.yaml](./configuration/sample_min_value.yaml) file.
-
-- Make sure that `iaws_configuration[x].iaws_server.admin_user` is the administrator for the Workstream server, and that `admin_user` is an existing LDAP user.
-
-### Custom configuration
-If you want to customize your custom resource YAML file, refer to the [configuration list](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_iaws_params.html) to update the required values of each parameter according to your environment.
-
-
-
-## Step 7: Completing the installation
-Go back to the relevant installation or update page to configure other components and complete the deployment with the operator.
-
-Installation pages:
-   - [Managed OpenShift installation page](../platform/roks/install.md)
-   - [OpenShift installation page](../platform/ocp/install.md)
-   - [Certified Kubernetes installation page](../platform/k8s/install.md)
-
-Update pages:
-   - [Managed OpenShift update page](../platform/roks/update.md)
-   - [OpenShift update page](../platform/ocp/update.md)
-   - [Certified Kubernetes update page](../platform/k8s/update.md)
-
-
-
-## Step 8: Verifying Automation Workstream Services
-1. To verify the installation of Automation Workstream Services, get the names of the pods that were deployed by running the following command:
-```
-oc get pod -n <NAMESPACE>
-```
-
-
-A successful Automation Workstream Services deployment shows a pod status similar to the following:
- -``` -NAME READY STATUS RESTARTS AGE -demo-cmis-deploy-647c9b94b8-j4qq9 1/1 Running 0 30m -demo-cpe-deploy-7bbd949659-6s4sx 1/1 Running 0 35m -demo-dba-rr-00160d67de 1/1 Running 0 43m -demo-dba-rr-3d52541b1f 1/1 Running 0 43m -demo-dba-rr-42c6649189 1/1 Running 0 43m -demo-ibm-pfs-0 1/1 Running 0 5m19s -demo-ibm-pfs-dbareg-54d9db6cf5-98lwt 1/1 Running 0 4m56s -demo-ibm-pfs-elasticsearch-0 2/2 Running 0 5m32s -demo-ibm-pfs-umsregistry-job-8vjt7 0/1 Completed 0 5m10s -demo-instance1-aae-ae-db-job-kwlj4 0/1 Completed 0 15m -demo-instance1-aae-ae-deployment-d69c5bff7-cq6zd 1/1 Running 0 14m -demo-instance1-aae-ae-oidc-job-t4zks 0/1 Completed 0 14m -demo-instance1-baw-jms-0 1/1 Running 0 5m58s -demo-instance1-ibm-iaws-ibm-workplace-init-job-8qlgz 0/1 Completed 0 7m41s -demo-instance1-ibm-iaws-server-0 1/1 Running 0 7m23s -demo-instance1-ibm-iaws-server-content-init-job-fjlwq 0/1 Completed 0 7m32s -demo-instance1-ibm-iaws-server-database-init-job-ll5c6 0/1 Completed 0 8m -demo-instance1-ibm-iaws-server-database-init-job-pfs-wqm8b 0/1 Completed 0 7m54s -demo-instance1-ibm-iaws-server-ltpa-l9rnr 0/1 Completed 0 8m7s -demo-instance1-ibm-iaws-server-umsregistry-job-m27h6 0/1 Completed 0 7m49s -demo-navigator-deploy-5dc6967445-2x998 1/1 Running 0 22m -demo-rr-setup-pod 0/1 Completed 0 43m -demo-ums-deployment-5d6d65cd69-mrpcf 1/1 Running 0 41m -demo-ums-ltpa-creation-job-cgn4j 0/1 Completed 0 42m -ibm-cp4a-operator-fbb9d454d-hj5wh 2/2 Running 0 44m - -``` - -
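-
-To list only the pods that are not yet `Running` or `Completed`, you can filter by phase (a sketch; `<NAMESPACE>` is your project, and `Succeeded` is the pod phase behind the `Completed` status):
-```
-oc get pod -n <NAMESPACE> --field-selector=status.phase!=Running,status.phase!=Succeeded
-```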
-
-2. For each pod, check under Events to see that the images were successfully pulled and the containers were created and started by running the following command with the specific pod name:
-```
-oc describe pod <POD_NAME> -n <NAMESPACE>
-```
-
-
-## Limitations
-
-* Automation Workstream Services supports only the IBM Db2 database.
-
-* Elasticsearch limitation
-
-  **Note:** The following limitations apply only if you are updating an Automation Workstream Services deployment that uses the embedded Elasticsearch statefulset.
-
-  * Scaling the Elasticsearch statefulset
-
-    In the Elasticsearch configuration, the [discovery.zen.minimum_master_nodes property](https://www.elastic.co/guide/en/elasticsearch/reference/6.7/discovery-settings.html#minimum_master_nodes) is automatically set by the operator to the quorum of replicas of the Elasticsearch statefulset. If, during an update, the `pfs_configuration.elasticsearch.replicas` value is changed, and the change leads to a new computed value for the `discovery.zen.minimum_master_nodes` configuration property, then all currently running Elasticsearch pods will have to be restarted. During this restart of the pods, there will be a temporary interruption of the Elasticsearch and Process Federation Server services.
-  * Elasticsearch high availability
-
-    In the Elasticsearch configuration, the [discovery.zen.minimum_master_nodes property](https://www.elastic.co/guide/en/elasticsearch/reference/6.7/discovery-settings.html#minimum_master_nodes) is automatically set by the operator to the quorum of replicas of the Elasticsearch statefulset. If at some point some Elasticsearch pods fail, and the number of running Elasticsearch pods is less than the quorum of replicas of the Elasticsearch statefulset, there will be an interruption of the Elasticsearch and PFS services until at least the quorum of running Elasticsearch pods is satisfied again.
-
-* Resource Registry limitation
-
-  Because of the design of etcd, to prevent data loss it's recommended that you don't change the replica size after you create the Resource Registry cluster. If you must set the replica size, set it to an odd number. If you reduce the replica size, the pods are destroyed one by one, slowly, to prevent data loss or the cluster becoming out of sync.
-  * If you update the Resource Registry admin secret to change the username or password, first delete the `<metadata_name>-dba-rr-*` pods so that Resource Registry applies the updates. Alternatively, you can apply the updates manually with etcd commands.
-  * If you update the Resource Registry configurations in the icp4acluster custom resource instance, the update might not affect the Resource Registry pods directly. It will affect the newly created pods when you increase the number of replicas.
-
-* The App Engine trusts only Certificate Authority (CA) certificates because of a Node.js server limitation. If an external service is used and is signed with another root CA, you must add that root CA as trusted instead of the service certificate.
-
-  * The certificate can be self-signed, or signed by a well-known CA.
-  * If you're using a depth zero self-signed certificate, it must be listed as a trusted certificate.
-  * If you're using a certificate signed by a self-signed CA, the self-signed CA must be in the trusted list. Using a leaf certificate in the trusted list is not supported.
-  * If you're adding the root CA of two or more external services to the App Engine trust list, you can't use the same common name for those root CAs.
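-
-To determine which root CA actually signed an external service's certificate (and therefore which certificate belongs in the trust list), you can inspect the served chain with OpenSSL. A sketch; the host and port are placeholders for your external service:
-```
-openssl s_client -connect <external-service-host>:443 -showcerts </dev/null 2>/dev/null | openssl x509 -noout -subject -issuer
-```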
-
-
-
-## Troubleshooting
-
-- How to check Automation Workstream Services detailed version information
-
-Run the `docker inspect cp.icr.io/cp/cp4a/iaws/iaws-server:20.0.1` command to see the specific version of the Workstream server. On OpenShift Container Platform 4.x, use `podman inspect cp.icr.io/cp/cp4a/iaws/iaws-server:20.0.1` instead:
-```
-...
-"Labels": {
-    "architecture": "x86_64",
-    "authoritative-source-url": "registry.access.redhat.com",
-    "build-date": "2020-01-28T10:53:49.652277",
-    "com.ibm.dba.workstream.build-date": "20200312",
-    "com.ibm.dba.workstream.build-level": "20200312-074526",
-    "com.ibm.dba.workstream.ifixes": "[]",
-    "com.ibm.dba.workstream.version": "20.0.1",
-    "com.redhat.build-host": "cpt-1007.osbs.prod.upshift.rdu2.redhat.com",
-    "com.redhat.component": "ubi7-container",
-    "com.redhat.license_terms": "https://www.redhat.com/en/about/red-hat-end-user-license-agreements#UBI",
-    "description": "Workstream Server Container provides a server engine that runs workstreams",
-    "distribution-scope": "public",
-    "io.k8s.description": "The Universal Base Image is designed and engineered to be the base layer for all of your containerized applications, middleware and utilities. This base image is freely redistributable, but Red Hat only supports Red Hat technologies through subscriptions for Red Hat products. This image is maintained by Red Hat and updated regularly.",
-    "io.k8s.display-name": "Red Hat Universal Base Image 7",
-    "io.openshift.tags": "base rhel7",
-    "maintainer": "Red Hat, Inc.",
-    "name": "Workstream Server",
-    "release": "20.0.1",
-    "summary": "Workstream Server Container is an application container",
-    "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/ubi7/images/7.7-310",
-    "vcs-ref": "4c80c8aa26e69950ab11b87789c8fb7665b1632d",
-    "vcs-type": "git",
-    "vendor": "IBM",
-    "version": "20.0.1"
-  }
-...
-```
-
-- How to check Automation Workstream Services pod status and related logs
-
-There are 12 Automation Workstream Services-related pods in total. Run the `oc get pod` command to see the status of each pod:
-```
-NAME                                                         READY   STATUS      RESTARTS   AGE
-demo-ibm-pfs-0                                               1/1     Running     0          5m19s
-demo-ibm-pfs-dbareg-54d9db6cf5-98lwt                         1/1     Running     0          4m56s
-demo-ibm-pfs-elasticsearch-0                                 2/2     Running     0          5m32s
-demo-ibm-pfs-umsregistry-job-8vjt7                           0/1     Completed   0          5m10s
-demo-instance1-baw-jms-0                                     1/1     Running     0          5m58s
-demo-instance1-ibm-iaws-ibm-workplace-init-job-8qlgz         0/1     Completed   0          7m41s
-demo-instance1-ibm-iaws-server-0                             1/1     Running     0          7m23s
-demo-instance1-ibm-iaws-server-content-init-job-fjlwq        0/1     Completed   0          7m32s
-demo-instance1-ibm-iaws-server-database-init-job-ll5c6       0/1     Completed   0          8m
-demo-instance1-ibm-iaws-server-database-init-job-pfs-wqm8b   0/1     Completed   0          7m54s
-demo-instance1-ibm-iaws-server-ltpa-l9rnr                    0/1     Completed   0          8m7s
-demo-instance1-ibm-iaws-server-umsregistry-job-m27h6         0/1     Completed   0          7m49s
-...
-```
-
-For pods controlled by a Job, the desired `STATUS` is `Completed` and the desired `READY` value is `0/1`, while for pods controlled by a Deployment or StatefulSet, the desired `STATUS` is `Running` and the desired `READY` value is `1/1` or `2/2`. You can see detailed information for each pod by running the `oc describe pod <POD_NAME>` command, and detailed logs by running the `oc logs <POD_NAME>` command. If a pod doesn't reach its desired status, use these commands to determine what's blocking it.
-
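-
-While the pods initialize, you can also watch status changes as they happen (a sketch; `<NAMESPACE>` is your project):
-```
-oc get pod -n <NAMESPACE> -w
-```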
-
-The following example shows how to analyze the pod "demo-instance1-ibm-iaws-server-0":
- -```yaml -[root@borstal-inf ~]# oc describe pod demo-instance1-ibm-iaws-server-0 -Name: demo-instance1-ibm-iaws-server-0 -Namespace: demo-project -Priority: 0 -Node: worker0.borstal.os.fyre.ibm.com/ -Start Time: Thu, 19 Mar 2020 08:06:13 -0700 -Labels: app.kubernetes.io/component=server - app.kubernetes.io/instance=demo-instance1 - app.kubernetes.io/managed-by=Operator - app.kubernetes.io/name=workflow-server - app.kubernetes.io/version=20.0.1 - controller-revision-hash=demo-instance1-ibm-iaws-server-868f989df6 - release=20.0.1 - statefulset.kubernetes.io/pod-name=demo-instance1-ibm-iaws-server-0 -Annotations: cloudpakId: 94a9c8c358bb43ba8fbdea62e7e166a5 - cloudpakName: IBM Cloud Pak for Automation - cloudpakVersion: 20.0.1 - jvmOptionsConfigurationChecksum: da39a3ee5e6b4b0d3255bfef95601890afd80709 - k8s.v1.cni.cncf.io/networks-status: - [{ - "name": "openshift-sdn", - "interface": "eth0", - "ips": [ - "10.254.9.15" - ], - "dns": {}, - "default-route": [ - "10.254.8.1" - ] - }] - openshift.io/scc: dbamc - productChargedContainers: wf-ps - productCloudpakRatio: 1:5 - productID: 534103df30f0477bb45ec3e02ef6aba0 - productMetric: VIRTUAL_PROCESSOR_CORE - productName: IBM Cloud Pak for Automation - Automation Workstream Services - productVersion: 20.0.1 -Status: Running -IP: 10.254.9.15 -IPs: - IP: 10.254.9.15 -Controlled By: StatefulSet/demo-instance1-ibm-iaws-server -Init Containers: - ssl-init-container: - Container ID: cri-o://d746ac147622e4f236df4469ef24263c2eec9df90d85555e0f159259cf8458a7 - Image: image-registry.openshift-image-registry.svc:5000/demo-project/dba-keytool-initcontainer@sha256:a428892c7144640f9cf4e15120be4af9c7d1470fd6bf5e6fc8e3294b2feb2147 - Image ID: image-registry.openshift-image-registry.svc:5000/demo-project/dba-keytool-initcontainer@sha256:a428892c7144640f9cf4e15120be4af9c7d1470fd6bf5e6fc8e3294b2feb2147 - Port: - Host Port: - State: Terminated - Reason: Completed - Exit Code: 0 - Started: Thu, 19 Mar 2020 08:06:22 -0700 - Finished: Thu, 19 Mar 2020 08:06:45 -0700 - Ready: True - Restart Count: 0 - Limits: - cpu: 500m - memory: 256Mi - Requests: - cpu: 200m - memory: 128Mi - Environment: - KEYTOOL_ACTION: GENERATE-BOTH - KEYSTORE_PASSWORD: Optional: false - Mounts: - /shared/resources/cert-trusted from trust-tls-volume (rw) - /shared/resources/keypair from keypair-secret (rw) - /shared/tls from key-trust-store (rw) - /var/run/secrets/kubernetes.io/serviceaccount from demo-instance1-ibm-iaws-sa-token-4zmxj (ro) - dbcompatibility-init-container: - Container ID: cri-o://a334e75dae19335d6fc0af4060726e79dd2fe46d3fe7102ed758a724d6c33a3f - Image: image-registry.openshift-image-registry.svc:5000/demo-project/dba-dbcompatibility-initcontainer@sha256:7f03cacee6332b9f1e8f1d506123b1cd98574c07294638418cb37d29670b0e1b - Image ID: image-registry.openshift-image-registry.svc:5000/demo-project/dba-dbcompatibility-initcontainer@sha256:7f03cacee6332b9f1e8f1d506123b1cd98574c07294638418cb37d29670b0e1b - Port: - Host Port: - State: Terminated - Reason: Completed - Exit Code: 0 - Started: Thu, 19 Mar 2020 08:06:46 -0700 - Finished: Thu, 19 Mar 2020 08:07:07 -0700 - Ready: True - Restart Count: 0 - Limits: - cpu: 500m - memory: 256Mi - Requests: - cpu: 200m - memory: 128Mi - Environment: - EXPECTED_SCHEMA_VERSION: 1.0.0 - DATABASE_TYPE: DB2 - DATABASE_HOST_NAME: - DATABASE_PORT: 50000 - DATABASE_NAME: BPMDB - DATABASE_USER: Optional: false - DATABASE_PWD: Optional: false - DATABASE_SCHEMA: Optional: false - SCHEMA_VERSION_TABLE_NAME: PFS_SCHEMA_PROPERTIES - SCHEMA_VERSION_KEY_NAME: Version - 
SCHEMA_VERSION_KEY_COLUMN_NAME: KEY - SCHEMA_VERSION_VALUE_COLUMN_NAME: VALUE - DATABASE_ALTERNATE_PORT: 0 - RETRY_INTERVAL_FOR_CLIENT_REROUTE: 600 - MAX_RETRIES_FOR_CLIENT_REROUTE: 5 - Mounts: - /var/run/secrets/kubernetes.io/serviceaccount from demo-instance1-ibm-iaws-sa-token-4zmxj (ro) - bawdbcompatibility-init-container: - Container ID: cri-o://e612b9dede1bd31e025b7b93fbedc3adaddeb6aa1e1ba249442bb88a797abdb5 - Image: image-registry.openshift-image-registry.svc:5000/demo-project/dba-dbcompatibility-initcontainer@sha256:7f03cacee6332b9f1e8f1d506123b1cd98574c07294638418cb37d29670b0e1b - Image ID: image-registry.openshift-image-registry.svc:5000/demo-project/dba-dbcompatibility-initcontainer@sha256:7f03cacee6332b9f1e8f1d506123b1cd98574c07294638418cb37d29670b0e1b - Port: - Host Port: - State: Terminated - Reason: Completed - Exit Code: 0 - Started: Thu, 19 Mar 2020 08:07:08 -0700 - Finished: Thu, 19 Mar 2020 08:09:24 -0700 - Ready: True - Restart Count: 0 - Limits: - cpu: 500m - memory: 256Mi - Requests: - cpu: 200m - memory: 128Mi - Environment: - EXPECTED_SCHEMA_VERSION: 1.2.0 - DATABASE_TYPE: DB2 - DATABASE_HOST_NAME: - DATABASE_PORT: 50000 - DATABASE_NAME: BPMDB - DATABASE_USER: Optional: false - DATABASE_PWD: Optional: false - SCHEMA_VERSION_TABLE_NAME: LSW_SYSTEM_SCHEMA - SCHEMA_VERSION_KEY_NAME: DatabaseSchemaVersion - SCHEMA_VERSION_KEY_COLUMN_NAME: PROPNAME - SCHEMA_VERSION_VALUE_COLUMN_NAME: PROPVALUE - DATABASE_ALTERNATE_PORT: 0 - RETRY_INTERVAL_FOR_CLIENT_REROUTE: 600 - MAX_RETRIES_FOR_CLIENT_REROUTE: 5 - Mounts: - /var/run/secrets/kubernetes.io/serviceaccount from demo-instance1-ibm-iaws-sa-token-4zmxj (ro) -Containers: - wf-ps: - Container ID: cri-o://8acdaa919f11964a23d47f96c02686779202439b03cebf484583dea6770ad8f8 - Image: image-registry.openshift-image-registry.svc:5000/demo-project/iaws-server@sha256:799e69949f9f0ad2554eaafc4b5825e8f4f822fb2c8f183cee6c73320934814c - Image ID: image-registry.openshift-image-registry.svc:5000/demo-project/iaws-server@sha256:799e69949f9f0ad2554eaafc4b5825e8f4f822fb2c8f183cee6c73320934814c - Port: - Host Port: - State: Running - Started: Thu, 19 Mar 2020 08:10:02 -0700 - Ready: True - Restart Count: 0 - Limits: - cpu: 3 - memory: 2096Mi - Requests: - cpu: 1 - memory: 1Gi - Readiness: exec [/bin/bash -c if [ "$(curl -sfk https://localhost:9443/ps/rest/v1/config/getProcessServerDatabaseSchemaVersion | grep -Po '(?<="status":")(.*?)(?=")')" != "200" ]; then exit 1; fi] delay=180s timeout=1s period=5s #success=1 #failure=3 - Environment: - JMS_SERVER_HOST: demo-instance1-baw-jms-service - UMS_CLIENT_ID: demo-instance1-ibm-iaws-server-oidc-client - UMS_CLIENT_SECRET: Optional: false - UMS_HOST: ums..nip.io - UMS_PORT: 443 - EXTERNAL_HOSTNAME: .nip.io - EXTERNAL_PORT: 443 - WLP_LOGGING_CONSOLE_FORMAT: json - WLP_LOGGING_MESSAGE_FORMAT: basic - LDAP_ADMIN_USER: p8admin - ADMIN_USER: Optional: false - ADMIN_PASSWORD: Optional: false - UMS_ADMIN_USER: Optional: false - UMS_ADMIN_PASSWORD: Optional: false - DB_TYPE: DB2 - DB_USER: Optional: false - DB_PASSWORD: Optional: false - DB_NAME: BPMDB - DB_HOST: - DB_PORT: 50000 - SSL_KEY_PASSWORD: Optional: false - CSRF_SESSION_TOKENSALT: Optional: false - CSRF_REFERER_WHITELIST: .nip.io,ums..nip.io,ae..nip.io,icn..nip.io - CSRF_ORIGIN_WHITELIST: https://.nip.io,https://.nip.io:443,https://ums..nip.io,https://ums..nip.io:443,https://ae..nip.io,https://ae..nip.io:443,https://icn..nip.io,https://icn..nip.io:443 - CPE_URL: https://demo-cpe-svc:9443/wsi/FNCEWS40MTOM - CMIS_URL: 
https://demo-cmis-svc:9443/openfncmis_wlp/services - CPE_DOMAIN_NAME: P8DOMAIN - CPE_REPOSITORY: OS10 - CPE_OBJECTSTORE_ID: {E340B318-CF17-4C14-8902-AF713D3B0A91} - CPE_USERNAME: Optional: false - CPE_PASSWORD: Optional: false - WAIT_INTERVAL: 90000 - DB_SSLCONNECTION: false - DB_SSLCERTLOCATION: fake - DBCHECK_WAITTIME: 900 - DBCHECK_INTERVALTIME: 15 - STANDBYDB_PORT: 0 - STANDBYDB_RETRYINTERVAL: 600 - STANDBYDB_MAXRETRIES: 5 - RESOURCE_REGISTRY_URL: https://rr..nip.io:443 - RESOURCE_REGISTRY_UNAME: Optional: false - RESOURCE_REGISTRY_PASSWORD: Optional: false - CLUSTERIP_SERVICE_NAME: demo-instance1-ibm-iaws-server - APPENGINE_EXTERNAL_HOSTNAME: ae..nip.io - FRAME-ANCESTORS-SETTING: https://.nip.io https://ums..nip.io https://ae..nip.io https://icn..nip.io - ENCRYPTION_KEY: Optional: false - Mounts: - /opt/ibm/wlp/output/defaultServer/resources/security/keystore/jks/server.jks from key-trust-store (rw,path="keystore/jks/server.jks") - /opt/ibm/wlp/output/defaultServer/resources/security/truststore/jks/trusts.jks from key-trust-store (rw,path="truststore/jks/trusts.jks") - /opt/ibm/wlp/usr/servers/defaultServer/config/100SCIM.xml from overwrite-configurations (rw,path="100SCIM.xml") - /opt/ibm/wlp/usr/servers/defaultServer/configDropins/overrides/jvm.options from overwrite-configurations (rw,path="jvm.options") - /opt/ibm/wlp/usr/servers/defaultServer/configDropins/overrides/oidc-rp.xml from overwrite-configurations (rw,path="oidc-rp.xml") - /opt/ibm/wlp/usr/servers/defaultServer/configDropins/overrides/processServer_variables_system.xml from overwrite-configurations (rw,path="processServer_variables_system.xml") - /opt/ibm/wlp/usr/servers/defaultServer/configDropins/overrides/security100.xml from overwrite-configurations (rw,path="security.xml") - /opt/ibm/wlp/usr/servers/defaultServer/configDropins/overrides/ssl.xml from overwrite-configurations (rw,path="ssl.xml") - /opt/ibm/wlp/usr/servers/defaultServer/configDropins/overrides/z-custom.xml from overwrite-configurations (rw,path="z-custom.xml") - /opt/ibm/wlp/usr/servers/defaultServer/resources/security from ltpa-store (rw) - /opt/ibm/wlp/usr/shared/resources/config from configurations (rw) - /var/run/secrets/kubernetes.io/serviceaccount from demo-instance1-ibm-iaws-sa-token-4zmxj (ro) -Conditions: - Type Status - Initialized True - Ready True - ContainersReady True - PodScheduled True -Volumes: - key-trust-store: - Type: EmptyDir (a temporary directory that shares a pod's lifetime) - Medium: - SizeLimit: - trust-tls-volume: - Type: Projected (a volume that contains injected data from multiple sources) - SecretName: icp4a-root-ca - SecretOptionalName: - SecretName: icp4a-root-ca - SecretOptionalName: - keypair-secret: - Type: Secret (a volume populated by a Secret) - SecretName: ibm-baw-tls - Optional: false - ltpa-store: - Type: Secret (a volume populated by a Secret) - SecretName: demo-instance1-ibm-iaws-server-ltpa - Optional: false - overwrite-configurations: - Type: ConfigMap (a volume populated by a ConfigMap) - Name: demo-instance1-ibm-iaws-server-overwrite-config - Optional: false - configurations: - Type: ConfigMap (a volume populated by a ConfigMap) - Name: demo-instance1-ibm-iaws-server-config - Optional: false - demo-instance1-ibm-iaws-sa-token-4zmxj: - Type: Secret (a volume populated by a Secret) - SecretName: demo-instance1-ibm-iaws-sa-token-4zmxj - Optional: false -QoS Class: Burstable -Node-Selectors: -Tolerations: node.kubernetes.io/memory-pressure:NoSchedule - node.kubernetes.io/not-ready:NoExecute for 300s - 
node.kubernetes.io/unreachable:NoExecute for 300s -Events: - Type Reason Age From Message - ---- ------ ---- ---- ------- - Normal Scheduled default-scheduler Successfully assigned demo-project/demo-instance1-ibm-iaws-server-0 to worker0.borstal.os.fyre.ibm.com - Normal Pulled 11m kubelet, worker0.borstal.os.fyre.ibm.com Container image "image-registry.openshift-image-registry.svc:5000/demo-project/dba-keytool-initcontainer@sha256:a428892c7144640f9cf4e15120be4af9c7d1470fd6bf5e6fc8e3294b2feb2147" already present on machine - Normal Created 11m kubelet, worker0.borstal.os.fyre.ibm.com Created container ssl-init-container - Normal Started 11m kubelet, worker0.borstal.os.fyre.ibm.com Started container ssl-init-container - Normal Pulled 11m kubelet, worker0.borstal.os.fyre.ibm.com Container image "image-registry.openshift-image-registry.svc:5000/demo-project/dba-dbcompatibility-initcontainer@sha256:7f03cacee6332b9f1e8f1d506123b1cd98574c07294638418cb37d29670b0e1b" already present on machine - Normal Created 11m kubelet, worker0.borstal.os.fyre.ibm.com Created container dbcompatibility-init-container - Normal Started 11m kubelet, worker0.borstal.os.fyre.ibm.com Started container dbcompatibility-init-container - Normal Pulled 10m kubelet, worker0.borstal.os.fyre.ibm.com Container image "image-registry.openshift-image-registry.svc:5000/demo-project/dba-dbcompatibility-initcontainer@sha256:7f03cacee6332b9f1e8f1d506123b1cd98574c07294638418cb37d29670b0e1b" already present on machine - Normal Created 10m kubelet, worker0.borstal.os.fyre.ibm.com Created container bawdbcompatibility-init-container - Normal Started 10m kubelet, worker0.borstal.os.fyre.ibm.com Started container bawdbcompatibility-init-container - Normal Pulling 8m36s kubelet, worker0.borstal.os.fyre.ibm.com Pulling image "image-registry.openshift-image-registry.svc:5000/demo-project/iaws-server@sha256:799e69949f9f0ad2554eaafc4b5825e8f4f822fb2c8f183cee6c73320934814c" - Normal Pulled 7m59s kubelet, worker0.borstal.os.fyre.ibm.com Successfully pulled image "image-registry.openshift-image-registry.svc:5000/demo-project/iaws-server@sha256:799e69949f9f0ad2554eaafc4b5825e8f4f822fb2c8f183cee6c73320934814c" - Normal Created 7m58s kubelet, worker0.borstal.os.fyre.ibm.com Created container wf-ps - Normal Started 7m58s kubelet, worker0.borstal.os.fyre.ibm.com Started container wf-ps -``` - -Pod "demo-instance1-ibm-iaws-server-0" has three Init Containers, `ssl-init-container`, `dbcompatibility-init-container` , and `bawdbcompatibility-init-container`. For all Init Containers, the desired State should be `Terminated` with Reason `Completed`. For Container `wf-ps`, the desired Ready state should be `True`. - -
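-
-Rather than reading the whole description, you can extract just the init container states with a JSONPath query (a sketch against the same pod):
-```
-oc get pod demo-instance1-ibm-iaws-server-0 -o jsonpath='{range .status.initContainerStatuses[*]}{.name}{": "}{.state}{"\n"}{end}'
-```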
- - -- Error: failed to start container "demo-cpe-deploy" or "demo-navigator-deploy" - -
-
-The detailed error message is something like `Error response from daemon: oci runtime error: container_linux.go:235: starting container process caused "container init exited prematurely"`. This kind of error is caused by incorrectly bound PVs and PVCs for IBM Content Navigator and Content Platform Engine. To resolve it, delete the IBM Content Navigator and Content Platform Engine related PVCs, then delete the related PVs and NFS folders. Then, re-create them in the reverse order.
-
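-
-A sketch of the recovery sequence (the PVC and PV names are placeholders; check `oc get pvc -n <NAMESPACE>` and `oc get pv` for the actual names):
-```
-oc delete pvc <ICN_OR_CPE_PVC_NAME> -n <NAMESPACE>
-oc delete pv <ICN_OR_CPE_PV_NAME>
-# Re-create the NFS folders first, then the PVs, and finally the PVCs
-```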
-
-- Failed to start Pod "demo-ibm-pfs-elasticsearch-0"
-
-Check the value of the `pfs_configuration.elasticsearch.privileged` property in your Custom Resource configuration. If it's set to `true`, run the `oc describe pod demo-ibm-pfs-elasticsearch-0` command to check the SecurityContextConstraint of pod `demo-ibm-pfs-elasticsearch-0`, and ensure that the annotation is set to `openshift.io/scc: ibm-pfs-privileged-scc`:
-```
-# oc describe pod demo-ibm-pfs-elasticsearch-0
-Name:               demo-ibm-pfs-elasticsearch-0
-Namespace:          demo-project
-Priority:           0
-Node:               worker2.borstal.os.fyre.ibm.com/
-Start Time:         Thu, 19 Mar 2020 08:25:09 -0700
-Labels:             app.kubernetes.io/component=pfs-elasticsearch
-                    app.kubernetes.io/instance=demo
-                    app.kubernetes.io/managed-by=Operator
-                    app.kubernetes.io/name=demo-ibm-pfs-elasticsearch
-                    app.kubernetes.io/version=20.0.1
-                    controller-revision-hash=demo-ibm-pfs-elasticsearch-665844b85f
-                    release=20.0.1
-                    role=elasticsearch
-                    statefulset.kubernetes.io/pod-name=demo-ibm-pfs-elasticsearch-0
-Annotations:        checksum/config: 6a3747ddc8ce13afdfc85b6793b847d035e8edd5
-                    cloudpakId: 94a9c8c358bb43ba8fbdea62e7e166a5
-                    cloudpakName: IBM Cloud Pak for Automation
-                    cloudpakVersion: 20.0.1
-                    k8s.v1.cni.cncf.io/networks-status:
-                      [{
-                          "name": "openshift-sdn",
-                          "interface": "eth0",
-                          "ips": [
-                              "10.254.4.254"
-                          ],
-                          "dns": {},
-                          "default-route": [
-                              "10.254.4.1"
-                          ]
-                      }]
-                    openshift.io/scc: ibm-pfs-privileged-scc
-                    productChargedContainers:
-                    productCloudpakRatio: 1:1
-                    productID: 534103df30f0477bb45ec3e02ef6aba0
-                    productMetric: VIRTUAL_PROCESSOR_CORE
-                    productName: IBM Cloud Pak for Automation - Automation Workstream Services
-                    productVersion: 20.0.1
-Status:             Running
-```
-
-- To enable Automation Workstream Services container logs:
-
-Use the following specification to enable Automation Workstream Services container logs in the Custom Resource configuration:
-```yaml
-iaws_configuration:
-  - name: instance1
-    iaws_server:
-      logs:
-        console_format: "json"
-        console_log_level: "INFO"
-        console_source: "message,trace,accessLog,ffdc,audit"
-        message_format: "basic"
-        trace_format: "ENHANCED"
-        trace_specification: "WLE.*=all:com.ibm.bpm.*=all:com.ibm.workflow.*=all"
-```
-
-Then, run the `oc logs <IAWS_pod_name>` command to see the logs, or log in to the Automation Workstream Services container to see the logs.
-
-The following example shows how to check the Automation Workstream Services container logs:
-```
-$ oc exec -it demo-instance1-ibm-iaws-server-0 bash
-$ cat /logs/application/liberty-message.log
-```
-
-- To customize the Process Federation Server Liberty server trace setting
-
-Use the following specification to enable Process Federation Server container logs in the Custom Resource configuration:
-```yaml
-pfs_configuration:
-  pfs:
-    logs:
-      console_format: "json"
-      console_log_level: "INFO"
-      console_source: "message,trace,accessLog,ffdc,audit"
-      trace_format: "ENHANCED"
-      trace_specification: "*=info"
-```
-
-Then, run the `oc logs <PFS_pod_name>` command to see the logs, or log in to the Process Federation Server container to see the logs.
-
-The following example shows how to check the Process Federation Server container logs:
-```
-$ oc exec -it demo-ibm-pfs-0 bash
-$ cat /logs/application/liberty-message.log
-```
diff --git a/IAWS/README_upgrade.md b/IAWS/README_upgrade.md
deleted file mode 100644
index e7cdd515..00000000
--- a/IAWS/README_upgrade.md
+++ /dev/null
@@ -1,47 +0,0 @@
-# Upgrading from IBM Automation Workstream Services 19.0.3 to 20.0.1
-
-These instructions cover the upgrade of IBM Automation Workstream Services from 19.0.3 to 20.0.1.
-
-## Introduction
-
-If you installed Automation Workstream Services 19.0.3 and want to continue to use your 19.0.3 applications in Automation Workstream Services 20.0.1, you can upgrade your applications from Automation Workstream Services 19.0.3 to 20.0.1.
-
-## Step 1: Remove labels
-
-You must remove the `release` label from the `<metadata_name>-ibm-pfs-dbareg` Deployment and the `<metadata_name>-ibm-pfs-elasticsearch` StatefulSet. To remove these labels, run the following commands:
-
-```sh
-$ oc label deploy <metadata_name>-ibm-pfs-dbareg release-
-$ oc label sts <metadata_name>-ibm-pfs-elasticsearch release-
-```
-
-**Note:** `<metadata_name>` is the name you set as the `metadata.name` in your 19.0.3 custom resource file.
-
-## Step 2: Update the custom resource YAML file for your Automation Workstream Services 20.0.1 deployment
-
-Get the custom resource YAML file that you used to deploy Automation Workstream Services 19.0.3, and edit it by following these steps:
-
-1. Change the release version from 19.0.3 to 20.0.1.
-
-2. Add `appVersion: 20.0.1` to the `spec` section. See the [sample_min_value.yaml](configuration/sample_min_value.yaml) file.
-
-3. Update the `iaws_configuration` and `pfs_configuration` sections.
-
-   * If you just want to update Automation Workstream Services with the minimal required values, use the values in the [sample_min_value.yaml](configuration/sample_min_value.yaml) file.
-   * Add `admin_user` to the `iaws_configuration[x].iaws_server` sections
-   * Change `iaws_configuration[x].wfs` to `iaws_configuration[x].iaws_server`
-   * Change `iaws_configuration[x].wfs.workflow_server_secret` to `iaws_configuration[x].iaws_server.workstream_server_secret`
-   * Change the image tags from 19.0.3 to 20.0.1 in all sections
-
-   * If you want to use the full configuration list and customize the values, update the required values in the `iaws_configuration` and `pfs_configuration` sections in your custom resource YAML file based on your configuration. See the [configuration list](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_iaws_params.html) for each parameter.
-
-4. You can apply or remove the `ums_configuration.external_tls_secret_name` and `ums_configuration.external_tls_ca_secret_name` secrets according to your situation, by referring to the [UMS SSL configuration](../UMS/README_config_SSL.md) and [UMS upgrading configuration](../UMS/README_upgrade.md).
-
-## Step 3: Update the configuration sections for other deployments
-
-To update the configuration sections for other components, such as User Management Service and IBM Business Automation Navigator, go back to the relevant upgrade page and follow its instructions to update your custom resource YAML file.
- -Upgrade pages: - - [Managed OpenShift upgrade page](../platform/roks/upgrade.md) - - [OpenShift upgrade page](../platform/ocp/upgrade.md) - - [Certified Kubernetes upgrade page](../platform/k8s/upgrade.md) diff --git a/IAWS/configuration/sample_min_value.yaml b/IAWS/configuration/sample_min_value.yaml deleted file mode 100644 index 43d26014..00000000 --- a/IAWS/configuration/sample_min_value.yaml +++ /dev/null @@ -1,229 +0,0 @@ -apiVersion: icp4a.ibm.com/v1 -kind: ICP4ACluster -metadata: - name: demo -spec: - appVersion: 20.0.1 - iaws_configuration: - - name: instance1 - iaws_server: - service_type: "Route" - workstream_server_secret: ibm-iaws-server-secret - hostname: - port: 443 - replicas: 1 - admin_user: - image: - repository: cp.icr.io/cp/cp4a/iaws/iaws-server - tag: 20.0.1 - pullPolicy: IfNotPresent - pfs_bpd_database_init_job: - repository: cp.icr.io/cp/cp4a/iaws/pfs-bpd-database-init-prod - tag: 20.0.1 - pullPolicy: IfNotPresent - upgrade_job: - repository: cp.icr.io/cp/cp4a/iaws/iaws-server-dbhandling - tag: 20.0.1 - pullPolicy: IfNotPresent - ibm_workplace_job: - repository: cp.icr.io/cp/cp4a/iaws/iaws-ibm-workplace - tag: 20.0.1 - pull_policy: IfNotPresent - database: - ssl: false - sslsecretname: ibm-dba-baw-db2-cacert - type: "DB2" - server_name: - database_name: "BPMDB" - port: "50000" - secret_name: ibm-baw-wfs-server-db-secret - dbcheck: - wait_time: 900 - interval_time: 15 - hadr: - standbydb_host: - standbydb_port: - retryinterval: - maxretries: - content_integration: - init_job_image: - repository: cp.icr.io/cp/cp4a/iaws/iaws-ps-content-integration - tag: 20.0.1 - pull_policy: IfNotPresent - appengine: - hostname: - admin_secret_name: ae-admin-secret-instance1 - resource_registry: - hostname: - port: 443 - admin_secret_name: rr-admin-secret - jms: - image: - repository: cp.icr.io/cp/cp4a/iaws/jms - tag: 20.0.1 - pull_policy: IfNotPresent - tls: - tls_secret_name: dummy-jms-tls-secret - resources: - limits: - memory: "2Gi" - cpu: "1000m" - requests: - memory: "512Mi" - cpu: "200m" - storage: - persistent: true - size: "2Gi" - use_dynamic_provisioning: false - access_modes: - - ReadWriteOnce - storage_class: "jms-storage-class" - resources: - limits: - cpu: 3 - memory: 2096Mi - requests: - cpu: 2 - memory: 1048Mi - probe: - ws: - liveness_probe: - initial_delay_seconds: 240 - readinessProbe: - initial_delay_seconds: 180 - logs: - console_format: "json" - console_log_level: "INFO" - console_source: "message,trace,accessLog,ffdc,audit" - message_format: "basic" - trace_format: "ENHANCED" - trace_specification: "*=info" - custom_xml_secret_name: - lombardi_custom_xml_secret_name: - - pfs_configuration: - pfs: - hostname: - port: 443 - service_type: Route - image: - repository: cp.icr.io/cp/cp4a/iaws/pfs-prod - tag: 20.0.1 - pull_policy: IfNotPresent - liveness_probe: - initial_delay_seconds: 60 - readiness_probe: - initial_delay_seconds: 60 - replicas: 1 - service_account: - anti_affinity: hard - admin_secret_name: ibm-pfs-admin-secret - config_dropins_overrides_secret: ibm-pfs-config - resources_security_secret: "" - external_tls_secret: - external_tls_ca_secret: - tls: - tls_secret_name: - tls_trust_list: - resources: - requests: - cpu: 500m - memory: 512Mi - limits: - cpu: 2 - memory: 4Gi - saved_searches: - index_name: ibmpfssavedsearches - index_number_of_shards: 3 - index_number_of_replicas: 1 - index_batch_size: 100 - update_lock_expiration: 5m - unique_constraint_expiration: 5m - security: - sso: - domain_name: - cookie_name: "ltpatoken2" - ltpa: - filename: 
"ltpa.keys" - expiration: "120m" - monitor_interval: "60s" - ssl_protocol: SSL - executor: - max_threads: "80" - core_threads: "40" - rest: - user_group_check_interval: "300s" - system_status_check_interval: "60s" - bd_fields_check_interval: "300s" - custom_env_variables: - names: - secret: - output: - storage: - use_dynamic_provisioning: false - size: 5Gi - storage_class: "pfs-output" - logs: - storage: - use_dynamic_provisioning: false - size: 5Gi - storage_class: "pfs-logs" - dba_resource_registry: - image: - repository: cp.icr.io/cp/cp4a/aae/dba-etcd - tag: 20.0.1 - pull_policy: IfNotPresent - lease_ttl: 120 - pfs_check_interval: 10 - pfs_connect_timeout: 10 - pfs_response_timeout: 30 - pfs_registration_key: /dba/appresources/IBM_PFS/PFS_SYSTEM - tls_secret: rr-tls-client-secret - resources: - limits: - memory: ‘512Mi’ - cpu: ‘500m’ - requests: - memory: ‘512Mi’ - cpu: ‘200m’ - elasticsearch: - es_image: - repository: cp.icr.io/cp/cp4a/iaws/pfs-elasticsearch-prod - tag: 20.0.1 - pull_policy: IfNotPresent - pfs_init_image: - repository: cp.icr.io/cp/cp4a/iaws/pfs-init-prod - tag: 20.0.1 - pull_policy: IfNotPresent - nginx_image: - repository: cp.icr.io/cp/cp4a/iaws/pfs-nginx-prod - tag: 20.0.1 - pull_policy: IfNotPresent - replicas: 1 - service_type: NodePort - external_port: - anti_affinity: hard - service_account: ibm-pfs-es-service-account - privileged: true - probe_initial_delay: 90 - heap_size: "1024m" - resources: - limits: - memory: "2Gi" - cpu: "1000m" - requests: - memory: "1Gi" - cpu: "100m" - storage: - persistent: true - use_dynamic_provisioning: false - size: 10Gi - storage_class: "pfs-es" - snapshot_storage: - enabled: false - use_dynamic_provisioning: false - size: 30Gi - storage_class_name: "" - existing_claim_name: "" - security: - users_secret: "" diff --git a/LICENSE b/LICENSE index b4f3323d..6a93c809 100644 --- a/LICENSE +++ b/LICENSE @@ -1,11 +1,146 @@ -The translated license terms can be viewed here: http://www14.software.ibm.com/cgi-bin/weblap/lap.pl?li_formnum=L-ASAY-BNFHX2 +The translated license terms can be viewed here: http://www14.software.ibm.com/cgi-bin/weblap/lap.pl?li_formnum=L-ASAY-BMUN76 + +IMPORTANT: READ CAREFULLY + +Two license agreements are presented below. + +1. IBM International License Agreement for Evaluation of Programs +2. IBM International Program License Agreement + +If Licensee is obtaining the Program for purposes of productive use (other than evaluation, testing, trial "try or buy," or demonstration): By clicking on the "Accept" button below, Licensee accepts the IBM International Program License Agreement, without modification. + +If Licensee is obtaining the Program for the purpose of evaluation, testing, trial "try or buy," or demonstration (collectively, an "Evaluation"): By clicking on the "Accept" button below, Licensee accepts both (i) the IBM International License Agreement for Evaluation of Programs (the "Evaluation License"), without modification; and (ii) the IBM International Program License Agreement (the "IPLA"), without modification. + +The Evaluation License will apply during the term of Licensee's Evaluation. + +The IPLA will automatically apply if Licensee elects to retain the Program after the Evaluation (or obtain additional copies of the Program for use after the Evaluation) by entering into a procurement agreement (e.g., the IBM International Passport Advantage or the IBM Passport Advantage Express agreements). 
+ +The Evaluation License and the IPLA are not in effect concurrently; neither modifies the other; and each is independent of the other. + +The complete text of each of these two license agreements follow. + + + +LICENSE INFORMATION + +The Programs listed below are licensed under the following License Information terms and conditions in addition to the Program license terms previously agreed to by Client and IBM. If Client does not have previously agreed to license terms in effect for the Program, the International License Agreement for Evaluation of Programs (Z125-5543-05) applies. + +Program Name (Program Number): +IBM Cloud Pak for Automation 20.0.2 (Evaluation) + +The following standard terms apply to Licensee's use of the Program. + +Evaluation Period + +The evaluation period begins on the date that Licensee agrees to the terms of this Agreement and ends after 60 days. + +Prohibited Uses + +Licensee may not use or authorize others to use the Program if failure of the Program could lead to death, bodily injury, or property or environmental damage. + +Multi-Product Install Image + +The Program is provided as part of a multi-product install image. Licensee is authorized to install and use only the Program (and its Bundled or Supporting Programs, if any) for which a valid entitlement is obtained and may not install or use any of the other software included in the image unless Licensee has acquired separate entitlements for that other software. + +Bundled Programs + +Licensee is authorized to install and use the Bundled Programs identified below. A Bundled Program may be accompanied by license terms, and those terms, if any, apply to Licensee's use of that Bundled Program. In the event of conflict, the terms in this License Information document supersede the Bundled Program's terms. The Principal Program and any Bundled Programs are all part of the Program, as a whole. Therefore, Licensee must obtain sufficient entitlements to the Program, as a whole, to cover Licensee's installation and use of all of the Bundled Programs, unless separate entitlements are provided within this License Information document. For example, if this Program were licensed on a PVU (Processor Value Unit) basis and Licensee were to install the Principal Program or a Bundled Program on a 100 PVU machine (physical or virtual) and another Bundled Program on a second 100 PVU machine, Licensee would be required to obtain 200 PVU entitlements to the Program. + +Bundled Programs: +IBM FileNet Content Manager +IBM FileNet Content Manager for Non-Production Environment +IBM Datacap Processor Value Unit v9 +IBM Datacap for Non-Production Environment Processor Value Unit v9 +IBM Datacap Insight Edition Add-On Processor Value Unit v9 +IBM Datacap Insight Edition Add-on for Non-Production Environment Processor Value Unit v9 +IBM Content Collector for Email +IBM Content Collector for File Systems +IBM Content Collector for Microsoft SharePoint +IBM Content Collector for SAP Applications +IBM Business Automation Workflow Enterprise +IBM Business Automation Workflow Enterprise for Non-Production Environment +IBM Operational Decision Manager Server +IBM Operational Decision Manager Server for Non-Production Environment +IBM Enterprise Records + +Supporting Programs + +Licensee is authorized to install and use the Supporting Programs identified below. Licensee is authorized to install and use such Supporting Programs only to support Licensee's use of the Principal Program under this Agreement. 
The phrase "to support Licensee's use" would only include those uses that are necessary or otherwise directly related to a licensed use of the Principal Program or another Supporting Program. The Supporting Programs may not be used for any other purpose. A Supporting Program may be accompanied by license terms, and those terms, if any, apply to Licensee's use of that Supporting Program. In the event of conflict, the terms in this License Information document supersede the Supporting Program's terms. Licensee must obtain sufficient entitlements to the Program, as a whole, to cover Licensee's installation and use of all of the Supporting Programs, unless separate entitlements are provided within this License Information document. For example, if this Program were licensed on a PVU (Processor Value Unit) basis and Licensee were to install the Principal Program or a Supporting Program on a 100 PVU machine (physical or virtual) and another Supporting Program on a second 100 PVU machine, Licensee would be required to obtain 200 PVU entitlements to the Program. + +Supporting Programs: +IBM DB2 Standard Edition 11.5 +IBM WebSphere Liberty 19.0 +IBM WebSphere Application Server Network Deployment +IBM Event Stream 2019.4 + +Prohibited Components + +Notwithstanding any provision in the Agreement, Licensee is not authorized to use any of the following components or functions of the Program: +pureScale clustering technology (of IBM DB2 Standard Edition) +DB2 Connect (of IBM DB2 Standard Edition) +SQL Warehousing Tool (SQW) (of IBM DB2 Standard Edition) +IBM InfoSphere Data Architect 9.1.4 (of IBM DB2 Standard Edition) +IBM InfoSphere Data Replication 11.4 (CDC component) (of IBM DB2 Standard Edition) +IBM InfoSphere Data Replication 11.4 (SQL Replication, Q Replication and CDC for Db2 LUW components) +IBM WebSphere Application Server 9.0 (of IBM DB2 Standard Edition) +IBM Data Server Manager Enterprise Edition 2.1.5 (of IBM DB2 Standard Edition) + +Separately Licensed Code + +The provisions of this paragraph do not apply to the extent they are held to be invalid or unenforceable under the law that governs this license. Each of the components listed in the NON_IBM_LICENSE file is considered "Separately Licensed Code". IBM Separately Licensed Code is licensed to Licensee under the terms of the applicable third party license agreement(s) set forth in the NON_IBM_LICENSE file(s) that accompanies the Program. Notwithstanding any of the terms in the Agreement, or any other agreement Licensee may have with IBM, the terms of such third party license agreement(s) governs Licensee's use of all Separately Licensed Code unless otherwise noted below. + +Future Program updates or fixes may contain additional Separately Licensed Code. Such additional Separately Licensed Code and related licenses are listed in the applicable NON_IBM_LICENSE file that accompanies the Program update or fix. Licensee acknowledges that Licensee has read and agrees to the license agreements contained in the NON_IBM_LICENSE file(s). If Licensee does not agree to the terms of these third party license agreements, Licensee may not use the Separately Licensed Code listed therein. 
+ +For Programs acquired under the International Program License Agreement ("IPLA") or International Program License Agreement for Non Warranted Program ("ILAN") and Licensee is the original licensee of the Program, if Licensee does not agree with the third party license agreements, Licensee may return the Program in accordance with the terms of, and within the specified time frames stated in, the "Money-back Guarantee" section of the IPLA or ILAN IBM Agreement. + +Note: Notwithstanding any of the terms in the third party license agreement, the Agreement, or any other agreement Licensee may have with IBM: +(a) IBM provides this Separately Licensed Code to Licensee WITHOUT WARRANTIES OF ANY KIND; +(b) IBM DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED WARRANTIES AND CONDITIONS INCLUDING, BUT NOT LIMITED TO, THE WARRANTY OF TITLE, NON-INFRINGEMENT OR INTERFERENCE AND THE IMPLIED WARRANTIES AND CONDITIONS OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, WITH RESPECT TO THE SEPARATELY LICENSED CODE; +(c) IBM is not liable to Licensee, and will not defend, indemnify, or hold Licensee harmless for any claims arising from or related to the Separately Licensed Code; and +(d) IBM is not liable for any direct, indirect, incidental, special, exemplary, punitive or consequential damages including, but not limited to, lost data, lost savings, and lost profits, with respect to the Separately Licensed Code. + +Notwithstanding these exclusions, in Germany and Austria, IBM's warranty and liability for the Separately Licensed Code is governed only by the respective terms applicable for Germany and Austria in IBM license agreements. + +Note: IBM may provide limited support for some Separately Licensed Code. If such support is available, the details and any additional terms related to such support will be set forth in the License Information document. + +Privacy + +Licensee acknowledges and agrees that IBM may use cookie and tracking technologies to collect personal information in gathering product usage statistics and information designed to help improve user experience and/or to tailor interactions with users in accordance with the IBM Online Privacy Policy, available at http://www.ibm.com/privacy/. + +Source Components and Sample Materials + +The Program may include some components in source code form ("Source Components") and other materials identified as Sample Materials. Licensee may copy and modify Source Components and Sample Materials for internal use only provided such use is within the limits of the license rights under this Agreement; provided, however, that Licensee may not alter or delete any copyright information or notices contained in the Source Components or Sample Materials. IBM provides the Source Components and Sample Materials without obligation of support and "AS IS", WITH NO WARRANTY OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING THE WARRANTY OF TITLE, NON-INFRINGEMENT OR NON-INTERFERENCE AND THE IMPLIED WARRANTIES AND CONDITIONS OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + +Technology Preview Code + +Technology Preview Code (TPC) may be included or distributed with the Program or updates to it but are not part of the Program. TPC is licensed under the same terms as the Program, except as provided below. TPC will be identified as such in the Notices File (or in an updated Notices File accompanying the updates). Some or all of the TPC may not be made generally available by IBM as or in a product. 
Licensee is permitted to use TPC only for internal use for evaluation purposes and not for use in a production environment. The Notices File may limit this evaluation use to an evaluation period. If so, at the end of such evaluation period Licensee must cease using and uninstall the TPC. IBM provides the TPC without obligation of support and "AS IS," WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, WITHOUT LIMITATION, THE WARRANTY OF TITLE, NON-INFRINGEMENT OR NON-INTERFERENCE AND ANY IMPLIED WARRANTIES AND CONDITIONS OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. + +Licensee may not transfer TPC to another party except as a transfer accompanying the Program. TPC may contain a disabling device that will prevent it from being used after the evaluation period ends. Licensee will not tamper with this disabling device or the TPC. Licensee should take precautions to avoid any loss of data that might result when the TPC can no longer be used. + +Licensee assigns to IBM all right, title, and interest (including ownership of copyright) in any data, suggestions, or written materials that 1) are related to the Technology Preview Code and 2) Licensee provides to IBM. Upon IBM's request, Licensee will sign additional documents necessary to assign such rights. In addition to the foregoing, Licensee grants to IBM a non-exclusive, irrevocable, unrestricted, worldwide and paid-up right and license to a) include in any product or service any idea, know-how, concept, technique, invention, discovery or improvement, whether or not patentable, that Licensee provides to IBM related to the Technology Preview Code b) use, manufacture and market any such product or service, and c) allow others to do any of the foregoing. + +Licensee agrees to treat the following as "IBM Confidential Information" regardless of whether they contain restrictive markings indicating the confidential nature thereof or have been identified as IBM Confidential Information prior to disclosure: (a) the Technology Preview Code, (b) any information provided to Licensee by IBM with regard to the Technology Preview Code including, but not limited to, related materials such as specifications, plans, trends, strategies, benchmarks, performance characteristics, comparisons and other assessments of the Technology Preview Code, (c) any information related to Licensee's access to the Technology Preview Code including, but not limited to, passwords or other access codes, and (d) all data, feedback, suggestions and/or written materials that Licensee provides to IBM related to the Technology Preview Code. Licensee is authorized to use the IBM Confidential Information for the purpose for which it was disclosed or otherwise for the benefit of IBM. Notwithstanding any other terms of this Agreement, Licensee agrees not to communicate, publish, disseminate or otherwise discuss with or disclose to any third party the IBM Confidential Information (including but not limited to articles, papers or other written materials pertaining to the IBM Confidential Information) prior to IBM making such IBM Confidential Information publicly available without a non-disclosure obligation. Notwithstanding the above, Licensee does not have a confidentiality obligation to Technology Preview Code identified in the Notices File as non-confidential. 
+ +Licensee agrees to use the same care and discretion to avoid disclosure of the IBM Confidential Information as Licensee uses with Licensee's own similar information that Licensee does not wish to disclose, but in no event will such degree of care be less than reasonable care. Licensee's obligations with respect to the IBM Confidential Information will continue for a period of two years from Licensee's receipt of the IBM Confidential Information. Licensee agrees not to disclose to IBM any information that is considered confidential or proprietary to Licensee or any third party except under a signed, separate, written confidential agreement. + +Notwithstanding the existence of any confidentiality or other agreement Licensee may have with IBM pertaining to confidential information, the preceding paragraphs will govern the treatment of the IBM Confidential Information. + +Export and Import Restrictions + +This Program may contain cryptography. Transfer to, or use by, users of the Program may be prohibited or subject to export or import laws, regulations or policies, including those of the United States Export Administration Regulations. Licensee assumes all responsibility for complying with all applicable laws, regulations, and policies regarding the export, import, or use of this Program, including but not limited to, U.S. restrictions on exports or reexports. To obtain the export classification of this Program refer to: https://www.ibm.com/products/exporting/. + +Third Party Data and Services + +The Program may contain links to or be used to access third party data services, databases, web services, software, or other third party content (all, "content"). Access to this content is provided "AS-IS", WITH NO WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING THE WARRANTY OF TITLE, NON-INFRINGEMENT OR NON-INTERFERENCE AND THE IMPLIED WARRANTIES AND CONDITIONS OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. Access can be terminated by the relevant third parties at their sole discretion at any time. Licensee may be required to enter into separate agreements with the third parties for the access to or use of such content. IBM is not a party to any such separate agreements and as an express condition of this license Licensee agrees to comply with the terms of such separate agreements. + + LICENSE INFORMATION The Programs listed below are licensed under the following License Information terms and conditions in addition to the Program license terms previously agreed to by Client and IBM. If Client does not have previously agreed-to license terms in effect for the Program, the International Program License Agreement (Z125-3301-14) applies. Program Name (Program Number): -IBM Cloud Pak for Automation SR1 20.0.1 (5737-I23) +IBM Cloud Pak for Automation 20.0.2 (5737-I23) The following standard terms apply to Licensee's use of the Program. @@ -21,10 +156,6 @@ Prohibited Uses Licensee may not use or authorize others to use the Program if failure of the Program could lead to death, bodily injury, or property or environmental damage. -License Terms delivered with Program Not Applicable - -The terms of this Agreement supersede and void any electronic "click through," "shrinkwrap," or other licensing terms and conditions included with or accompanying the Program(s). - Multi-Product Install Image The Program is provided as part of a multi-product install image.
Licensee is authorized to install and use only the Program (and its Bundled or Supporting Programs, if any) for which a valid entitlement is obtained and may not install or use any of the other software included in the image unless Licensee has acquired separate entitlements for that other software. @@ -42,7 +173,7 @@ IBM Datacap Insight Edition Add-On Processor Value Unit v9 IBM Datacap Insight Edition Add-on for Non-Production Environment Processor Value Unit v9 IBM Content Collector for Email IBM Content Collector for File Systems -IBM Content collector for Microsoft SharePoint +IBM Content Collector for Microsoft SharePoint IBM Content Collector for SAP Applications IBM Business Automation Workflow Enterprise IBM Business Automation Workflow Enterprise for Non-Production Environment @@ -56,9 +187,9 @@ Licensee is authorized to install and use the Supporting Programs identified bel Supporting Programs: IBM DB2 Standard Edition 11.5 -IBM WebSphere Liberty 19.0 +IBM WebSphere Liberty IBM WebSphere Application Server Network Deployment -IBM Event Stream 2019.4 +IBM Event Streams Prohibited Components @@ -69,8 +200,8 @@ SQL Warehousing Tool (SQW) (of IBM DB2 Standard Edition) IBM InfoSphere Data Architect 9.1.4 (of IBM DB2 Standard Edition) IBM InfoSphere Data Replication 11.4 (CDC component) (of IBM DB2 Standard Edition) IBM InfoSphere Data Replication 11.4 (SQL Replication, Q Replication and CDC for Db2 LUW components) -IBM WebSphere Application Server 9.0(of IBM DB2 Standard Edition) -IBM Data Server Manager Enterprise Edition 2.1.5(of IBM DB2 Standard Edition) +IBM WebSphere Application Server 9.0 (of IBM DB2 Standard Edition) +IBM Data Server Manager Enterprise Edition 2.1.5 (of IBM DB2 Standard Edition) Development Tool @@ -78,9 +209,9 @@ This Program is designed to aid in the development of software applications and Separately Licensed Code -The provisions of this paragraph do not apply to the extent they are held to be invalid or unenforceable under the law that governs this license. Each of the components listed below is considered "Separately Licensed Code". IBM Separately Licensed Code is licensed to Licensee under the terms of the applicable third party license agreement(s) set forth in the NON_IBM_LICENSE file(s) that accompanies the Program. Notwithstanding any of the terms in the Agreement, or any other agreement Licensee may have with IBM, the terms of such third party license agreement(s) governs Licensee's use of all Separately Licensed Code unless otherwise noted below. +The provisions of this paragraph do not apply to the extent they are held to be invalid or unenforceable under the law that governs this license. Each of the components listed in the NON_IBM_LICENSE file is considered "Separately Licensed Code". IBM Separately Licensed Code is licensed to Licensee under the terms of the applicable third party license agreement(s) set forth in the NON_IBM_LICENSE file(s) that accompanies the Program. Notwithstanding any of the terms in the Agreement, or any other agreement Licensee may have with IBM, the terms of such third party license agreement(s) governs Licensee's use of all Separately Licensed Code unless otherwise noted below. -Future Program updates or fixes may contain additional Separately Licensed Code. Such additional Separately Licensed Code and related licenses are listed in another NON_IBM_LICENSE file that accompanies the Program update or fix. 
Licensee acknowledges that Licensee has read and agrees to the license agreements contained in the NON_IBM_LICENSE file(s). If Licensee does not agree to the terms of these third party license agreements, Licensee may not use the Separately Licensed Code. +Future Program updates or fixes may contain additional Separately Licensed Code. Such additional Separately Licensed Code and related licenses are listed in the applicable NON_IBM_LICENSE file that accompanies the Program update or fix. Licensee acknowledges that Licensee has read and agrees to the license agreements contained in the NON_IBM_LICENSE file(s). If Licensee does not agree to the terms of these third party license agreements, Licensee may not use the Separately Licensed Code listed therein. For Programs acquired under the International Program License Agreement ("IPLA") or International Program License Agreement for Non Warranted Program ("ILAN") and Licensee is the original licensee of the Program, if Licensee does not agree with the third party license agreements, Licensee may return the Program in accordance with the terms of, and within the specified time frames stated in, the "Money-back Guarantee" section of the IPLA or ILAN IBM Agreement. @@ -94,38 +225,6 @@ Notwithstanding these exclusions, in Germany and Austria, IBM's warranty and lia Note: IBM may provide limited support for some Separately Licensed Code. If such support is available, the details and any additional terms related to such support will be set forth in the License Information document. -The following are Separately Licensed Code: -OpenJDK 8 -MongoDB 4.0 -Ghostscript 9.27 -stunnel 4.56 -Ansible 2.7 -Collectd 5.8 -epel-release 7 -libgcrypt-devel 1.5 -sensible-utls 0.0.12 -syslog-ng 3.5 -sshpass 1.0 -yum-plugin-gastertmirrow 1.1 -Red Hat Universal Base Image 7 -Red Hat Universal Base Image 8 -Red Hat Openshift Container Platform 3.11 or later versions -font-awesome icons 4.7 -collectd-java 4.7 -dbus 1.10 -inotify-tools 3.14 -Red Hat Enterprise Linux 7 -Red Hat Enterprise Linux 8 -Erlang/OTP 21.3 -poppler-utils 0.48 -LibreOffice 6.3 -OCRmyPDF 9.0 -Debian GNU/Linux 8 -Ubuntu 16 -Alpine Linux 3 -libonig 2 5.9 -caniuse-lite 1.0.3 - Privacy Licensee acknowledges and agrees that IBM may use cookie and tracking technologies to collect personal information in gathering product usage statistics and information designed to help improve user experience and/or to tailor interactions with users in accordance with the IBM Online Privacy Policy, available at http://www.ibm.com/privacy/. @@ -158,6 +257,14 @@ The Program may contain links to or be used to access third party data services, The following units of measure may apply to Licensee's use of the Program. +Authorized User + +Authorized User is a unit of measure by which the Program can be licensed. An Authorized User is a unique person who is given access to the Program. The Program may be installed on any number of computers or servers and each Authorized User may have simultaneous access to any number of instances of the Program at one time. Licensee must obtain separate, dedicated entitlements for each Authorized User given access to the Program in any manner directly or indirectly (for example: via a multiplexing program, device, or application server) through any means. An entitlement for an Authorized User is unique to that Authorized User and may not be shared, nor may it be reassigned other than for the permanent transfer of the Authorized User entitlement to another person. 
+ +Concurrent User + +Concurrent User is a unit of measure by which the Program can be licensed. A Concurrent User is a person who is accessing the Program at any particular point in time. Regardless of whether the person is simultaneously accessing the Program multiple times, the person counts only as a single Concurrent User. The Program may be installed on any number of computers or servers, but Licensee must obtain entitlements for the maximum number of Concurrent Users simultaneously accessing the Program. Licensee must obtain an entitlement for each simultaneous Concurrent User accessing the Program in any manner directly or indirectly (for example: via a multiplexing program, device, or application server) through any means. + Virtual Processor Core Virtual Processor Core is a unit of measure by which the Program can be licensed. A Server is a physical computer that is comprised of processing units, memory, and input/output capabilities and that executes requested procedures, commands, or applications for one or more users or client devices. Where racks, blade enclosures, or other similar equipment is being employed, each separable physical device (for example, a blade or a rack-mounted device) that has the required components is considered itself a separate Server. A Virtual Server is either a virtual computer created by partitioning the resources available to a physical Server or an unpartitioned physical Server. A Processor Core is a functional unit within a computing device that interprets and executes instructions. A Processor Core consists of at least an instruction control unit and one or more arithmetic or logic units. A Virtual Processor Core is a Processor Core on a Virtual Server created by partitioning the resources available to a physical Server or an unpartitioned physical Server. Licensee must obtain entitlement for each Virtual Processor Core made available to the Program. @@ -168,9 +275,9 @@ In addition to the above, the following terms apply to Licensee's use of the Pro Infrequent User -Infrequent User is a unit of measure by which the Program can be licensed. Any Infrequent Users given access to the Program requires an entitlement. An Infrequent User is an Authorized User who accesses the Program not more than one hundred twenty (120) times in any consecutive 12 month period. A single access is comprised of one or more interactions between the Infrequent User and the Program or actions performed on behalf of the Infrequent User by the Program, all within a consecutive 15 minute period. +Infrequent User is a unit of measure by which the Program can be licensed. Any Infrequent User given access to the Program requires an entitlement. An Infrequent User is an Authorized User who accesses the Program not more than one hundred twenty (120) times in any consecutive 12 month period. A single access is comprised of one or more interactions between the Infrequent User and the Program or actions performed on behalf of the Infrequent User by the Program, all within a consecutive 15 minute period. -Licensee must track accesses by Infrequent Users to verify that they meet the access limitations of Infrequent Users. Licensee agrees to provide to IBM and its auditors details of the tracking mechanism described above, upon request. +Licensee must track accesses by Infrequent Users to verify that they meet the access limitations of Infrequent Users. Licensee agrees to provide to IBM and its auditors details of the tracking mechanism described above, upon request.
Employee User @@ -200,11 +307,12 @@ Notwithstanding any provision in the Agreement, Licensee is permitted to use onl - IBM WebSphere Application Server Network Deployment only for use in support of the following Bundled Programs: IBM FileNet Content Manager, IBM FileNet Content Manager for Non-Production Environment, IBM Datacap, IBM Enterprise Records, IBM Business Automation Workflow Enterprise, IBM Business Automation Workflow Enterprise for Non-Production Environment, IBM Operational Decision Manager Server, IBM Operational Decision Manager Server for Non-Production Environment. -Components Not Used for Establishing Required Entitlements +Components Not Used for Establishing Required Entitlements When determining the number of entitlements required for Licensee's installation or use of the Program, the installation or use of the following Program components is not taken into consideration. In other words, Licensee may install and use the following Program components, under the license terms, but these components are not used to determine the number of entitlements required for the Program. +- IBM Automation Decision Services Designer - IBM Business Automation Studio -- IBM Business Automation Navigator +- IBM Business Automation Navigator - IBM Business Automation Application Designer - IBM Business Automation Application Engine when used in Non-Production - IBM Automation Digital Worker when used in Non-Production @@ -213,50 +321,57 @@ When determining the number of entitlements required for Licensee's installation Entitlement Conversion Details -These Entitlement Conversion Details outline the entitlement conversion options. Licensee is entitled to the below entitlement conversion options in any deployment combination of Licensee's choosing and may choose to convert entitlements between the listed programs below at any time provided that the sum of Licensee's deployments do not exceed the total amount of Licensee's entitlements obtained for the Program. Licensee is not entitled to use entitlements obtained of the Program for any other purpose. +These Entitlement Conversion Details outline the entitlement conversion options. Licensee is entitled to the below entitlement conversion options in any deployment combination of Licensee's choosing and may choose to convert entitlements between the listed programs below at any time provided that the sum of Licensee's deployments does not exceed the total amount of Licensee's entitlements obtained for the Program. Licensee is not entitled to use entitlements obtained of the Program for any other purpose. -Unless otherwise indicated, Licensee may deploy and use any then currently supported version or release of the listed programs. If support for a deployed version or release of a listed program is subsequently no longer being made available, support will not be available through this Program either. While Licensee may choose to continue to use that deployed version or release, Subscription and Support (S&S) for this Program will not actually provide support for the unsupported version or release of the listed program. +Unless otherwise indicated, Licensee may deploy and use any then currently supported version or release of the listed programs. If support for a deployed version or release of a listed program is subsequently no longer being made available, support will not be available through this Program either.
While Licensee may choose to continue to use that deployed version or release, Subscription and Support (S&S) for this Program will not actually provide support for the unsupported version or release of the listed program. -Depending on the agreements between IBM and the Licensee, Licensee may have committed that when obtaining S&S they would do so for all uses and installations of an IBM Program. For the purposes of any such commitment, the individual Bundled Programs are the IBM Programs subject to that S&S commitment and to the extent that Licensee is obligated to acquire S&S for a Bundled Program, Licensee can satisfy that obligation as to the entitlements obtained under this Program by maintaining S&S for this Program as a whole. +Depending on the agreements between IBM and the Licensee, Licensee may have committed that when obtaining S&S they would do so for all uses and installations of an IBM Program. For the purposes of any such commitment, the individual Bundled Programs are the IBM Programs subject to that S&S commitment and to the extent that Licensee is obligated to acquire S&S for a Bundled Program, Licensee can satisfy that obligation as to the entitlements obtained under this Program by maintaining S&S for this Program as a whole. -Entitlement Values +Entitlement Values -Business Automation Application Engine (Component of the Program) -- Conversion Entitlement Ratio: 1 VPC/ 1 VPC +IBM Automation Decision Services (Component of the Program) +- Conversion Entitlement Ratio: 1 VPC/ 4 VPCs + +IBM Automation Decision Services (Component of the Program) for Non-Production +- Conversion Entitlement Ratio: 1 VPC/ 2 VPCs +- Use Limitation: Non-Production -Business Automation Insights (Component of the Program) +Business Automation Application Engine (Component of the Program) - Conversion Entitlement Ratio: 1 VPC/ 1 VPC - IBM Automation Digital Worker (Component of the Program) +Business Automation Insights (Component of the Program) - Conversion Entitlement Ratio: 1 VPC/ 1 VPC + IBM Automation Digital Worker (Component of the Program) +- Conversion Entitlement Ratio: 1 VPC/ 1 VPC + IBM FileNet Content Manager -- Conversion Entitlement Ratio: 1 VPC/ 5 VPCs or any one or any combination of any of the user measurements below: +- Conversion Entitlement Ratio: 1 VPC/ 5 VPCs or any one or any combination of any of the user measurements below: - Conversion Entitlement Ratio: 10 Concurrent User/ 1 VPC - Conversion Entitlement Ratio: 18 Authorized User/ 1 VPC - Conversion Entitlement Ratio: 36 Employee User/ 1 VPC -- Conversion Entitlement Ratio: 180 Infrequent User/ 1 VPC +- Conversion Entitlement Ratio: 180 Infrequent User/ 1 VPC - Conversion Entitlement Ratio: 3579 External User/ 1 VPC -IBM FileNet Content Manager for Non-Production Environment +IBM FileNet Content Manager for Non-Production Environment - Conversion Entitlement Ratio: 2 VPCs/ 5 VPCs - Use Limitation: Non-Production IBM Business Automation Workflow Enterprise -- Conversion Entitlement Ratio: 1 VPC/ 5 VPCs or any one or any combination of any of the user measurements below: +- Conversion Entitlement Ratio: 1 VPC/ 5 VPCs or any one or any combination of any of the user measurements below: - Conversion Entitlement Ratio: 5 Concurrent User/ 1 VPC - Conversion Entitlement Ratio: 9 Authorized User/ 1 VPC - Conversion Entitlement Ratio: 18 Employee User/ 1 VPC -- Conversion Entitlement Ratio: 90 Infrequent User/ 1 VPC +- Conversion Entitlement Ratio: 90 Infrequent User/ 1 VPC - Conversion Entitlement Ratio: 1782 External User/ 1 VPC
-IBM Business Automation Workflow Enterprise for Non-Production Environment +IBM Business Automation Workflow Enterprise for Non-Production Environment - Conversion Entitlement Ratio: 2 VPCs/ 5 VPCs - Use Limitation: Non-Production - -IBM Automation Workstream Services + +IBM Automation Workstream Services - Conversion Entitlement Ratio: 1 VPC/ 5 VPCs - + IBM Operational Decision Manager Server - Conversion Entitlement Ratio: 1 VPC/ 5 VPCs @@ -264,26 +379,26 @@ IBM Operational Decision Manager Server for Non-Production Environment - Conversion Entitlement Ratio: 2 VPCs/ 5 VPCs - Use Limitation: Non-Production -Business Automation Content Analyzer (Component of the Program) +Business Automation Content Analyzer (Component of the Program) - Conversion Entitlement Ratio: 1 VPC/ 1 VPC Business Automation Content Analyzer (Component of the Program) - Conversion Entitlement Ratio: 2 VPC/ 1 VPC -- Use Limitation: Non-Production +- Use Limitation: Non-Production IBM Datacap Processor Value Unit - Conversion Entitlement Ratio: 1 VPC/ 2 VPC IBM Datacap for Non-Production Environment Processor Value Unit - Conversion Entitlement Ratio: 1 VPC/ 1 VPC -- Use Limitation: Non-Production +- Use Limitation: Non-Production IBM Datacap Insight Edition Add-On Processor Value Unit - Conversion Entitlement Ratio: 1 VPC/ 2 VPC -IBM Datacap Insight Edition Add-on for Non-Production Environment Processor Value Unit +IBM Datacap Insight Edition Add-on for Non-Production Environment Processor Value Unit - Conversion Entitlement Ratio: 1 VPC/ 1 VPC -- Use Limitation: Non-Production +- Use Limitation: Non-Production IBM Content Collector for Email, Files & SharePoint - Conversion Entitlement Ratio: 1 VPC/ 3 VPC @@ -294,7 +409,7 @@ IBM Content Collector for Email, Files & SharePoint IBM Content Collector for Email, Files & SharePoint for Non-Production - Conversion Entitlement Ratio: 2 VPC/ 3 VPC -- Use Limitation: Non-Production +- Use Limitation: Non-Production IBM Content Collector for SAP - Conversion Entitlement Ratio: 1 VPC/ 3 VPC @@ -306,7 +421,7 @@ IBM Content Collector for SAP IBM Content Collector for SAP for Non-Production - Conversion Entitlement Ratio: 2 VPC/ 3 VPC - Use Limitation: Non-Production - + "Conversion Entitlement Ratio n/m" means that Licensee can convert some number ('n') entitlements of the indicated metric for the listed program for every specified number ('m') entitlements of the specified metric for the Program. Once converted, Licensee may only use such converted entitlements for the listed program. The specified conversion does not apply to any entitlements for the Program that are not of the required metric type. For example, if the conversion is 100 entitlements of a listed program for every 500 entitlements obtained of the Program and Licensee acquires 1,500 entitlements of the Program, Licensee may convert those 1,500 entitlements into 300 entitlements of the listed program, allowing the Licensee to use the listed program up to the 300 entitlements. "Non-Production" means that the Bundled Program can only be deployed as part of Licensee's internal development and test environment for internal non-production activities, including but not limited to testing, performance tuning, fault diagnosis, internal benchmarking, staging, quality assurance activity and/or developing internally used additions or extensions to the Program using published application programming interfaces.
Licensee is not authorized to use any part of the Bundled Program for any other purposes without acquiring the appropriate production entitlements. @@ -315,373 +430,1373 @@ Red Hat Products Red Hat Products (as listed below) are licensed separately and are supported by IBM only when used in support of the Program and only while Licensee has Software Subscription and Support in effect for the Program. In addition, Licensee agrees that its use of and support for the Red Hat Products are subject to the following terms (https://www.redhat.com/en/about/agreements). -Red Hat Universal Base Image +Red Hat Universal Base Image - Additional Entitlement Ratio: 1 VPC/ 1 VPC - + Red Hat Enterprise Linux - Additional Entitlement Ratio: 1 VPC / 1 VPC - + Red Hat OpenShift Container Platform - Additional Entitlement Ratio: 1 VPC / 1 VPC "Additional Entitlement Ratio n/m" means that Licensee receives some number ('n') entitlements of the indicated metric for the identified program for every specified number ('m') entitlements of the specified metric for the Program as a whole. The specified ratio does not apply to any entitlements for the Program that are not of the required metric type. The number of entitlements for the identified program is rounded up to a multiple of 'n'. For example, if a Program includes 100 PVUs for an identified program for every 500 PVUs obtained of the Principal Program and Licensee acquires 1,200 PVUs of the Program, Licensee may install the identified program and have processor cores available to or managed by it of up to 300 PVUs. Those PVUs would not need to be counted as part of the total PVU requirement for Licensee's installation of the Program on account of the installation of the identified program (although those PVUs might need to be counted for other reasons, such as the processor cores being made available to other components of the Program, as well). -L/N: L-ASAY-BNFHX2 -D/N: L-ASAY-BNFHX2 -P/N: L-ASAY-BNFHX2 - -International Program License Agreement -Part 1 - General Terms -BY DOWNLOADING, INSTALLING, COPYING, ACCESSING, CLICKING ON AN "ACCEPT" BUTTON, OR OTHERWISE USING THE PROGRAM, LICENSEE AGREES TO THE TERMS OF THIS AGREEMENT. IF YOU ARE ACCEPTING THESE TERMS ON BEHALF OF LICENSEE, YOU REPRESENT AND WARRANT THAT YOU HAVE FULL AUTHORITY TO BIND LICENSEE TO THESE TERMS. IF YOU DO NOT AGREE TO THESE TERMS, -* DO NOT DOWNLOAD, INSTALL, COPY, ACCESS, CLICK ON AN "ACCEPT" BUTTON, OR USE THE PROGRAM; AND -* PROMPTLY RETURN THE UNUSED MEDIA, DOCUMENTATION, AND PROOF OF ENTITLEMENT TO THE PARTY FROM WHOM IT WAS OBTAINED FOR A REFUND OF THE AMOUNT PAID. IF THE PROGRAM WAS DOWNLOADED, DESTROY ALL COPIES OF THE PROGRAM. -1. Definitions -"Authorized Use" - the specified level at which Licensee is authorized to execute or run the Program. That level may be measured by number of users, millions of service units ("MSUs"), Processor Value Units ("PVUs"), or other level of use specified by IBM. -"IBM" - International Business Machines Corporation or one of its subsidiaries. -"License Information" ("LI") - a document that provides information and any additional terms specific to a Program. The Program's LI is available at www.ibm.com/software/sla. The LI can also be found in the Program's directory, by the use of a system command, or as a booklet included with the Program. 
-"Program" - the following, including the original and all whole or partial copies: 1) machine-readable instructions and data, 2) components, files, and modules, 3) audio-visual content (such as images, text, recordings, or pictures), and 4) related licensed materials (such as keys and documentation). -"Proof of Entitlement" ("PoE") - evidence of Licensee's Authorized Use. The PoE is also evidence of Licensee's eligibility for warranty, future update prices, if any, and potential special or promotional opportunities. If IBM does not provide Licensee with a PoE, then IBM may accept as the PoE the original paid sales receipt or other sales record from the party (either IBM or its reseller) from whom Licensee obtained the Program, provided that it specifies the Program name and Authorized Use obtained. -"Warranty Period" - one year, starting on the date the original Licensee is granted the license. -2. Agreement Structure -This Agreement includes Part 1 - General Terms, Part 2 - Country-unique Terms (if any), the LI, and the PoE and is the complete agreement between Licensee and IBM regarding the use of the Program. It replaces any prior oral or written communications between Licensee and IBM concerning Licensee's use of the Program. The terms of Part 2 may replace or modify those of Part 1. To the extent of any conflict, the LI prevails over both Parts. -3. License Grant -The Program is owned by IBM or an IBM supplier, and is copyrighted and licensed, not sold. -IBM grants Licensee a nonexclusive license to 1) use the Program up to the Authorized Use specified in the PoE, 2) make and install copies to support such Authorized Use, and 3) make a backup copy, all provided that -a. Licensee has lawfully obtained the Program and complies with the terms of this Agreement; -b. the backup copy does not execute unless the backed-up Program cannot execute; -c. Licensee reproduces all copyright notices and other legends of ownership on each copy, or partial copy, of the Program; -d. Licensee ensures that anyone who uses the Program (accessed either locally or remotely) 1) does so only on Licensee's behalf and 2) complies with the terms of this Agreement; -e. Licensee does not 1) use, copy, modify, or distribute the Program except as expressly permitted in this Agreement; 2) reverse assemble, reverse compile, otherwise translate, or reverse engineer the Program, except as expressly permitted by law without the possibility of contractual waiver; 3) use any of the Program's components, files, modules, audio-visual content, or related licensed materials separately from that Program; or 4) sublicense, rent, or lease the Program; and -f. if Licensee obtains this Program as a Supporting Program, Licensee uses this Program only to support the Principal Program and subject to any limitations in the license to the Principal Program, or, if Licensee obtains this Program as a Principal Program, Licensee uses all Supporting Programs only to support this Program, and subject to any limitations in this Agreement. For purposes of this Item "f," a "Supporting Program" is a Program that is part of another IBM Program ("Principal Program") and identified as a Supporting Program in the Principal Program's LI. (To obtain a separate license to a Supporting Program without these restrictions, Licensee should contact the party from whom Licensee obtained the Supporting Program.) -This license applies to each copy of the Program that Licensee makes. 
-3.1 Trade-ups, Updates, Fixes, and Patches -3.1.1 Trade-ups -If the Program is replaced by a trade-up Program, the replaced Program's license is promptly terminated. -3.1.2 Updates, Fixes, and Patches -When Licensee receives an update, fix, or patch to a Program, Licensee accepts any additional or different terms that are applicable to such update, fix, or patch that are specified in its LI. If no additional or different terms are provided, then the update, fix, or patch is subject solely to this Agreement. If the Program is replaced by an update, Licensee agrees to promptly discontinue use of the replaced Program. -3.2 Fixed Term Licenses -If IBM licenses the Program for a fixed term, Licensee's license is terminated at the end of the fixed term, unless Licensee and IBM agree to renew it. -3.3 Term and Termination -This Agreement is effective until terminated. -IBM may terminate Licensee's license if Licensee fails to comply with the terms of this Agreement. -If the license is terminated for any reason by either party, Licensee agrees to promptly discontinue use of and destroy all of Licensee's copies of the Program. Any terms of this Agreement that by their nature extend beyond termination of this Agreement remain in effect until fulfilled, and apply to both parties' respective successors and assignees. -4. Charges -Charges are based on Authorized Use obtained, which is specified in the PoE. IBM does not give credits or refunds for charges already due or paid, except as specified elsewhere in this Agreement. -If Licensee wishes to increase its Authorized Use, Licensee must notify IBM or an authorized IBM reseller in advance and pay any applicable charges. -5. Taxes -If any authority imposes on the Program a duty, tax, levy, or fee, excluding those based on IBM's net income, then Licensee agrees to pay that amount, as specified in an invoice, or supply exemption documentation. Licensee is responsible for any personal property taxes for the Program from the date that Licensee obtains it. If any authority imposes a customs duty, tax, levy, or fee for the import into or the export, transfer, access, or use of the Program outside the country in which the original Licensee was granted the license, then Licensee agrees that it is responsible for, and will pay, any amount imposed. -6. Money-back Guarantee -If Licensee is dissatisfied with the Program for any reason and is the original Licensee, Licensee may terminate the license and obtain a refund of the amount Licensee paid for the Program, provided that Licensee returns the Program and PoE to the party from whom Licensee obtained it within 30 days of the date the PoE was issued to Licensee. If the license is for a fixed term that is subject to renewal, then Licensee may obtain a refund only if the Program and its PoE are returned within the first 30 days of the initial term. If Licensee downloaded the Program, Licensee should contact the party from whom Licensee obtained it for instructions on how to obtain the refund. -7. Program Transfer -Licensee may transfer the Program and all of Licensee's license rights and obligations to another party only if that party agrees to the terms of this Agreement. If the license is terminated for any reason by either party, Licensee is prohibited from transferring the Program to another party. Licensee may not transfer a portion of 1) the Program or 2) the Program's Authorized Use. When Licensee transfers the Program, Licensee must also transfer a hard copy of this Agreement, including the LI and PoE. 
Immediately after the transfer, Licensee's license terminates. -8. Warranty and Exclusions -8.1 Limited Warranty -IBM warrants that the Program, when used in its specified operating environment, will conform to its specifications. The Program's specifications, and specified operating environment information, can be found in documentation accompanying the Program (such as a read-me file) or other information published by IBM (such as an announcement letter). Licensee agrees that such documentation and other Program content may be supplied only in the English language, unless otherwise required by local law without the possibility of contractual waiver or limitation. -The warranty applies only to the unmodified portion of the Program. IBM does not warrant uninterrupted or error-free operation of the Program, or that IBM will correct all Program defects. Licensee is responsible for the results obtained from the use of the Program. -During the Warranty Period, IBM provides Licensee with access to IBM databases containing information on known Program defects, defect corrections, restrictions, and bypasses at no additional charge. Consult the IBM Software Support Handbook for further information at www.ibm.com/software/support. -If the Program does not function as warranted during the Warranty Period and the problem cannot be resolved with information available in the IBM databases, Licensee may return the Program and its PoE to the party (either IBM or its reseller) from whom Licensee obtained it and receive a refund of the amount Licensee paid. After returning the Program, Licensee's license terminates. If Licensee downloaded the Program, Licensee should contact the party from whom Licensee obtained it for instructions on how to obtain the refund. -8.2 Exclusions -THESE WARRANTIES ARE LICENSEE'S EXCLUSIVE WARRANTIES AND REPLACE ALL OTHER WARRANTIES OR CONDITIONS, EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE, AND ANY WARRANTY OR CONDITION OF NON-INFRINGEMENT. SOME STATES OR JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF EXPRESS OR IMPLIED WARRANTIES, SO THE ABOVE EXCLUSION MAY NOT APPLY TO LICENSEE. IN THAT EVENT, SUCH WARRANTIES ARE LIMITED IN DURATION TO THE WARRANTY PERIOD. NO WARRANTIES APPLY AFTER THAT PERIOD. SOME STATES OR JURISDICTIONS DO NOT ALLOW LIMITATIONS ON HOW LONG AN IMPLIED WARRANTY LASTS, SO THE ABOVE LIMITATION MAY NOT APPLY TO LICENSEE. -THESE WARRANTIES GIVE LICENSEE SPECIFIC LEGAL RIGHTS. LICENSEE MAY ALSO HAVE OTHER RIGHTS THAT VARY FROM STATE TO STATE OR JURISDICTION TO JURISDICTION. -THE WARRANTIES IN THIS SECTION 8 (WARRANTY AND EXCLUSIONS) ARE PROVIDED SOLELY BY IBM. THE DISCLAIMERS IN THIS SUBSECTION 8.2 (EXCLUSIONS), HOWEVER, ALSO APPLY TO IBM'S SUPPLIERS OF THIRD PARTY CODE. THOSE SUPPLIERS PROVIDE SUCH CODE WITHOUT WARRANTIES OR CONDITION OF ANY KIND. THIS PARAGRAPH DOES NOT NULLIFY IBM'S WARRANTY OBLIGATIONS UNDER THIS AGREEMENT. -9. Licensee Data and Databases -To assist Licensee in isolating the cause of a problem with the Program, IBM may request that Licensee 1) allow IBM to remotely access Licensee's system or 2) send Licensee information or system data to IBM. However, IBM is not obligated to provide such assistance unless IBM and Licensee enter a separate written agreement under which IBM agrees to provide to Licensee that type of support, which is beyond IBM's warranty obligations in this Agreement. 
In any event, IBM uses information about errors and problems to improve its products and services, and assist with its provision of related support offerings. For these purposes, IBM may use IBM entities and subcontractors (including in one or more countries other than the one in which Licensee is located), and Licensee authorizes IBM to do so. -Licensee remains responsible for 1) any data and the content of any database Licensee makes available to IBM, 2) the selection and implementation of procedures and controls regarding access, security, encryption, use, and transmission of data (including any personally-identifiable data), and 3) backup and recovery of any database and any stored data. Licensee will not send or provide IBM access to any personally-identifiable information, whether in data or any other form, and will be responsible for reasonable costs and other amounts that IBM may incur relating to any such information mistakenly provided to IBM or the loss or disclosure of such information by IBM, including those arising out of any third party claims. -10. Limitation of Liability -The limitations and exclusions in this Section 10 (Limitation of Liability) apply to the full extent they are not prohibited by applicable law without the possibility of contractual waiver. -10.1 Items for Which IBM May Be Liable -Circumstances may arise where, because of a default on IBM's part or other liability, Licensee is entitled to recover damages from IBM. Regardless of the basis on which Licensee is entitled to claim damages from IBM (including fundamental breach, negligence, misrepresentation, or other contract or tort claim), IBM's entire liability for all claims in the aggregate arising from or related to each Program or otherwise arising under this Agreement will not exceed the amount of any 1) damages for bodily injury (including death) and damage to real property and tangible personal property and 2) other actual direct damages up to the charges (if the Program is subject to fixed term charges, up to twelve months' charges) Licensee paid for the Program that is the subject of the claim. -This limit also applies to any of IBM's Program developers and suppliers. It is the maximum for which IBM and its Program developers and suppliers are collectively responsible. -10.2 Items for Which IBM Is Not Liable -UNDER NO CIRCUMSTANCES IS IBM, ITS PROGRAM DEVELOPERS OR SUPPLIERS LIABLE FOR ANY OF THE FOLLOWING, EVEN IF INFORMED OF THEIR POSSIBILITY: -a. LOSS OF, OR DAMAGE TO, DATA; -b. SPECIAL, INCIDENTAL, EXEMPLARY, OR INDIRECT DAMAGES, OR FOR ANY ECONOMIC CONSEQUENTIAL DAMAGES; OR -c. LOST PROFITS, BUSINESS, REVENUE, GOODWILL, OR ANTICIPATED SAVINGS. -11. Compliance Verification -For purposes of this Section 11 (Compliance Verification), "IPLA Program Terms" means 1) this Agreement and applicable amendments and transaction documents provided by IBM, and 2) IBM software policies that may be found at the IBM Software Policy website (www.ibm.com/softwarepolicies), including but not limited to those policies concerning backup, sub-capacity pricing, and migration. -The rights and obligations set forth in this Section 11 remain in effect during the period the Program is licensed to Licensee, and for two years thereafter. 
-11.1 Verification Process -Licensee agrees to create, retain, and provide to IBM and its auditors accurate written records, system tool outputs, and other system information sufficient to provide auditable verification that Licensee's use of all Programs is in compliance with the IPLA Program Terms, including, without limitation, all of IBM's applicable licensing and pricing qualification terms. Licensee is responsible for 1) ensuring that it does not exceed its Authorized Use, and 2) remaining in compliance with IPLA Program Terms. -Upon reasonable notice, IBM may verify Licensee's compliance with IPLA Program Terms at all sites and for all environments in which Licensee uses (for any purpose) Programs subject to IPLA Program Terms. Such verification will be conducted in a manner that minimizes disruption to Licensee's business, and may be conducted on Licensee's premises, during normal business hours. IBM may use an independent auditor to assist with such verification, provided IBM has a written confidentiality agreement in place with such auditor. -11.2 Resolution -IBM will notify Licensee in writing if any such verification indicates that Licensee has used any Program in excess of its Authorized Use or is otherwise not in compliance with the IPLA Program Terms. Licensee agrees to promptly pay directly to IBM the charges that IBM specifies in an invoice for 1) any such excess use, 2) support for such excess use for the lesser of the duration of such excess use or two years, and 3) any additional charges and other liabilities determined as a result of such verification. -12. Third Party Notices -The Program may include third party code that IBM, not the third party, licenses to Licensee under this Agreement. Notices, if any, for the third party code ("Third Party Notices") are included for Licensee's information only. These notices can be found in the Program's NOTICES file(s). Information on how to obtain source code for certain third party code can be found in the Third Party Notices. If in the Third Party Notices IBM identifies third party code as "Modifiable Third Party Code," IBM authorizes Licensee to 1) modify the Modifiable Third Party Code and 2) reverse engineer the Program modules that directly interface with the Modifiable Third Party Code provided that it is only for the purpose of debugging Licensee's modifications to such third party code. IBM's service and support obligations, if any, apply only to the unmodified Program. -13. General -a. Nothing in this Agreement affects any statutory rights of consumers that cannot be waived or limited by contract. -b. For Programs IBM provides to Licensee in tangible form, IBM fulfills its shipping and delivery obligations upon the delivery of such Programs to the IBM-designated carrier, unless otherwise agreed to in writing by Licensee and IBM. -c. If any provision of this Agreement is held to be invalid or unenforceable, the remaining provisions of this Agreement remain in full force and effect. -d. Licensee agrees to comply with all applicable export and import laws and regulations, including U.S. embargo and sanctions regulations and prohibitions on export for certain end uses or to certain users. -e. 
Licensee authorizes International Business Machines Corporation and its subsidiaries (and their successors and assigns, contractors and IBM Business Partners) to store and use Licensee's business contact information wherever they do business, in connection with IBM products and services, or in furtherance of IBM's business relationship with Licensee. -f. Each party will allow the other reasonable opportunity to comply before it claims that the other has not met its obligations under this Agreement. The parties will attempt in good faith to resolve all disputes, disagreements, or claims between the parties relating to this Agreement. -g. Unless otherwise required by applicable law without the possibility of contractual waiver or limitation: 1) neither party will bring a legal action, regardless of form, for any claim arising out of or related to this Agreement more than two years after the cause of action arose; and 2) upon the expiration of such time limit, any such claim and all respective rights related to the claim lapse. -h. Neither Licensee nor IBM is responsible for failure to fulfill any obligations due to causes beyond its control. -i. No right or cause of action for any third party is created by this Agreement, nor is IBM responsible for any third party claims against Licensee, except as permitted in Subsection 10.1 (Items for Which IBM May Be Liable) above for bodily injury (including death) or damage to real or tangible personal property for which IBM is legally liable to that third party. -j. In entering into this Agreement, neither party is relying on any representation not specified in this Agreement, including but not limited to any representation concerning: 1) the performance or function of the Program, other than as expressly warranted in Section 8 (Warranty and Exclusions) above; 2) the experiences or recommendations of other parties; or 3) any results or savings that Licensee may achieve. -k. IBM has signed agreements with certain organizations (called "IBM Business Partners") to promote, market, and support certain Programs. IBM Business Partners remain independent and separate from IBM. IBM is not responsible for the actions or statements of IBM Business Partners or obligations they have to Licensee. -l. The license and intellectual property indemnification terms of Licensee's other agreements with IBM (such as the IBM Customer Agreement) do not apply to Program licenses granted under this Agreement. -14. Geographic Scope and Governing Law -14.1 Governing Law -Both parties agree to the application of the laws of the country in which Licensee obtained the Program license to govern, interpret, and enforce all of Licensee's and IBM's respective rights, duties, and obligations arising from, or relating in any manner to, the subject matter of this Agreement, without regard to conflict of law principles. -The United Nations Convention on Contracts for the International Sale of Goods does not apply. -14.2 Jurisdiction -All rights, duties, and obligations are subject to the courts of the country in which Licensee obtained the Program license. -Part 2 - Country-unique Terms -For licenses granted in the countries specified below, the following terms replace or modify the referenced terms in Part 1. All terms in Part 1 that are not changed by these amendments remain unchanged and in effect. 
This Part 2 is organized as follows: -* Multiple country amendments to Part 1, Section 14 (Governing Law and Jurisdiction); -* Americas country amendments to other Agreement terms; -* Asia Pacific country amendments to other Agreement terms; and -* Europe, Middle East, and Africa country amendments to other Agreement terms. -Multiple country amendments to Part 1, Section 14 (Governing Law and Jurisdiction) -14.1 Governing Law -The phrase "the laws of the country in which Licensee obtained the Program license" in the first paragraph of 14.1 Governing Law is replaced by the following phrases in the countries below: -AMERICAS -(1) In Canada: the laws in the Province of Ontario; -(2) in Mexico: the federal laws of the Republic of Mexico; -(3) in the United States, Anguilla, Antigua/Barbuda, Aruba, British Virgin Islands, Cayman Islands, Dominica, Grenada, Guyana, Saint Kitts and Nevis, Saint Lucia, Saint Maarten, and Saint Vincent and the Grenadines: the laws of the State of New York, United States; -(4) in Venezuela: the laws of the Bolivarian Republic of Venezuela; -ASIA PACIFIC -(5) in Cambodia and Laos: the laws of the State of New York, United States; -(6) in Australia: the laws of the State or Territory in which the transaction is performed; -(7) in Hong Kong SAR and Macau SAR: the laws of Hong Kong Special Administrative Region ("SAR"); -(8) in Taiwan: the laws of Taiwan; -EUROPE, MIDDLE EAST, AND AFRICA -(9) in Albania, Armenia, Azerbaijan, Belarus, Bosnia-Herzegovina, Bulgaria, Croatia, Former Yugoslav Republic of Macedonia, Georgia, Hungary, Kazakhstan, Kyrgyzstan, Moldova, Montenegro, Poland, Romania, Russia, Serbia, Slovakia, Tajikistan, Turkmenistan, Ukraine, and Uzbekistan: the laws of Austria; -(10) in Algeria, Andorra, Benin, Burkina Faso, Cameroon, Cape Verde, Central African Republic, Chad, Comoros, Congo Republic, Djibouti, Democratic Republic of Congo, Equatorial Guinea, French Guiana, French Polynesia, Gabon, Gambia, Guinea, Guinea-Bissau, Ivory Coast, Lebanon, Madagascar, Mali, Mauritania, Mauritius, Mayotte, Morocco, New Caledonia, Niger, Reunion, Senegal, Seychelles, Togo, Tunisia, Vanuatu, and Wallis and Futuna: the laws of France; -(11) in Estonia, Latvia, and Lithuania: the laws of Finland; -(12) in Angola, Bahrain, Botswana, Burundi, Egypt, Eritrea, Ethiopia, Ghana, Jordan, Kenya, Kuwait, Liberia, Malawi, Malta, Mozambique, Nigeria, Oman, Pakistan, Qatar, Rwanda, Sao Tome and Principe, Saudi Arabia, Sierra Leone, Somalia, Tanzania, Uganda, United Arab Emirates, the United Kingdom, West Bank/Gaza, Yemen, Zambia, and Zimbabwe: the laws of England; and -(13) in South Africa, Namibia, Lesotho, and Swaziland: the laws of the Republic of South Africa. 
-14.2 Jurisdiction -The following paragraph pertains to jurisdiction and replaces Subsection 14.2 (Jurisdiction) as it applies for those countries identified below: -All rights, duties, and obligations are subject to the courts of the country in which Licensee obtained the Program license except that in the countries identified below all disputes arising out of or related to this Agreement, including summary proceedings, will be brought before and subject to the exclusive jurisdiction of the following courts of competent jurisdiction: -AMERICAS -(1) In Argentina: the Ordinary Commercial Court of the city of Buenos Aires; -(2) in Brazil: the court of Rio de Janeiro, RJ; -(3) in Chile: the Civil Courts of Justice of Santiago; -(4) in Ecuador: the civil judges of Quito for executory or summary proceedings (as applicable); -(5) in Mexico: the courts located in Mexico City, Federal District; -(6) in Peru: the judges and tribunals of the judicial district of Lima, Cercado; -(7) in Uruguay: the courts of the city of Montevideo; -(8) in Venezuela: the courts of the metropolitan area of the city of Caracas; -EUROPE, MIDDLE EAST, AND AFRICA -(9) in Austria: the court of law in Vienna, Austria (Inner-City); -(10) in Algeria, Andorra, Benin, Burkina Faso, Cameroon, Cape Verde, Central African Republic, Chad, Comoros, Congo Republic, Djibouti, Democratic Republic of Congo, Equatorial Guinea, France, French Guiana, French Polynesia, Gabon, Gambia, Guinea, Guinea-Bissau, Ivory Coast, Lebanon, Madagascar, Mali, Mauritania, Mauritius, Mayotte, Monaco, Morocco, New Caledonia, Niger, Reunion, Senegal, Seychelles, Togo, Tunisia, Vanuatu, and Wallis and Futuna: the Commercial Court of Paris; -(11) in Angola, Bahrain, Botswana, Burundi, Egypt, Eritrea, Ethiopia, Ghana, Jordan, Kenya, Kuwait, Liberia, Malawi, Malta, Mozambique, Nigeria, Oman, Pakistan, Qatar, Rwanda, Sao Tome and Principe, Saudi Arabia, Sierra Leone, Somalia, Tanzania, Uganda, United Arab Emirates, the United Kingdom, West Bank/Gaza, Yemen, Zambia, and Zimbabwe: the English courts; -(12) in South Africa, Namibia, Lesotho, and Swaziland: the High Court in Johannesburg; -(13) in Greece: the competent court of Athens; -(14) in Israel: the courts of Tel Aviv-Jaffa; -(15) in Italy: the courts of Milan; -(16) in Portugal: the courts of Lisbon; -(17) in Spain: the courts of Madrid; and -(18) in Turkey: the Istanbul Central Courts and Execution Directorates of Istanbul, the Republic of Turkey. -14.3 Arbitration -The following paragraph is added as a new Subsection 14.3 (Arbitration) as it applies for those countries identified below. The provisions of this Subsection 14.3 prevail over those of Subsection 14.2 (Jurisdiction) to the extent permitted by the applicable governing law and rules of procedure: -ASIA PACIFIC -(1) In Cambodia, India, Laos, Philippines, and Vietnam: -Disputes arising out of or in connection with this Agreement will be finally settled by arbitration which will be held in Singapore in accordance with the Arbitration Rules of Singapore International Arbitration Center ("SIAC Rules") then in effect. The arbitration award will be final and binding for the parties without appeal and will be in writing and set forth the findings of fact and the conclusions of law. -The number of arbitrators will be three, with each side to the dispute being entitled to appoint one arbitrator. The two arbitrators appointed by the parties will appoint a third arbitrator who will act as chairman of the proceedings. 
Vacancies in the post of chairman will be filled by the president of the SIAC. Other vacancies will be filled by the respective nominating party. Proceedings will continue from the stage they were at when the vacancy occurred. -If one of the parties refuses or otherwise fails to appoint an arbitrator within 30 days of the date the other party appoints its, the first appointed arbitrator will be the sole arbitrator, provided that the arbitrator was validly and properly appointed. -All proceedings will be conducted, including all documents presented in such proceedings, in the English language. The English language version of this Agreement prevails over any other language version. -(2) In the People's Republic of China: -In case no settlement can be reached, the disputes will be submitted to China International Economic and Trade Arbitration Commission for arbitration according to the then effective rules of the said Arbitration Commission. The arbitration will take place in Beijing and be conducted in Chinese. The arbitration award will be final and binding on both parties. During the course of arbitration, this agreement will continue to be performed except for the part which the parties are disputing and which is undergoing arbitration. -(3) In Indonesia: -Each party will allow the other reasonable opportunity to comply before it claims that the other has not met its obligations under this Agreement. The parties will attempt in good faith to resolve all disputes, disagreements, or claims between the parties relating to this Agreement. Unless otherwise required by applicable law without the possibility of contractual waiver or limitation, i) neither party will bring a legal action, regardless of form, arising out of or related to this Agreement or any transaction under it more than two years after the cause of action arose; and ii) after such time limit, any legal action arising out of this Agreement or any transaction under it and all respective rights related to any such action lapse. -Disputes arising out of or in connection with this Agreement shall be finally settled by arbitration that shall be held in Jakarta, Indonesia in accordance with the rules of Board of the Indonesian National Board of Arbitration (Badan Arbitrase Nasional Indonesia or "BANI") then in effect. The arbitration award shall be final and binding for the parties without appeal and shall be in writing and set forth the findings of fact and the conclusions of law. -The number of arbitrators shall be three, with each side to the dispute being entitled to appoint one arbitrator. The two arbitrators appointed by the parties shall appoint a third arbitrator who shall act as chairman of the proceedings. Vacancies in the post of chairman shall be filled by the chairman of the BANI. Other vacancies shall be filled by the respective nominating party. Proceedings shall continue from the stage they were at when the vacancy occurred. -If one of the parties refuses or otherwise fails to appoint an arbitrator within 30 days of the date the other party appoints its, the first appointed arbitrator shall be the sole arbitrator, provided that the arbitrator was validly and properly appointed. -All proceedings shall be conducted, including all documents presented in such proceedings, in the English and/or Indonesian language. 
-EUROPE, MIDDLE EAST, AND AFRICA
-(4) In Albania, Armenia, Azerbaijan, Belarus, Bosnia-Herzegovina, Bulgaria, Croatia, Former Yugoslav Republic of Macedonia, Georgia, Hungary, Kazakhstan, Kyrgyzstan, Moldova, Montenegro, Poland, Romania, Russia, Serbia, Slovakia, Tajikistan, Turkmenistan, Ukraine, and Uzbekistan:
-All disputes arising out of this Agreement or related to its violation, termination or nullity will be finally settled under the Rules of Arbitration and Conciliation of the International Arbitral Center of the Federal Economic Chamber in Vienna (Vienna Rules) by three arbitrators appointed in accordance with these rules. The arbitration will be held in Vienna, Austria, and the official language of the proceedings will be English. The decision of the arbitrators will be final and binding upon both parties. Therefore, pursuant to paragraph 598 (2) of the Austrian Code of Civil Procedure, the parties expressly waive the application of paragraph 595 (1) figure 7 of the Code. IBM may, however, institute proceedings in a competent court in the country of installation.
-(5) In Estonia, Latvia, and Lithuania:
-All disputes arising in connection with this Agreement will be finally settled in arbitration that will be held in Helsinki, Finland in accordance with the arbitration laws of Finland then in effect. Each party will appoint one arbitrator. The arbitrators will then jointly appoint the chairman. If arbitrators cannot agree on the chairman, then the Central Chamber of Commerce in Helsinki will appoint the chairman.
-AMERICAS COUNTRY AMENDMENTS
-CANADA
-10.1 Items for Which IBM May be Liable
-The following replaces Item 1 in the first paragraph of this Subsection 10.1 (Items for Which IBM May be Liable):
-1) damages for bodily injury (including death) and physical harm to real property and tangible personal property caused by IBM's negligence; and
-13. General
-The following replaces Item 13.d:
-d. Licensee agrees to comply with all applicable export and import laws and regulations, including those that apply to goods of United States origin and that prohibit or limit export for certain uses or to certain users.
-The following replaces Item 13.i:
-i. No right or cause of action for any third party is created by this Agreement or any transaction under it, nor is IBM responsible for any third party claims against Licensee except as permitted by the Limitation of Liability section above for bodily injury (including death) or physical harm to real or tangible personal property caused by IBM's negligence for which IBM is legally liable to that third party.
-The following is added as Item 13.m:
-m. For purposes of this Item 13.m, "Personal Data" refers to information relating to an identified or identifiable individual made available by one of the parties, its personnel or any other individual to the other in connection with this Agreement. The following provisions apply in the event that one party makes Personal Data available to the other:
-(1) General
-(a) Each party is responsible for complying with any obligations applying to it under applicable Canadian data privacy laws and regulations ("Laws").
-(b) Neither party will request Personal Data beyond what is necessary to fulfill the purpose(s) for which it is requested. The purpose(s) for requesting Personal Data must be reasonable. Each party will agree in advance as to the type of Personal Data that is required to be made available.
-(2) Security Safeguards
-(a) Each party acknowledges that it is solely responsible for determining and communicating to the other the appropriate technological, physical and organizational security measures required to protect Personal Data.
-(b) Each party will ensure that Personal Data is protected in accordance with the security safeguards communicated and agreed to by the other.
-(c) Each party will ensure that any third party to whom Personal Data is transferred is bound by the applicable terms of this section.
-(d) Additional or different services required to comply with the Laws will be deemed a request for new services.
-(3) Use
-Each party agrees that Personal Data will only be used, accessed, managed, transferred, disclosed to third parties or otherwise processed to fulfill the purpose(s) for which it was made available.
-(4) Access Requests
-(a) Each party agrees to reasonably cooperate with the other in connection with requests to access or amend Personal Data.
-(b) Each party agrees to reimburse the other for any reasonable charges incurred in providing each other assistance.
-(c) Each party agrees to amend Personal Data only upon receiving instructions to do so from the other party or its personnel.
-(5) Retention
-Each party will promptly return to the other or destroy all Personal Data that is no longer necessary to fulfill the purpose(s) for which it was made available, unless otherwise instructed by the other or its personnel or required by law.
-(6) Public Bodies Who Are Subject to Public Sector Privacy Legislation
-For Licensees who are public bodies subject to public sector privacy legislation, this Item 13.m applies only to Personal Data made available to Licensee in connection with this Agreement, and the obligations in this section apply only to Licensee, except that: 1) section (2)(a) applies only to IBM; 2) sections (1)(a) and (4)(a) apply to both parties; and 3) section (4)(b) and the last sentence in (1)(b) do not apply.
-PERU
-10. Limitation of Liability
-The following is added to the end of this Section 10 (Limitation of Liability):
-Except as expressly required by law without the possibility of contractual waiver, Licensee and IBM intend that the limitation of liability in this Limitation of Liability section applies to damages caused by all types of claims and causes of action. If any limitation on or exclusion from liability in this section is held by a court of competent jurisdiction to be unenforceable with respect to a particular claim or cause of action, the parties intend that it nonetheless apply to the maximum extent permitted by applicable law to all other claims and causes of action.
-10.1 Items for Which IBM May be Liable
-The following is added at the end of this Subsection 10.1:
-In accordance with Article 1328 of the Peruvian Civil Code, the limitations and exclusions specified in this section will not apply to damages caused by IBM's willful misconduct ("dolo") or gross negligence ("culpa inexcusable").
-UNITED STATES OF AMERICA
-5. Taxes
-The following is added at the end of this Section 5 (Taxes):
-For Programs delivered electronically in the United States for which Licensee claims a state sales and use tax exemption, Licensee agrees not to receive any tangible personal property (e.g., media and publications) associated with the electronic program.
-Licensee agrees to be responsible for any sales and use tax liabilities that may arise as a result of Licensee's subsequent redistribution of Programs after delivery by IBM.
-13. General
-The following is added to Section 13 as Item 13.m:
-U.S. Government Users Restricted Rights - Use, duplication or disclosure is restricted by the GSA IT Schedule 70 Contract with the IBM Corporation.
-The following is added to Item 13.f:
-Each party waives any right to a jury trial in any proceeding arising out of or related to this Agreement.
-ASIA PACIFIC COUNTRY AMENDMENTS
-AUSTRALIA
-5. Taxes
-The following sentences replace the first two sentences of Section 5 (Taxes):
-If any government or authority imposes a duty, tax (other than income tax), levy, or fee, on this Agreement or on the Program itself, that is not otherwise provided for in the amount payable, Licensee agrees to pay it when IBM invoices Licensee. If the rate of GST changes, IBM may adjust the charge or other amount payable to take into account that change from the date the change becomes effective.
-8.1 Limited Warranty
-The following is added to Subsection 8.1 (Limited Warranty):
-The warranties specified in this Section are in addition to any rights Licensee may have under the Competition and Consumer Act 2010 or other legislation and are only limited to the extent permitted by the applicable legislation.
-10.1 Items for Which IBM May be Liable
-The following is added to Subsection 10.1 (Items for Which IBM May be Liable):
-Where IBM is in breach of a condition or warranty implied by the Competition and Consumer Act 2010, IBM's liability is limited to the repair or replacement of the goods, or the supply of equivalent goods. Where that condition or warranty relates to right to sell, quiet possession or clear title, or the goods are of a kind ordinarily obtained for personal, domestic or household use or consumption, then none of the limitations in this paragraph apply.
-HONG KONG SAR, MACAU SAR, AND TAIWAN
-As applies to licenses obtained in Taiwan and the special administrative regions, phrases throughout this Agreement containing the word "country" (for example, "the country in which the original Licensee was granted the license" and "the country in which Licensee obtained the Program license") are replaced with the following:
-(1) In Hong Kong SAR: "Hong Kong SAR"
-(2) In Macau SAR: "Macau SAR" except in the Governing Law clause (Section 14.1)
-(3) In Taiwan: "Taiwan."
-INDIA
-10.1 Items for Which IBM May be Liable
-The following replaces the terms of Items 1 and 2 of the first paragraph:
-1) liability for bodily injury (including death) or damage to real property and tangible personal property will be limited to that caused by IBM's negligence; and 2) as to any other actual damage arising in any situation involving nonperformance by IBM pursuant to, or in any way related to the subject of this Agreement, IBM's liability will be limited to the charge paid by Licensee for the individual Program that is the subject of the claim.
-13. General
-The following replaces the terms of Item 13.g:
-If no suit or other legal action is brought, within three years after the cause of action arose, in respect of any claim that either party may have against the other, the rights of the concerned party in respect of such claim will be forfeited and the other party will stand released from its obligations in respect of such claim.
-INDONESIA
-3.3 Term and Termination
-The following is added to the last paragraph:
-Both parties waive the provision of article 1266 of the Indonesian Civil Code, to the extent the article provision requires such court decree for the termination of an agreement creating mutual obligations.
-JAPAN
-13. General
-The following is inserted after Item 13.f:
-Any doubts concerning this Agreement will be initially resolved between us in good faith and in accordance with the principle of mutual trust.
-MALAYSIA
-10.2 Items for Which IBM Is not Liable
-The word "SPECIAL" in Item 10.2b is deleted.
-NEW ZEALAND
-8.1 Limited Warranty
-The following is added:
-The warranties specified in this Section are in addition to any rights Licensee may have under the Consumer Guarantees Act 1993 or other legislation which cannot be excluded or limited. The Consumer Guarantees Act 1993 will not apply in respect of any goods which IBM provides, if Licensee requires the goods for the purposes of a business as defined in that Act.
-10. Limitation of Liability
-The following is added:
-Where Programs are not obtained for the purposes of a business as defined in the Consumer Guarantees Act 1993, the limitations in this Section are subject to the limitations in that Act.
-PEOPLE'S REPUBLIC OF CHINA
-4. Charges
-The following is added:
-All banking charges incurred in the People's Republic of China will be borne by Licensee and those incurred outside the People's Republic of China will be borne by IBM.
-PHILIPPINES
-10.2 Items for Which IBM Is not Liable
-The following replaces the terms of Item 10.2b:
-b. special (including nominal and exemplary damages), moral, incidental, or indirect damages or for any economic consequential damages; or
-SINGAPORE
-10.2 Items for Which IBM Is not Liable
-The words "SPECIAL" and "ECONOMIC" are deleted from Item 10.2b.
-13. General
-The following replaces the terms of Item 13.i:
-Subject to the rights provided to IBM's suppliers and Program developers as provided in Section 10 above (Limitation of Liability), a person who is not a party to this Agreement will have no right under the Contracts (Rights of Third Parties) Act to enforce any of its terms.
-TAIWAN
-8.1 Limited Warranty
-The last paragraph is deleted.
-10.1 Items for Which IBM May Be Liable
-The following sentences are deleted:
-This limit also applies to any of IBM's subcontractors and Program developers. It is the maximum for which IBM and its subcontractors and Program developers are collectively responsible.
-EUROPE, MIDDLE EAST, AFRICA (EMEA) COUNTRY AMENDMENTS
-EUROPEAN UNION MEMBER STATES
-8. Warranty and Exclusions
-The following is added to Section 8 (Warranty and Exclusions):
-In the European Union ("EU"), consumers have legal rights under applicable national legislation governing the sale of consumer goods. Such rights are not affected by the provisions set out in this Section 8 (Warranty and Exclusions). The territorial scope of the Limited Warranty is worldwide.
-EU MEMBER STATES AND THE COUNTRIES IDENTIFIED BELOW
-Iceland, Liechtenstein, Norway, Switzerland, Turkey, and any other European country that has enacted local data privacy or protection legislation similar to the EU model.
-13. General
-The following replaces Item 13.e:
-(1) Definitions - For the purposes of this Item 13.e, the following additional definitions apply:
-(a) Business Contact Information - business-related contact information disclosed by Licensee to IBM, including names, job titles, business addresses, telephone numbers and email addresses of Licensee's employees and contractors.
For Austria, Italy and Switzerland, Business Contact Information also includes information about Licensee and its contractors as legal entities (for example, Licensee's revenue data and other transactional information) -(b) Business Contact Personnel - Licensee employees and contractors to whom the Business Contact Information relates. -(c) Data Protection Authority - the authority established by the Data Protection and Electronic Communications Legislation in the applicable country or, for non-EU countries, the authority responsible for supervising the protection of personal data in that country, or (for any of the foregoing) any duly appointed successor entity thereto. -(d) Data Protection & Electronic Communications Legislation - (i) the applicable local legislation and regulations in force implementing the requirements of EU Directive 95/46/EC (on the protection of individuals with regard to the processing of personal data and on the free movement of such data) and of EU Directive 2002/58/EC (concerning the processing of personal data and the protection of privacy in the electronic communications sector); or (ii) for non-EU countries, the legislation and/or regulations passed in the applicable country relating to the protection of personal data and the regulation of electronic communications involving personal data, including (for any of the foregoing) any statutory replacement or modification thereof. -(e) IBM Group - International Business Machines Corporation of Armonk, New York, USA, its subsidiaries, and their respective Business Partners and subcontractors. -(2) Licensee authorizes IBM: -(a) to process and use Business Contact Information within IBM Group in support of Licensee including the provision of support services, and for the purpose of furthering the business relationship between Licensee and IBM Group, including, without limitation, contacting Business Contact Personnel (by email or otherwise) and marketing IBM Group products and services (the "Specified Purpose"); and -(b) to disclose Business Contact Information to other members of IBM Group in pursuit of the Specified Purpose only. -(3) IBM agrees that all Business Contact Information will be processed in accordance with the Data Protection & Electronic Communications Legislation and will be used only for the Specified Purpose. -(4) To the extent required by the Data Protection & Electronic Communications Legislation, Licensee represents that (a) it has obtained (or will obtain) any consents from (and has issued (or will issue) any notices to) the Business Contact Personnel as are necessary in order to enable IBM Group to process and use the Business Contact Information for the Specified Purpose. -(5) Licensee authorizes IBM to transfer Business Contact Information outside the European Economic Area, provided that the transfer is made on contractual terms approved by the Data Protection Authority or the transfer is otherwise permitted under the Data Protection & Electronic Communications Legislation. -AUSTRIA -8.2 Exclusions -The following is deleted from the first paragraph: -MERCHANTABILITY, SATISFACTORY QUALITY -10. Limitation of Liability -The following is added: -The following limitations and exclusions of IBM's liability do not apply for damages caused by gross negligence or willful misconduct. 
-10.1 Items for Which IBM May Be Liable -The following replaces the first sentence in the first paragraph: -Circumstances may arise where, because of a default by IBM in the performance of its obligations under this Agreement or other liability, Licensee is entitled to recover damages from IBM. -In the second sentence of the first paragraph, delete entirely the parenthetical phrase: -"(including fundamental breach, negligence, misrepresentation, or other contract or tort claim)". -10.2 Items for Which IBM Is Not Liable -The following replaces Item 10.2b: -b. indirect damages or consequential damages; or -BELGIUM, FRANCE, ITALY, AND LUXEMBOURG -10. Limitation of Liability -The following replaces the terms of Section 10 (Limitation of Liability) in its entirety: -Except as otherwise provided by mandatory law: -10.1 Items for Which IBM May Be Liable -IBM's entire liability for all claims in the aggregate for any damages and losses that may arise as a consequence of the fulfillment of its obligations under or in connection with this Agreement or due to any other cause related to this Agreement is limited to the compensation of only those damages and losses proved and actually arising as an immediate and direct consequence of the non-fulfillment of such obligations (if IBM is at fault) or of such cause, for a maximum amount equal to the charges (if the Program is subject to fixed term charges, up to twelve months' charges) Licensee paid for the Program that has caused the damages. -The above limitation will not apply to damages for bodily injuries (including death) and damages to real property and tangible personal property for which IBM is legally liable. -10.2 Items for Which IBM Is Not Liable -UNDER NO CIRCUMSTANCES IS IBM OR ANY OF ITS PROGRAM DEVELOPERS LIABLE FOR ANY OF THE FOLLOWING, EVEN IF INFORMED OF THEIR POSSIBILITY: 1) LOSS OF, OR DAMAGE TO, DATA; 2) INCIDENTAL, EXEMPLARY OR INDIRECT DAMAGES, OR FOR ANY ECONOMIC CONSEQUENTIAL DAMAGES; AND / OR 3) LOST PROFITS, BUSINESS, REVENUE, GOODWILL, OR ANTICIPATED SAVINGS, EVEN IF THEY ARISE AS AN IMMEDIATE CONSEQUENCE OF THE EVENT THAT GENERATED THE DAMAGES. -10.3 Suppliers and Program Developers -The limitation and exclusion of liability herein agreed applies not only to the activities performed by IBM but also to the activities performed by its suppliers and Program developers, and represents the maximum amount for which IBM as well as its suppliers and Program developers are collectively responsible. -GERMANY -8.1 Limited Warranty -The following is inserted at the beginning of Section 8.1: -The Warranty Period is twelve months from the date of delivery of the Program to the original Licensee. -8.2 Exclusions -Section 8.2 is deleted in its entirety and replaced with the following: -Section 8.1 defines IBM's entire warranty obligations to Licensee except as otherwise required by applicable statutory law. -10. Limitation of Liability -The following replaces the Limitation of Liability section in its entirety: -a. IBM will be liable without limit for 1) loss or damage caused by a breach of an express guarantee; 2) damages or losses resulting in bodily injury (including death); and 3) damages caused intentionally or by gross negligence. -b. 
In the event of loss, damage and frustrated expenditures caused by slight negligence or in breach of essential contractual obligations, IBM will be liable, regardless of the basis on which Licensee is entitled to claim damages from IBM (including fundamental breach, negligence, misrepresentation, or other contract or tort claim), per claim only up to the greater of 500,000 euro or the charges (if the Program is subject to fixed term charges, up to 12 months' charges) Licensee paid for the Program that caused the loss or damage. A number of defaults which together result in, or contribute to, substantially the same loss or damage will be treated as one default. -c. In the event of loss, damage and frustrated expenditures caused by slight negligence, IBM will not be liable for indirect or consequential damages, even if IBM was informed about the possibility of such loss or damage. -d. In case of delay on IBM's part: 1) IBM will pay to Licensee an amount not exceeding the loss or damage caused by IBM's delay and 2) IBM will be liable only in respect of the resulting damages that Licensee suffers, subject to the provisions of Items a and b above. -13. General -The following replaces the provisions of 13.g: -Any claims resulting from this Agreement are subject to a limitation period of three years, except as stated in Section 8.1 (Limited Warranty) of this Agreement. -The following replaces the provisions of 13.i: -No right or cause of action for any third party is created by this Agreement, nor is IBM responsible for any third party claims against Licensee, except (to the extent permitted in Section 10 (Limitation of Liability)) for: i) bodily injury (including death); or ii) damage to real or tangible personal property for which (in either case) IBM is legally liable to that third party. -IRELAND -8.2 Exclusions -The following paragraph is added: -Except as expressly provided in these terms and conditions, or Section 12 of the Sale of Goods Act 1893 as amended by the Sale of Goods and Supply of Services Act, 1980 (the "1980 Act"), all conditions or warranties (express or implied, statutory or otherwise) are hereby excluded including, without limitation, any warranties implied by the Sale of Goods Act 1893 as amended by the 1980 Act (including, for the avoidance of doubt, Section 39 of the 1980 Act). -IRELAND AND UNITED KINGDOM -2. Agreement Structure -The following sentence is added: -Nothing in this paragraph shall have the effect of excluding or limiting liability for fraud. -10.1 Items for Which IBM May Be Liable -The following replaces the first paragraph of the Subsection: -For the purposes of this section, a "Default" means any act, statement, omission or negligence on the part of IBM in connection with, or in relation to, the subject matter of an Agreement in respect of which IBM is legally liable to Licensee, whether in contract or in tort. A number of Defaults which together result in, or contribute to, substantially the same loss or damage will be treated as one Default. -Circumstances may arise where, because of a Default by IBM in the performance of its obligations under this Agreement or other liability, Licensee is entitled to recover damages from IBM. 
Regardless of the basis on which Licensee is entitled to claim damages from IBM and except as expressly required by law without the possibility of contractual waiver, IBM's entire liability for any one Default will not exceed the amount of any direct damages, to the extent actually suffered by Licensee as an immediate and direct consequence of the default, up to the greater of (1) 500,000 euro (or the equivalent in local currency) or (2) 125% of the charges (if the Program is subject to fixed term charges, up to 12 months' charges) for the Program that is the subject of the claim. Notwithstanding the foregoing, the amount of any damages for bodily injury (including death) and damage to real property and tangible personal property for which IBM is legally liable is not subject to such limitation.
-10.2 Items for Which IBM is Not Liable
-The following replaces Items 10.2b and 10.2c:
-b. special, incidental, exemplary, or indirect damages or consequential damages; or
-c. wasted management time or lost profits, business, revenue, goodwill, or anticipated savings.
-Z125-3301-14 (07/2011)
+L/N: L-ASAY-BMUN76
+D/N: L-ASAY-BMUN76
+P/N: L-ASAY-BMUN76
+
+
+IMPORTANT: READ CAREFULLY
+
+Two license agreements are presented below.
+
+1. IBM International License Agreement for Evaluation of Programs
+2. IBM International Program License Agreement
+
+If Licensee is obtaining the Program for purposes of productive use (other than evaluation, testing, trial "try or buy," or demonstration): By clicking on the "Accept" button below, Licensee accepts the IBM International Program License Agreement, without modification.
+
+If Licensee is obtaining the Program for the purpose of evaluation, testing, trial "try or buy," or demonstration (collectively, an "Evaluation"): By clicking on the "Accept" button below, Licensee accepts both (i) the IBM International License Agreement for Evaluation of Programs (the "Evaluation License"), without modification; and (ii) the IBM International Program License Agreement (the "IPLA"), without modification.
+
+The Evaluation License will apply during the term of Licensee's Evaluation.
+
+The IPLA will automatically apply if Licensee elects to retain the Program after the Evaluation (or obtain additional copies of the Program for use after the Evaluation) by entering into a procurement agreement (e.g., the IBM International Passport Advantage or the IBM Passport Advantage Express agreements).
+
+The Evaluation License and the IPLA are not in effect concurrently; neither modifies the other; and each is independent of the other.
+
+The complete text of each of these two license agreements follows.
+
+
+International License Agreement for Evaluation of Programs
+
+Part 1 - General Terms
+
+BY DOWNLOADING, INSTALLING, COPYING, ACCESSING, CLICKING ON AN "ACCEPT" BUTTON, OR OTHERWISE USING THE PROGRAM, LICENSEE AGREES TO THE TERMS OF THIS AGREEMENT. IF YOU ARE ACCEPTING THESE TERMS ON BEHALF OF LICENSEE, YOU REPRESENT AND WARRANT THAT YOU HAVE FULL AUTHORITY TO BIND LICENSEE TO THESE TERMS. IF YOU DO NOT AGREE TO THESE TERMS,
+
+* DO NOT DOWNLOAD, INSTALL, COPY, ACCESS, CLICK ON AN "ACCEPT" BUTTON, OR USE THE PROGRAM; AND
+
+* PROMPTLY RETURN THE UNUSED MEDIA AND DOCUMENTATION TO THE PARTY FROM WHOM IT WAS OBTAINED. IF THE PROGRAM WAS DOWNLOADED, DESTROY ALL COPIES OF THE PROGRAM.
+
+1. Definitions
+
+"Authorized Use" - the specified level at which Licensee is authorized to execute or run the Program.
That level may be measured by number of users, millions of service units ("MSUs"), Processor Value Units ("PVUs"), or other level of use specified by IBM. + +"IBM" - International Business Machines Corporation or one of its subsidiaries. + +"License Information" ("LI") - a document that provides information and any additional terms specific to a Program. The Program's LI can be found in the Program's directory, by the use of a system command, or as a booklet included with the Program. + +"Program" - the following, including the original and all whole or partial copies: 1) machine-readable instructions and data, 2) components, files, and modules, 3) audio-visual content (such as images, text, recordings, or pictures), and 4) related licensed materials (such as keys and documentation). + +2. Agreement Structure + +This Agreement includes Part 1 - General Terms, Part 2 - Country-unique Terms (if any) and the LI and is the complete agreement between Licensee and IBM regarding the use of the Program. It replaces any prior oral or written communications between Licensee and IBM concerning Licensee's use of the Program. The terms of Part 2 may replace or modify those of Part 1. To the extent of any conflict, the LI prevails over both Parts. + +3. License Grant + +The Program is owned by IBM or an IBM supplier, and is copyrighted and licensed, not sold. + +IBM grants Licensee a limited, nonexclusive, nontransferable license to 1) download, install, and use the Program during the evaluation period up to the Authorized Use specified in the LI solely for internal evaluation, testing, or demonstration purposes on a trial basis; 2) make and install a reasonable number of copies to support such Authorized Use, and 3) make a backup copy, all provided that + +a. Licensee has lawfully obtained the Program and complies with the terms of this Agreement; + +b. the backup copy does not execute unless the backed-up Program cannot execute; + +c. Licensee reproduces all copyright notices and other legends of ownership on each copy, or partial copy, of the Program; + +d. Licensee maintains a record of all copies of the Program and ensures that anyone who uses the Program (accessed either locally or remotely) 1) does so only on Licensee's behalf and 2) complies with the terms of this Agreement; + +e. Licensee does not 1) use the Program for productive purposes or otherwise use, copy, modify, or distribute the Program except as expressly permitted in this Agreement; 2) reverse assemble, reverse compile, otherwise translate, or reverse engineer the Program, except as expressly permitted by law without the possibility of contractual waiver; 3) use any of the Program's components, files, modules, audio-visual content, or related licensed materials separately from that Program; 4) sublicense, rent, or lease the Program; or 5) use the Program for commercial application hosting; and + +f. if Licensee obtains this Program as a Supporting Program, Licensee uses this Program only to support the Principal Program and subject to any limitations in the license to the Principal Program, or, if Licensee obtains this Program as a Principal Program, Licensee uses all Supporting Programs only to support this Program, and subject to any limitations in this Agreement. For purposes of this Item "f," a "Supporting Program" is a Program that is part of another IBM Program ("Principal Program") and identified as a Supporting Program in the Principal Program's LI. 
(To obtain a separate license to a Supporting Program without these restrictions, Licensee should contact the party from whom Licensee obtained the Supporting Program.) + +This license applies to each copy of the Program that Licensee makes. + +3.1 Updates, Fixes, and Patches + +When Licensee receives an update, fix, or patch to a Program, Licensee accepts any additional or different terms that are applicable to such update, fix, or patch that are specified in its LI. If no additional or different terms are provided, then the update, fix, or patch is subject solely to this Agreement. If the Program is replaced by an update, Licensee agrees to promptly discontinue use of the replaced Program. + +3.2 Term and Termination + +The evaluation period begins on the date Licensee agrees to the terms of this Agreement and ends upon the earliest of 1) the end of the duration or the date specified by IBM in either the License Information or a transaction document or 2) the date on which the Program automatically disables itself. Licensee will destroy the Program and all copies made of it within ten days of the end of the evaluation period. If IBM specifies in the LI that Licensee may retain the Program, and Licensee elects to do so, then the Program will be subject to a different license agreement, which IBM will provide to Licensee. In addition, a charge may apply. + +IBM may terminate Licensee's license if Licensee fails to comply with the terms of this Agreement. If the license is terminated for any reason by either party, Licensee agrees to promptly discontinue use of and destroy all of Licensee's copies of the Program. Any terms of this Agreement that by their nature extend beyond termination of this Agreement remain in effect until fulfilled, and apply to both parties' respective successors and assignees. + +THE PROGRAM MAY CONTAIN A DISABLING DEVICE THAT WILL PREVENT IT FROM BEING USED AFTER THE EVALUATION PERIOD ENDS. LICENSEE AGREES NOT TO TAMPER WITH THE DISABLING DEVICE OR THE PROGRAM. LICENSEE SHOULD TAKE PRECAUTIONS TO AVOID ANY LOSS OF DATA THAT MIGHT RESULT WHEN THE PROGRAM CAN NO LONGER BE USED. + +4. Charges + +There is no charge for the use of the Program for the duration of the evaluation period. + +5. No Warranties + +SUBJECT TO ANY STATUTORY WARRANTIES THAT CANNOT BE EXCLUDED, IBM MAKES NO WARRANTIES OR CONDITIONS, EXPRESS OR IMPLIED, REGARDING THE PROGRAM OR SUPPORT, IF ANY, INCLUDING, BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY, FITNESS FOR A PARTICULAR PURPOSE, AND TITLE, AND ANY WARRANTY OR CONDITION OF NON-INFRINGEMENT. + +SOME STATES OR JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF EXPRESS OR IMPLIED WARRANTIES, SO THE ABOVE EXCLUSION MAY NOT APPLY TO LICENSEE. IN THAT EVENT, SUCH WARRANTIES ARE LIMITED IN DURATION TO THE MINIMUM PERIOD REQUIRED BY LAW. NO WARRANTIES APPLY AFTER THAT PERIOD. SOME STATES OR JURISDICTIONS DO NOT ALLOW LIMITATIONS ON HOW LONG AN IMPLIED WARRANTY LASTS, SO THE ABOVE LIMITATION MAY NOT APPLY TO LICENSEE. LICENSEE MAY HAVE OTHER RIGHTS THAT VARY FROM STATE TO STATE OR JURISDICTION TO JURISDICTION. + +THE DISCLAIMERS AND EXCLUSIONS IN THIS SECTION 5 ALSO APPLY TO ANY OF IBM'S PROGRAM DEVELOPERS AND SUPPLIERS. + +MANUFACTURERS, SUPPLIERS, OR PUBLISHERS OF NON-IBM PROGRAMS MAY PROVIDE THEIR OWN WARRANTIES. + +IBM DOES NOT PROVIDE SUPPORT OF ANY KIND, UNLESS IBM SPECIFIES OTHERWISE. IN SUCH EVENT, ANY SUPPORT PROVIDED BY IBM IS SUBJECT TO THE DISCLAIMERS AND EXCLUSIONS IN THIS SECTION 5. + +6. 
Licensee Data and Databases + +To assist Licensee in isolating the cause of a problem with the Program, IBM may request that Licensee 1) allow IBM to remotely access Licensee's system or 2) send Licensee information or system data to IBM. However, IBM is not obligated to provide such assistance unless IBM and Licensee enter a separate written agreement under which IBM agrees to provide to Licensee that type of support, which is beyond IBM's obligations in this Agreement. In any event, IBM uses information about errors and problems to improve its products and services, and assist with its provision of related support offerings. For these purposes, IBM may use IBM entities and subcontractors (including in one or more countries other than the one in which Licensee is located), and Licensee authorizes IBM to do so. + +Licensee remains responsible for 1) any data and the content of any database Licensee makes available to IBM, 2) the selection and implementation of procedures and controls regarding access, security, encryption, use, and transmission of data (including any personally-identifiable data), and 3) backup and recovery of any database and any stored data. Licensee will not send or provide IBM access to any personally-identifiable information, whether in data or any other form, and will be responsible for reasonable costs and other amounts that IBM may incur relating to any such information mistakenly provided to IBM or the loss or disclosure of such information by IBM, including those arising out of any third party claims. + +7. Limitation of Liability + +The limitations and exclusions in this Section 7 (Limitation of Liability) apply to the full extent they are not prohibited by applicable law without the possibility of contractual waiver. + +7.1 Items for Which IBM May Be Liable + +Circumstances may arise where, because of a default on IBM's part or other liability, Licensee is entitled to recover damages from IBM. Regardless of the basis on which Licensee is entitled to claim damages from IBM (including fundamental breach, negligence, misrepresentation, or other contract or tort claim), IBM's entire liability for all claims in the aggregate arising from or related to each Program or otherwise arising under this Agreement will not exceed the amount of any 1) damages for bodily injury (including death) and damage to real property and tangible personal property and 2) other actual direct damages up to U.S. $10,000 (or equivalent in local currency). + +This limit also applies to any of IBM's Program developers and suppliers. It is the maximum for which IBM and its Program developers and suppliers are collectively responsible. + +7.2 Items for Which IBM Is Not Liable + +UNDER NO CIRCUMSTANCES IS IBM, ITS PROGRAM DEVELOPERS OR SUPPLIERS LIABLE FOR ANY OF THE FOLLOWING, EVEN IF INFORMED OF THEIR POSSIBILITY: + +a. LOSS OF, OR DAMAGE TO, DATA; + +b. SPECIAL, INCIDENTAL, EXEMPLARY, OR INDIRECT DAMAGES, OR FOR ANY ECONOMIC CONSEQUENTIAL DAMAGES; OR + +c. LOST PROFITS, BUSINESS, REVENUE, GOODWILL, OR ANTICIPATED SAVINGS. + +8. Compliance Verification + +For purposes of this Section 8 (Compliance Verification), "Evaluation Program Terms" means 1) this Agreement and applicable amendments and transaction documents provided by IBM, and 2) IBM software policies that may be found at the IBM Software Policy website (www.ibm.com/softwarepolicies), including but not limited to those policies concerning backup, sub-capacity pricing, and migration. 
+ +The rights and obligations set forth in this Section 8 remain in effect during the period the Program is licensed to Licensee, and for two years thereafter. + +8.1 Verification Process + +Licensee agrees to create, retain, and provide to IBM and its auditors accurate written records, system tool outputs, and other system information sufficient to provide auditable verification that Licensee's use of all Programs is in compliance with the Evaluation Program Terms, including, without limitation, all of IBM's applicable licensing and pricing qualification terms. Licensee is responsible for 1) ensuring that it does not exceed its Authorized Use, and 2) remaining in compliance with Evaluation Program Terms. + +Upon reasonable notice, IBM may verify Licensee's compliance with Evaluation Program Terms at all sites and for all environments in which Licensee uses (for any purpose) Programs subject to Evaluation Program Terms. Such verification will be conducted in a manner that minimizes disruption to Licensee's business, and may be conducted on Licensee's premises, during normal business hours. IBM may use an independent auditor to assist with such verification, provided IBM has a written confidentiality agreement in place with such auditor. + +8.2 Resolution + +IBM will notify Licensee in writing if any such verification indicates that Licensee has used any Program in excess of its Authorized Use or is otherwise not in compliance with the Evaluation Program Terms. Licensee agrees to promptly pay directly to IBM the charges that IBM specifies in an invoice for 1) any such excess use, 2) support for such excess use for the lesser of the duration of such excess use or two years, and 3) any additional charges and other liabilities determined as a result of such verification. + +9. Third Party Notices + +The Program may include third party code that IBM, not the third party, licenses to Licensee under this Agreement. Notices, if any, for the third party code ("Third Party Notices") are included for Licensee's information only. These notices can be found in the Program's NOTICES file(s). Information on how to obtain source code for certain third party code can be found in the Third Party Notices. If in the Third Party Notices IBM identifies third party code as "Modifiable Third Party Code," IBM authorizes Licensee to 1) modify the Modifiable Third Party Code and 2) reverse engineer the Program modules that directly interface with the Modifiable Third Party Code provided that it is only for the purpose of debugging Licensee's modifications to such third party code. IBM's service and support obligations, if any, apply only to the unmodified Program. + +10. General + +a. Nothing in this Agreement affects any statutory rights of consumers that cannot be waived or limited by contract. + +b. If any provision of this Agreement is held to be invalid or unenforceable, the remaining provisions of this Agreement remain in full force and effect. + +c. Licensee is prohibited from exporting the Program. + +d. Licensee authorizes International Business Machines Corporation and its subsidiaries (and their successors and assigns, contractors and IBM Business Partners) to store and use Licensee's business contact information wherever they do business, in connection with IBM products and services, or in furtherance of IBM's business relationship with Licensee. + +e. Each party will allow the other reasonable opportunity to comply before it claims that the other has not met its obligations under this Agreement. 
The parties will attempt in good faith to resolve all disputes, disagreements, or claims between the parties relating to this Agreement. + +f. Unless otherwise required by applicable law without the possibility of contractual waiver or limitation: 1) neither party will bring a legal action, regardless of form, for any claim arising out of or related to this Agreement more than two years after the cause of action arose; and 2) upon the expiration of such time limit, any such claim and all respective rights related to the claim lapse. + +g. Neither Licensee nor IBM is responsible for failure to fulfill any obligations due to causes beyond its control. + +h. No right or cause of action for any third party is created by this Agreement, nor is IBM responsible for any third party claims against Licensee, except as permitted in Subsection 7.1 (Items for Which IBM May Be Liable) above for bodily injury (including death) or damage to real or tangible personal property for which IBM is legally liable to that third party. + +i. In entering into this Agreement, neither party is relying on any representation not specified in this Agreement, including but not limited to any representation concerning: 1) the performance or function of the Program, other than as expressly warranted in Section 5 (No Warranties) above; 2) the experiences or recommendations of other parties; or 3) any results or savings that Licensee may achieve. + +j. IBM has signed agreements with certain organizations (called "IBM Business Partners") to promote, market, and support certain Programs. IBM Business Partners remain independent and separate from IBM. IBM is not responsible for the actions or statements of IBM Business Partners or obligations they have to Licensee. + +k. The license and intellectual property indemnification terms of Licensee's other agreements with IBM (such as the IBM Customer Agreement) do not apply to Program licenses granted under this Agreement. + +11. Geographic Scope and Governing Law + +11.1 Governing Law + +Both parties agree to the application of the laws of the country in which Licensee obtained the Program license to govern, interpret, and enforce all of Licensee's and IBM's respective rights, duties, and obligations arising from, or relating in any manner to, the subject matter of this Agreement, without regard to conflict of law principles. + +The United Nations Convention on Contracts for the International Sale of Goods does not apply. + +11.2 Jurisdiction + +All rights, duties, and obligations are subject to the courts of the country in which Licensee obtained the Program license. + +Part 2 - Country-unique Terms + +For licenses granted in the countries specified below, the following terms replace or modify the referenced terms in Part 1. All terms in Part 1 that are not changed by these amendments remain unchanged and in effect. This Part 2 is organized as follows: + +* Multiple country amendments to Part 1, Section 11 (Governing Law and Jurisdiction); + +* Americas country amendments to other Agreement terms; + +* Asia Pacific country amendments to other Agreement terms; and + +* Europe, Middle East, and Africa country amendments to other Agreement terms. 
+ +Multiple country amendments to Part 1, Section 11 (Governing Law and Jurisdiction) + +11.1 Governing Law + +The phrase "the laws of the country in which Licensee obtained the Program license" in the first paragraph of 11.1 Governing Law is replaced by the following phrases in the countries below: + +AMERICAS + +(1) In Canada: the laws in the Province of Ontario; + +(2) in Mexico: the federal laws of the Republic of Mexico; + +(3) in the United States, Anguilla, Antigua/Barbuda, Aruba, British Virgin Islands, Cayman Islands, Dominica, Grenada, Guyana, Saint Kitts and Nevis, Saint Lucia, Saint Maarten, and Saint Vincent and the Grenadines: the laws of the State of New York, United States; + +(4) in Venezuela: the laws of the Bolivarian Republic of Venezuela; + +ASIA PACIFIC + +(5) in Cambodia and Laos: the laws of the State of New York, United States; + +(6) in Australia: the laws of the State or Territory in which the transaction is performed; + +(7) in Hong Kong SAR and Macau SAR: the laws of Hong Kong Special Administrative Region ("SAR"); + +(8) in Taiwan: the laws of Taiwan; + +EUROPE, MIDDLE EAST, AND AFRICA + +(9) in Albania, Armenia, Azerbaijan, Belarus, Bosnia-Herzegovina, Bulgaria, Croatia, Former Yugoslav Republic of Macedonia, Georgia, Hungary, Kazakhstan, Kyrgyzstan, Moldova, Montenegro, Poland, Romania, Russia, Serbia, Slovakia, Tajikistan, Turkmenistan, Ukraine, and Uzbekistan: the laws of Austria; + +(10) in Algeria, Andorra, Benin, Burkina Faso, Cameroon, Cape Verde, Central African Republic, Chad, Comoros, Congo Republic, Djibouti, Democratic Republic of Congo, Equatorial Guinea, French Guiana, French Polynesia, Gabon, Gambia, Guinea, Guinea-Bissau, Ivory Coast, Lebanon, Madagascar, Mali, Mauritania, Mauritius, Mayotte, Morocco, New Caledonia, Niger, Reunion, Senegal, Seychelles, Togo, Tunisia, Vanuatu, and Wallis and Futuna: the laws of France; + +(11) in Estonia, Latvia, and Lithuania: the laws of Finland; + +(12) in Angola, Bahrain, Botswana, Burundi, Egypt, Eritrea, Ethiopia, Ghana, Jordan, Kenya, Kuwait, Liberia, Malawi, Malta, Mozambique, Nigeria, Oman, Pakistan, Qatar, Rwanda, Sao Tome and Principe, Saudi Arabia, Sierra Leone, Somalia, Tanzania, Uganda, United Arab Emirates, the United Kingdom, West Bank/Gaza, Yemen, Zambia, and Zimbabwe: the laws of England; and + +(13) in South Africa, Namibia, Lesotho, and Swaziland: the laws of the Republic of South Africa. 
+ +11.2 Jurisdiction + +The following paragraph pertains to jurisdiction and replaces Subsection 11.2 (Jurisdiction) as it applies for those countries identified below: + +All rights, duties, and obligations are subject to the courts of the country in which Licensee obtained the Program license except that in the countries identified below all disputes arising out of or related to this Agreement, including summary proceedings, will be brought before and subject to the exclusive jurisdiction of the following courts of competent jurisdiction: + +AMERICAS + +(1) In Argentina: the Ordinary Commercial Court of the city of Buenos Aires; + +(2) in Brazil: the court of Rio de Janeiro, RJ; + +(3) in Chile: the Civil Courts of Justice of Santiago; + +(4) in Ecuador: the civil judges of Quito for executory or summary proceedings (as applicable); + +(5) in Mexico: the courts located in Mexico City, Federal District; + +(6) in Peru: the judges and tribunals of the judicial district of Lima, Cercado; + +(7) in Uruguay: the courts of the city of Montevideo; + +(8) in Venezuela: the courts of the metropolitan area of the city of Caracas; + +EUROPE, MIDDLE EAST, AND AFRICA + +(9) in Austria: the court of law in Vienna, Austria (Inner-City); + +(10) in Algeria, Andorra, Benin, Burkina Faso, Cameroon, Cape Verde, Central African Republic, Chad, Comoros, Congo Republic, Djibouti, Democratic Republic of Congo, Equatorial Guinea, France, French Guiana, French Polynesia, Gabon, Gambia, Guinea, Guinea-Bissau, Ivory Coast, Lebanon, Madagascar, Mali, Mauritania, Mauritius, Mayotte, Monaco, Morocco, New Caledonia, Niger, Reunion, Senegal, Seychelles, Togo, Tunisia, Vanuatu, and Wallis and Futuna: the Commercial Court of Paris; + +(11) in Angola, Bahrain, Botswana, Burundi, Egypt, Eritrea, Ethiopia, Ghana, Jordan, Kenya, Kuwait, Liberia, Malawi, Malta, Mozambique, Nigeria, Oman, Pakistan, Qatar, Rwanda, Sao Tome and Principe, Saudi Arabia, Sierra Leone, Somalia, Tanzania, Uganda, United Arab Emirates, the United Kingdom, West Bank/Gaza, Yemen, Zambia, and Zimbabwe: the English courts; + +(12) in South Africa, Namibia, Lesotho, and Swaziland: the High Court in Johannesburg; + +(13) in Greece: the competent court of Athens; + +(14) in Israel: the courts of Tel Aviv-Jaffa; + +(15) in Italy: the courts of Milan; + +(16) in Portugal: the courts of Lisbon; + +(17) in Spain: the courts of Madrid; and + +(18) in Turkey: the Istanbul Central Courts and Execution Directorates of Istanbul, the Republic of Turkey. + +11.3 Arbitration + +The following paragraph is added as a new Subsection 11.3 (Arbitration) as it applies for those countries identified below. The provisions of this Subsection 11.3 prevail over those of Subsection 11.2 (Jurisdiction) to the extent permitted by the applicable governing law and rules of procedure: + +ASIA PACIFIC + +(1) In Cambodia, India, Laos, Philippines, and Vietnam: + +Disputes arising out of or in connection with this Agreement will be finally settled by arbitration which will be held in Singapore in accordance with the Arbitration Rules of Singapore International Arbitration Center ("SIAC Rules") then in effect. The arbitration award will be final and binding for the parties without appeal and will be in writing and set forth the findings of fact and the conclusions of law. + +The number of arbitrators will be three, with each side to the dispute being entitled to appoint one arbitrator. 
The two arbitrators appointed by the parties will appoint a third arbitrator who will act as chairman of the proceedings. Vacancies in the post of chairman will be filled by the president of the SIAC. Other vacancies will be filled by the respective nominating party. Proceedings will continue from the stage they were at when the vacancy occurred.
+
+If one of the parties refuses or otherwise fails to appoint an arbitrator within 30 days of the date the other party appoints its arbitrator, the first appointed arbitrator will be the sole arbitrator, provided that the arbitrator was validly and properly appointed.
+
+All proceedings will be conducted, including all documents presented in such proceedings, in the English language. The English language version of this Agreement prevails over any other language version.
+
+(2) In the People's Republic of China:
+
+In case no settlement can be reached, the disputes will be submitted to China International Economic and Trade Arbitration Commission for arbitration according to the then effective rules of the said Arbitration Commission. The arbitration will take place in Beijing and be conducted in Chinese. The arbitration award will be final and binding on both parties. During the course of arbitration, this agreement will continue to be performed except for the part which the parties are disputing and which is undergoing arbitration.
+
+(3) In Indonesia:
+
+Each party will allow the other reasonable opportunity to comply before it claims that the other has not met its obligations under this Agreement. The parties will attempt in good faith to resolve all disputes, disagreements, or claims between the parties relating to this Agreement. Unless otherwise required by applicable law without the possibility of contractual waiver or limitation, i) neither party will bring a legal action, regardless of form, arising out of or related to this Agreement or any transaction under it more than two years after the cause of action arose; and ii) after such time limit, any legal action arising out of this Agreement or any transaction under it and all respective rights related to any such action lapse.
+
+Disputes arising out of or in connection with this Agreement shall be finally settled by arbitration that shall be held in Jakarta, Indonesia in accordance with the rules of the Indonesian National Board of Arbitration (Badan Arbitrase Nasional Indonesia or "BANI") then in effect. The arbitration award shall be final and binding for the parties without appeal and shall be in writing and set forth the findings of fact and the conclusions of law.
+
+The number of arbitrators shall be three, with each side to the dispute being entitled to appoint one arbitrator. The two arbitrators appointed by the parties shall appoint a third arbitrator who shall act as chairman of the proceedings. Vacancies in the post of chairman shall be filled by the chairman of the BANI. Other vacancies shall be filled by the respective nominating party. Proceedings shall continue from the stage they were at when the vacancy occurred.
+
+If one of the parties refuses or otherwise fails to appoint an arbitrator within 30 days of the date the other party appoints its arbitrator, the first appointed arbitrator shall be the sole arbitrator, provided that the arbitrator was validly and properly appointed.
+
+All proceedings shall be conducted, including all documents presented in such proceedings, in the English and/or Indonesian language.
+ +EUROPE, MIDDLE EAST, AND AFRICA + +(4) In Albania, Armenia, Azerbaijan, Belarus, Bosnia-Herzegovina, Bulgaria, Croatia, Former Yugoslav Republic of Macedonia, Georgia, Hungary, Kazakhstan, Kyrgyzstan, Moldova, Montenegro, Poland, Romania, Russia, Serbia, Slovakia, Tajikistan, Turkmenistan, Ukraine, and Uzbekistan: + +All disputes arising out of this Agreement or related to its violation, termination or nullity will be finally settled under the Rules of Arbitration and Conciliation of the International Arbitral Center of the Federal Economic Chamber in Vienna (Vienna Rules) by three arbitrators appointed in accordance with these rules. The arbitration will be held in Vienna, Austria, and the official language of the proceedings will be English. The decision of the arbitrators will be final and binding upon both parties. Therefore, pursuant to paragraph 598 (2) of the Austrian Code of Civil Procedure, the parties expressly waive the application of paragraph 595 (1) figure 7 of the Code. IBM may, however, institute proceedings in a competent court in the country of installation. + +(5) In Estonia, Latvia, and Lithuania: + +All disputes arising in connection with this Agreement will be finally settled in arbitration that will be held in Helsinki, Finland in accordance with the arbitration laws of Finland then in effect. Each party will appoint one arbitrator. The arbitrators will then jointly appoint the chairman. If arbitrators cannot agree on the chairman, then the Central Chamber of Commerce in Helsinki will appoint the chairman. + +AMERICAS COUNTRY AMENDMENTS + +CANADA + +7.1 Items for Which IBM May Be Liable + +The following replaces Item 1 in the first paragraph of this Subsection 7.1 (Items for Which IBM May Be Liable): + +1) damages for bodily injury (including death) and physical harm to real property and tangible personal property caused by IBM's negligence; and + +10. General + +The following replaces Item 10.h: + +h. No right or cause of action for any third party is created by this Agreement or any transaction under it, nor is IBM responsible for any third party claims against Licensee except as permitted by the Limitation of Liability section above for bodily injury (including death) or physical harm to real or tangible personal property caused by IBM's negligence for which IBM is legally liable to that third party. + +The following is added as Item 10.l: + +l. For purposes of this Item 10.l, "Personal Data" refers to information relating to an identified or identifiable individual made available by one of the parties, its personnel or any other individual to the other in connection with this Agreement. The following provisions apply in the event that one party makes Personal Data available to the other: + +(1) General + +(a) Each party is responsible for complying with any obligations applying to it under applicable Canadian data privacy laws and regulations ("Laws"). + +(b) Neither party will request Personal Data beyond what is necessary to fulfill the purpose(s) for which it is requested. The purpose(s) for requesting Personal Data must be reasonable. Each party will agree in advance as to the type of Personal Data that is required to be made available. + +(2) Security Safeguards + +(a) Each party acknowledges that it is solely responsible for determining and communicating to the other the appropriate technological, physical and organizational security measures required to protect Personal Data. 
+ +(b) Each party will ensure that Personal Data is protected in accordance with the security safeguards communicated and agreed to by the other. + +(c) Each party will ensure that any third party to whom Personal Data is transferred is bound by the applicable terms of this section. + +(d) Additional or different services required to comply with the Laws will be deemed a request for new services. + +(3) Use + +Each party agrees that Personal Data will only be used, accessed, managed, transferred, disclosed to third parties or otherwise processed to fulfill the purpose(s) for which it was made available. + +(4) Access Requests + +(a) Each party agrees to reasonably cooperate with the other in connection with requests to access or amend Personal Data. + +(b) Each party agrees to reimburse the other for any reasonable charges incurred in providing each other assistance. + +(c) Each party agrees to amend Personal Data only upon receiving instructions to do so from the other party or its personnel. + +(5) Retention + +Each party will promptly return to the other or destroy all Personal Data that is no longer necessary to fulfill the purpose(s) for which it was made available, unless otherwise instructed by the other or its personnel or required by law. + +(6) Public Bodies Who Are Subject to Public Sector Privacy Legislation + +For Licensees who are public bodies subject to public sector privacy legislation, this Item 10.l applies only to Personal Data made available to Licensee in connection with this Agreement, and the obligations in this section apply only to Licensee, except that: 1) section (2)(a) applies only to IBM; 2) sections (1)(a) and (4)(a) apply to both parties; and 3) section (4)(b) and the last sentence in (1)(b) do not apply. + +PERU + +7. Limitation of Liability + +The following is added to the end of this Section 7 (Limitation of Liability): + +Except as expressly required by law without the possibility of contractual waiver, Licensee and IBM intend that the limitation of liability in this Limitation of Liability section applies to damages caused by all types of claims and causes of action. If any limitation on or exclusion from liability in this section is held by a court of competent jurisdiction to be unenforceable with respect to a particular claim or cause of action, the parties intend that it nonetheless apply to the maximum extent permitted by applicable law to all other claims and causes of action. + +7.1 Items for Which IBM May Be Liable + +The following is added to the end of this Subsection 7.1: + +In accordance with Article 1328 of the Peruvian Civil Code, the limitations and exclusions specified in this section will not apply to damages caused by IBM's willful misconduct ("dolo") or gross negligence ("culpa inexcusable"). + +UNITED STATES OF AMERICA + +10. General + +The following is added to Section 10 as Item 10.l: + +l. U.S. Government Users Restricted Rights - Use, duplication or disclosure is restricted by the GSA IT Schedule 70 Contract with the IBM Corporation. + +The following is added to Item 10.e: + +Each party waives any right to a jury trial in any proceeding arising out of or related to this Agreement. + +ASIA PACIFIC COUNTRY AMENDMENTS + +AUSTRALIA + +5. 
No Warranties + +The following is added to the first paragraph of Section 5 (No Warranties): + +Although IBM specifies that there are no warranties, Licensee may have certain rights under the Competition and Consumer Act 2010 or other legislation, and those rights are limited only to the extent permitted by the applicable legislation. + +7.1 Items for Which IBM May Be Liable + +The following is added to Subsection 7.1 (Items for Which IBM May Be Liable): + +Where IBM is in breach of a condition or warranty implied by the Competition and Consumer Act 2010, IBM's liability is limited to the repair or replacement of the goods, or the supply of equivalent goods. Where that condition or warranty relates to right to sell, quiet possession or clear title, or the goods are of a kind ordinarily obtained for personal, domestic or household use or consumption, then none of the limitations in this paragraph apply. + +HONG KONG SAR, MACAU SAR, AND TAIWAN + +As applies to licenses obtained in Taiwan and the special administrative regions, phrases throughout this Agreement containing the word "country" (for example, "the country in which the original Licensee was granted the license" and "the country in which Licensee obtained the Program license") are replaced with the following: + +(1) In Hong Kong SAR: "Hong Kong SAR" + +(2) In Macau SAR: "Macau SAR" except in the Governing Law clause (Section 11.1) + +(3) In Taiwan: "Taiwan." + +INDIA + +7.1 Items for Which IBM May Be Liable + +The following replaces the terms of Items 1 and 2 of the first paragraph: + +1) liability for bodily injury (including death) or damage to real property and tangible personal property will be limited to that caused by IBM's negligence; and 2) as to any other actual damage arising in any situation involving nonperformance by IBM pursuant to, or in any way related to the subject of this Agreement, IBM's liability will be limited to the charge paid by Licensee for the individual Program that is the subject of the claim. + +10. General + +The following replaces the terms of Item 10.f: + +f. If no suit or other legal action is brought, within three years after the cause of action arose, in respect of any claim that either party may have against the other, the rights of the concerned party in respect of such claim will be forfeited and the other party will stand released from its obligations in respect of such claim. + +INDONESIA + +3.2 Term and Termination + +The following is added to the last paragraph: + +Both parties waive the provision of article 1266 of the Indonesian Civil Code, to the extent the article provision requires such court decree for the termination of an agreement creating mutual obligations. + +JAPAN + +10. General + +The following is added as Item 10.l: + +l. Any doubts concerning this Agreement will be initially resolved between us in good faith and in accordance with the principle of mutual trust. + +MALAYSIA + +7.2 Items for Which IBM Is Not Liable + +The word "SPECIAL" in Item 7.2b is deleted. + +NEW ZEALAND + +5. No Warranties + +The following is added to the first paragraph of this Section 5 (No Warranties): + +Although IBM specifies that there are no warranties, Licensee may have certain rights under the Consumer Guarantees Act 1993 or other legislation which cannot be excluded or limited. The Consumer Guarantees Act 1993 will not apply in respect of any goods which IBM provides, if Licensee requires the goods for the purposes of a business as defined in that Act. + +7.
Limitation of Liability + +The following is added: + +Where Programs are not obtained for the purposes of a business as defined in the Consumer Guarantees Act 1993, the limitations in this Section are subject to the limitations in that Act. + +PHILIPPINES + +7.2 Items for Which IBM Is Not Liable + +The following replaces the terms of Item 7.2b: + +b. special (including nominal and exemplary damages), moral, incidental, or indirect damages or for any economic consequential damages; or + +SINGAPORE + +7.2 Items for Which IBM Is Not Liable + +The words "SPECIAL" and "ECONOMIC" are deleted from Item 7.2b. + +10. General + +The following replaces the terms of Item 10.h: + +h. Subject to the rights provided to IBM's suppliers and Program developers as provided in Section 7 above (Limitation of Liability), a person who is not a party to this Agreement will have no right under the Contracts (Right of Third Parties) Act to enforce any of its terms. + +TAIWAN + +7.1 Items for Which IBM May Be Liable + +The following sentences are deleted: + +This limit also applies to any of IBM's subcontractors and Program developers. It is the maximum for which IBM and its subcontractors and Program developers are collectively responsible. + +EUROPE, MIDDLE EAST, AFRICA (EMEA) COUNTRY AMENDMENTS + +EUROPEAN UNION MEMBER STATES + +5. No Warranties + +The following is added to Section 5 (No Warranties): + +In the European Union ("EU"), consumers have legal rights under applicable national legislation governing the sale of consumer goods. Such rights are not affected by the provisions set out in this Section 5 (No Warranties). + +EU MEMBER STATES AND THE COUNTRIES IDENTIFIED BELOW + +Iceland, Liechtenstein, Norway, Switzerland, Turkey, and any other European country that has enacted local data privacy or protection legislation similar to the EU model. + +10. General + +The following replaces Item 10.d: + +(1) Definitions - For the purposes of this Item 10.d, the following additional definitions apply: + +(a) Business Contact Information - business-related contact information disclosed by Licensee to IBM, including names, job titles, business addresses, telephone numbers and email addresses of Licensee's employees and contractors. For Austria, Italy and Switzerland, Business Contact Information also includes information about Licensee and its contractors as legal entities (for example, Licensee's revenue data and other transactional information) + +(b) Business Contact Personnel - Licensee employees and contractors to whom the Business Contact Information relates. + +(c) Data Protection Authority - the authority established by the Data Protection and Electronic Communications Legislation in the applicable country or, for non-EU countries, the authority responsible for supervising the protection of personal data in that country, or (for any of the foregoing) any duly appointed successor entity thereto. 
+ +(d) Data Protection & Electronic Communications Legislation - (i) the applicable local legislation and regulations in force implementing the requirements of EU Directive 95/46/EC (on the protection of individuals with regard to the processing of personal data and on the free movement of such data) and of EU Directive 2002/58/EC (concerning the processing of personal data and the protection of privacy in the electronic communications sector); or (ii) for non-EU countries, the legislation and/or regulations passed in the applicable country relating to the protection of personal data and the regulation of electronic communications involving personal data, including (for any of the foregoing) any statutory replacement or modification thereof. + +(e) IBM Group - International Business Machines Corporation of Armonk, New York, USA, its subsidiaries, and their respective Business Partners and subcontractors. + +(2) Licensee authorizes IBM: + +(a) to process and use Business Contact Information within IBM Group in support of Licensee including the provision of support services, and for the purpose of furthering the business relationship between Licensee and IBM Group, including, without limitation, contacting Business Contact Personnel (by email or otherwise) and marketing IBM Group products and services (the "Specified Purpose"); and + +(b) to disclose Business Contact Information to other members of IBM Group in pursuit of the Specified Purpose only. + +(3) IBM agrees that all Business Contact Information will be processed in accordance with the Data Protection & Electronic Communications Legislation and will be used only for the Specified Purpose. + +(4) To the extent required by the Data Protection & Electronic Communications Legislation, Licensee represents that it has obtained (or will obtain) any consents from (and has issued (or will issue) any notices to) the Business Contact Personnel as are necessary in order to enable IBM Group to process and use the Business Contact Information for the Specified Purpose. + +(5) Licensee authorizes IBM to transfer Business Contact Information outside the European Economic Area, provided that the transfer is made on contractual terms approved by the Data Protection Authority or the transfer is otherwise permitted under the Data Protection & Electronic Communications Legislation. + +AUSTRIA + +7. Limitation of Liability + +The following is added: + +The following limitations and exclusions of IBM's liability do not apply for damages caused by gross negligence or willful misconduct. + +7.1 Items for Which IBM May Be Liable + +The following replaces the first sentence in the first paragraph: + +Circumstances may arise where, because of a default by IBM in the performance of its obligations under this Agreement or other liability, Licensee is entitled to recover damages from IBM. + +In the second sentence of the first paragraph, delete entirely the parenthetical phrase: + +"(including fundamental breach, negligence, misrepresentation, or other contract or tort claim)". + +7.2 Items for Which IBM Is Not Liable + +The following replaces Item 7.2b: + +b. indirect damages or consequential damages; or + +BELGIUM, FRANCE, ITALY, AND LUXEMBOURG + +7.
Limitation of Liability + +The following replaces the terms of Section 7 (Limitation of Liability) in its entirety: + +Except as otherwise provided by mandatory law: + +7.1 Items for Which IBM May Be Liable + +IBM's entire liability for all claims in the aggregate for any damages and losses that may arise as a consequence of the fulfillment of its obligations under or in connection with this Agreement or due to any other cause related to this Agreement is limited to the compensation of only those damages and losses proved and actually arising as an immediate and direct consequence of the non-fulfillment of such obligations (if IBM is at fault) or of such cause, for a maximum of EUR 500,000 (five hundred thousand euro). + +The above limitation will not apply to damages for bodily injuries (including death) and damages to real property and tangible personal property for which IBM is legally liable. + +7.2 Items for Which IBM Is Not Liable + +UNDER NO CIRCUMSTANCES IS IBM OR ANY OF ITS PROGRAM DEVELOPERS LIABLE FOR ANY OF THE FOLLOWING, EVEN IF INFORMED OF THEIR POSSIBILITY: 1) LOSS OF, OR DAMAGE TO, DATA; 2) INCIDENTAL, EXEMPLARY OR INDIRECT DAMAGES, OR FOR ANY ECONOMIC CONSEQUENTIAL DAMAGES; AND / OR 3) LOST PROFITS, BUSINESS, REVENUE, GOODWILL, OR ANTICIPATED SAVINGS, EVEN IF THEY ARISE AS AN IMMEDIATE CONSEQUENCE OF THE EVENT THAT GENERATED THE DAMAGES. + +7.3 Suppliers and Program Developers + +The limitation and exclusion of liability herein agreed applies not only to the activities performed by IBM but also to the activities performed by its suppliers and Program developers, and represents the maximum amount for which IBM as well as its suppliers and Program developers are collectively responsible. + +GERMANY + +7. Limitation of Liability + +The following replaces this Section 7 (Limitation of Liability) in its entirety: + +a. IBM will be liable without limit for 1) loss or damage caused by a breach of an express guarantee; 2) damages or losses resulting in bodily injury (including death); and 3) damages caused intentionally or by gross negligence. + +b. In the event of loss, damage and frustrated expenditures caused by slight negligence or in breach of essential contractual obligations, IBM will be liable, regardless of the basis on which Licensee is entitled to claim damages from IBM (including fundamental breach, negligence, misrepresentation, or other contract or tort claim), per claim only up to 500,000 euro for the Program that caused the loss or damage. A number of defaults which together result in, or contribute to, substantially the same loss or damage will be treated as one default. + +c. In the event of loss, damage and frustrated expenditures caused by slight negligence, IBM will not be liable for indirect or consequential damages, even if IBM was informed about the possibility of such loss or damage. + +d. In case of delay on IBM's part: 1) IBM will pay to Licensee an amount not exceeding the loss or damage caused by IBM's delay and 2) IBM will be liable only in respect of the resulting damages that Licensee suffers, subject to the provisions of Items a and b above. + +10. General + +The following replaces the provisions of 10.f: + +f. Any claims resulting from this Agreement are subject to a limitation period of three years, except as stated in Section 5 (No Warranties) of this Agreement. + +The following replaces the provisions of 10.h: + +h. 
No right or cause of action for any third party is created by this Agreement, nor is IBM responsible for any third party claims against Licensee, except (to the extent permitted in Section 7 (Limitation of Liability)) for: i) bodily injury (including death); or ii) damage to real or tangible personal property for which (in either case) IBM is legally liable to that third party. + +IRELAND + +5. No Warranties + +The following paragraph is added to the second paragraph of this Section 5 (No Warranties): + +Except as expressly provided in these terms and conditions, or Section 12 of the Sale of Goods Act 1893 as amended by the Sale of Goods and Supply of Services Act, 1980 (the "1980 Act"), all conditions or warranties (express or implied, statutory or otherwise) are hereby excluded including, without limitation, any warranties implied by the Sale of Goods Act 1893 as amended by the 1980 Act (including, for the avoidance of doubt, Section 39 of the 1980 Act). + +IRELAND AND UNITED KINGDOM + +2. Agreement Structure + +The following sentence is added: + +Nothing in this paragraph shall have the effect of excluding or limiting liability for fraud. + +7.1 Items for Which IBM May Be Liable + +The following replaces the first paragraph of the Subsection: + +For the purposes of this section, a "Default" means any act, statement, omission or negligence on the part of IBM in connection with, or in relation to, the subject matter of an Agreement in respect of which IBM is legally liable to Licensee, whether in contract or in tort. A number of Defaults which together result in, or contribute to, substantially the same loss or damage will be treated as one Default. + +Circumstances may arise where, because of a Default by IBM in the performance of its obligations under this Agreement or other liability, Licensee is entitled to recover damages from IBM. Regardless of the basis on which Licensee is entitled to claim damages from IBM and except as expressly required by law without the possibility of contractual waiver, IBM's entire liability for any one Default will not exceed the amount of any direct damages, to the extent actually suffered by Licensee as an immediate and direct consequence of the Default, up to 500,000 euro (or the equivalent in local currency) for the Program that is the subject of the claim. Notwithstanding the foregoing, the amount of any damages for bodily injury (including death) and damage to real property and tangible personal property for which IBM is legally liable is not subject to such limitation. + +7.2 Items for Which IBM Is Not Liable + +The following replaces Items 7.2b and 7.2c: + +b. special, incidental, exemplary, or indirect damages or consequential damages; or + +c. wasted management time or lost profits, business, revenue, goodwill, or anticipated savings. + +Z125-5543-05 (07/2011) + + +International Program License Agreement + +Part 1 - General Terms + +BY DOWNLOADING, INSTALLING, COPYING, ACCESSING, CLICKING ON AN "ACCEPT" BUTTON, OR OTHERWISE USING THE PROGRAM, LICENSEE AGREES TO THE TERMS OF THIS AGREEMENT. IF YOU ARE ACCEPTING THESE TERMS ON BEHALF OF LICENSEE, YOU REPRESENT AND WARRANT THAT YOU HAVE FULL AUTHORITY TO BIND LICENSEE TO THESE TERMS. IF YOU DO NOT AGREE TO THESE TERMS, + +* DO NOT DOWNLOAD, INSTALL, COPY, ACCESS, CLICK ON AN "ACCEPT" BUTTON, OR USE THE PROGRAM; AND + +* PROMPTLY RETURN THE UNUSED MEDIA, DOCUMENTATION, AND PROOF OF ENTITLEMENT TO THE PARTY FROM WHOM IT WAS OBTAINED FOR A REFUND OF THE AMOUNT PAID. 
IF THE PROGRAM WAS DOWNLOADED, DESTROY ALL COPIES OF THE PROGRAM. + +1. Definitions + +"Authorized Use" - the specified level at which Licensee is authorized to execute or run the Program. That level may be measured by number of users, millions of service units ("MSUs"), Processor Value Units ("PVUs"), or other level of use specified by IBM. + +"IBM" - International Business Machines Corporation or one of its subsidiaries. + +"License Information" ("LI") - a document that provides information and any additional terms specific to a Program. The Program's LI is available at www.ibm.com/software/sla. The LI can also be found in the Program's directory, by the use of a system command, or as a booklet included with the Program. + +"Program" - the following, including the original and all whole or partial copies: 1) machine-readable instructions and data, 2) components, files, and modules, 3) audio-visual content (such as images, text, recordings, or pictures), and 4) related licensed materials (such as keys and documentation). + +"Proof of Entitlement" ("PoE") - evidence of Licensee's Authorized Use. The PoE is also evidence of Licensee's eligibility for warranty, future update prices, if any, and potential special or promotional opportunities. If IBM does not provide Licensee with a PoE, then IBM may accept as the PoE the original paid sales receipt or other sales record from the party (either IBM or its reseller) from whom Licensee obtained the Program, provided that it specifies the Program name and Authorized Use obtained. + +"Warranty Period" - one year, starting on the date the original Licensee is granted the license. + +2. Agreement Structure + +This Agreement includes Part 1 - General Terms, Part 2 - Country-unique Terms (if any), the LI, and the PoE and is the complete agreement between Licensee and IBM regarding the use of the Program. It replaces any prior oral or written communications between Licensee and IBM concerning Licensee's use of the Program. The terms of Part 2 may replace or modify those of Part 1. To the extent of any conflict, the LI prevails over both Parts. + +3. License Grant + +The Program is owned by IBM or an IBM supplier, and is copyrighted and licensed, not sold. + +IBM grants Licensee a nonexclusive license to 1) use the Program up to the Authorized Use specified in the PoE, 2) make and install copies to support such Authorized Use, and 3) make a backup copy, all provided that + +a. Licensee has lawfully obtained the Program and complies with the terms of this Agreement; + +b. the backup copy does not execute unless the backed-up Program cannot execute; + +c. Licensee reproduces all copyright notices and other legends of ownership on each copy, or partial copy, of the Program; + +d. Licensee ensures that anyone who uses the Program (accessed either locally or remotely) 1) does so only on Licensee's behalf and 2) complies with the terms of this Agreement; + +e. Licensee does not 1) use, copy, modify, or distribute the Program except as expressly permitted in this Agreement; 2) reverse assemble, reverse compile, otherwise translate, or reverse engineer the Program, except as expressly permitted by law without the possibility of contractual waiver; 3) use any of the Program's components, files, modules, audio-visual content, or related licensed materials separately from that Program; or 4) sublicense, rent, or lease the Program; and + +f. 
if Licensee obtains this Program as a Supporting Program, Licensee uses this Program only to support the Principal Program and subject to any limitations in the license to the Principal Program, or, if Licensee obtains this Program as a Principal Program, Licensee uses all Supporting Programs only to support this Program, and subject to any limitations in this Agreement. For purposes of this Item "f," a "Supporting Program" is a Program that is part of another IBM Program ("Principal Program") and identified as a Supporting Program in the Principal Program's LI. (To obtain a separate license to a Supporting Program without these restrictions, Licensee should contact the party from whom Licensee obtained the Supporting Program.) + +This license applies to each copy of the Program that Licensee makes. + +3.1 Trade-ups, Updates, Fixes, and Patches + +3.1.1 Trade-ups + +If the Program is replaced by a trade-up Program, the replaced Program's license is promptly terminated. + +3.1.2 Updates, Fixes, and Patches + +When Licensee receives an update, fix, or patch to a Program, Licensee accepts any additional or different terms that are applicable to such update, fix, or patch that are specified in its LI. If no additional or different terms are provided, then the update, fix, or patch is subject solely to this Agreement. If the Program is replaced by an update, Licensee agrees to promptly discontinue use of the replaced Program. + +3.2 Fixed Term Licenses + +If IBM licenses the Program for a fixed term, Licensee's license is terminated at the end of the fixed term, unless Licensee and IBM agree to renew it. + +3.3 Term and Termination + +This Agreement is effective until terminated. + +IBM may terminate Licensee's license if Licensee fails to comply with the terms of this Agreement. + +If the license is terminated for any reason by either party, Licensee agrees to promptly discontinue use of and destroy all of Licensee's copies of the Program. Any terms of this Agreement that by their nature extend beyond termination of this Agreement remain in effect until fulfilled, and apply to both parties' respective successors and assignees. + +4. Charges + +Charges are based on Authorized Use obtained, which is specified in the PoE. IBM does not give credits or refunds for charges already due or paid, except as specified elsewhere in this Agreement. + +If Licensee wishes to increase its Authorized Use, Licensee must notify IBM or an authorized IBM reseller in advance and pay any applicable charges. + +5. Taxes + +If any authority imposes on the Program a duty, tax, levy, or fee, excluding those based on IBM's net income, then Licensee agrees to pay that amount, as specified in an invoice, or supply exemption documentation. Licensee is responsible for any personal property taxes for the Program from the date that Licensee obtains it. If any authority imposes a customs duty, tax, levy, or fee for the import into or the export, transfer, access, or use of the Program outside the country in which the original Licensee was granted the license, then Licensee agrees that it is responsible for, and will pay, any amount imposed. + +6. Money-back Guarantee + +If Licensee is dissatisfied with the Program for any reason and is the original Licensee, Licensee may terminate the license and obtain a refund of the amount Licensee paid for the Program, provided that Licensee returns the Program and PoE to the party from whom Licensee obtained it within 30 days of the date the PoE was issued to Licensee. 
If the license is for a fixed term that is subject to renewal, then Licensee may obtain a refund only if the Program and its PoE are returned within the first 30 days of the initial term. If Licensee downloaded the Program, Licensee should contact the party from whom Licensee obtained it for instructions on how to obtain the refund. + +7. Program Transfer + +Licensee may transfer the Program and all of Licensee's license rights and obligations to another party only if that party agrees to the terms of this Agreement. If the license is terminated for any reason by either party, Licensee is prohibited from transferring the Program to another party. Licensee may not transfer a portion of 1) the Program or 2) the Program's Authorized Use. When Licensee transfers the Program, Licensee must also transfer a hard copy of this Agreement, including the LI and PoE. Immediately after the transfer, Licensee's license terminates. + +8. Warranty and Exclusions + +8.1 Limited Warranty + +IBM warrants that the Program, when used in its specified operating environment, will conform to its specifications. The Program's specifications, and specified operating environment information, can be found in documentation accompanying the Program (such as a read-me file) or other information published by IBM (such as an announcement letter). Licensee agrees that such documentation and other Program content may be supplied only in the English language, unless otherwise required by local law without the possibility of contractual waiver or limitation. + +The warranty applies only to the unmodified portion of the Program. IBM does not warrant uninterrupted or error-free operation of the Program, or that IBM will correct all Program defects. Licensee is responsible for the results obtained from the use of the Program. + +During the Warranty Period, IBM provides Licensee with access to IBM databases containing information on known Program defects, defect corrections, restrictions, and bypasses at no additional charge. Consult the IBM Software Support Handbook for further information at www.ibm.com/software/support. + +If the Program does not function as warranted during the Warranty Period and the problem cannot be resolved with information available in the IBM databases, Licensee may return the Program and its PoE to the party (either IBM or its reseller) from whom Licensee obtained it and receive a refund of the amount Licensee paid. After returning the Program, Licensee's license terminates. If Licensee downloaded the Program, Licensee should contact the party from whom Licensee obtained it for instructions on how to obtain the refund. + +8.2 Exclusions + +THESE WARRANTIES ARE LICENSEE'S EXCLUSIVE WARRANTIES AND REPLACE ALL OTHER WARRANTIES OR CONDITIONS, EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OR CONDITIONS OF MERCHANTABILITY, SATISFACTORY QUALITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE, AND ANY WARRANTY OR CONDITION OF NON-INFRINGEMENT. SOME STATES OR JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF EXPRESS OR IMPLIED WARRANTIES, SO THE ABOVE EXCLUSION MAY NOT APPLY TO LICENSEE. IN THAT EVENT, SUCH WARRANTIES ARE LIMITED IN DURATION TO THE WARRANTY PERIOD. NO WARRANTIES APPLY AFTER THAT PERIOD. SOME STATES OR JURISDICTIONS DO NOT ALLOW LIMITATIONS ON HOW LONG AN IMPLIED WARRANTY LASTS, SO THE ABOVE LIMITATION MAY NOT APPLY TO LICENSEE. + +THESE WARRANTIES GIVE LICENSEE SPECIFIC LEGAL RIGHTS. LICENSEE MAY ALSO HAVE OTHER RIGHTS THAT VARY FROM STATE TO STATE OR JURISDICTION TO JURISDICTION. 
+ +THE WARRANTIES IN THIS SECTION 8 (WARRANTY AND EXCLUSIONS) ARE PROVIDED SOLELY BY IBM. THE DISCLAIMERS IN THIS SUBSECTION 8.2 (EXCLUSIONS), HOWEVER, ALSO APPLY TO IBM'S SUPPLIERS OF THIRD PARTY CODE. THOSE SUPPLIERS PROVIDE SUCH CODE WITHOUT WARRANTIES OR CONDITION OF ANY KIND. THIS PARAGRAPH DOES NOT NULLIFY IBM'S WARRANTY OBLIGATIONS UNDER THIS AGREEMENT. + +9. Licensee Data and Databases + +To assist Licensee in isolating the cause of a problem with the Program, IBM may request that Licensee 1) allow IBM to remotely access Licensee's system or 2) send Licensee information or system data to IBM. However, IBM is not obligated to provide such assistance unless IBM and Licensee enter a separate written agreement under which IBM agrees to provide to Licensee that type of support, which is beyond IBM's warranty obligations in this Agreement. In any event, IBM uses information about errors and problems to improve its products and services, and assist with its provision of related support offerings. For these purposes, IBM may use IBM entities and subcontractors (including in one or more countries other than the one in which Licensee is located), and Licensee authorizes IBM to do so. + +Licensee remains responsible for 1) any data and the content of any database Licensee makes available to IBM, 2) the selection and implementation of procedures and controls regarding access, security, encryption, use, and transmission of data (including any personally-identifiable data), and 3) backup and recovery of any database and any stored data. Licensee will not send or provide IBM access to any personally-identifiable information, whether in data or any other form, and will be responsible for reasonable costs and other amounts that IBM may incur relating to any such information mistakenly provided to IBM or the loss or disclosure of such information by IBM, including those arising out of any third party claims. + +10. Limitation of Liability + +The limitations and exclusions in this Section 10 (Limitation of Liability) apply to the full extent they are not prohibited by applicable law without the possibility of contractual waiver. + +10.1 Items for Which IBM May Be Liable + +Circumstances may arise where, because of a default on IBM's part or other liability, Licensee is entitled to recover damages from IBM. Regardless of the basis on which Licensee is entitled to claim damages from IBM (including fundamental breach, negligence, misrepresentation, or other contract or tort claim), IBM's entire liability for all claims in the aggregate arising from or related to each Program or otherwise arising under this Agreement will not exceed the amount of any 1) damages for bodily injury (including death) and damage to real property and tangible personal property and 2) other actual direct damages up to the charges (if the Program is subject to fixed term charges, up to twelve months' charges) Licensee paid for the Program that is the subject of the claim. + +This limit also applies to any of IBM's Program developers and suppliers. It is the maximum for which IBM and its Program developers and suppliers are collectively responsible. + +10.2 Items for Which IBM Is Not Liable + +UNDER NO CIRCUMSTANCES IS IBM, ITS PROGRAM DEVELOPERS OR SUPPLIERS LIABLE FOR ANY OF THE FOLLOWING, EVEN IF INFORMED OF THEIR POSSIBILITY: + +a. LOSS OF, OR DAMAGE TO, DATA; + +b. SPECIAL, INCIDENTAL, EXEMPLARY, OR INDIRECT DAMAGES, OR FOR ANY ECONOMIC CONSEQUENTIAL DAMAGES; OR + +c. 
LOST PROFITS, BUSINESS, REVENUE, GOODWILL, OR ANTICIPATED SAVINGS. + +11. Compliance Verification + +For purposes of this Section 11 (Compliance Verification), "IPLA Program Terms" means 1) this Agreement and applicable amendments and transaction documents provided by IBM, and 2) IBM software policies that may be found at the IBM Software Policy website (www.ibm.com/softwarepolicies), including but not limited to those policies concerning backup, sub-capacity pricing, and migration. + +The rights and obligations set forth in this Section 11 remain in effect during the period the Program is licensed to Licensee, and for two years thereafter. + +11.1 Verification Process + +Licensee agrees to create, retain, and provide to IBM and its auditors accurate written records, system tool outputs, and other system information sufficient to provide auditable verification that Licensee's use of all Programs is in compliance with the IPLA Program Terms, including, without limitation, all of IBM's applicable licensing and pricing qualification terms. Licensee is responsible for 1) ensuring that it does not exceed its Authorized Use, and 2) remaining in compliance with IPLA Program Terms. + +Upon reasonable notice, IBM may verify Licensee's compliance with IPLA Program Terms at all sites and for all environments in which Licensee uses (for any purpose) Programs subject to IPLA Program Terms. Such verification will be conducted in a manner that minimizes disruption to Licensee's business, and may be conducted on Licensee's premises, during normal business hours. IBM may use an independent auditor to assist with such verification, provided IBM has a written confidentiality agreement in place with such auditor. + +11.2 Resolution + +IBM will notify Licensee in writing if any such verification indicates that Licensee has used any Program in excess of its Authorized Use or is otherwise not in compliance with the IPLA Program Terms. Licensee agrees to promptly pay directly to IBM the charges that IBM specifies in an invoice for 1) any such excess use, 2) support for such excess use for the lesser of the duration of such excess use or two years, and 3) any additional charges and other liabilities determined as a result of such verification. + +12. Third Party Notices + +The Program may include third party code that IBM, not the third party, licenses to Licensee under this Agreement. Notices, if any, for the third party code ("Third Party Notices") are included for Licensee's information only. These notices can be found in the Program's NOTICES file(s). Information on how to obtain source code for certain third party code can be found in the Third Party Notices. If in the Third Party Notices IBM identifies third party code as "Modifiable Third Party Code," IBM authorizes Licensee to 1) modify the Modifiable Third Party Code and 2) reverse engineer the Program modules that directly interface with the Modifiable Third Party Code provided that it is only for the purpose of debugging Licensee's modifications to such third party code. IBM's service and support obligations, if any, apply only to the unmodified Program. + +13. General + +a. Nothing in this Agreement affects any statutory rights of consumers that cannot be waived or limited by contract. + +b. For Programs IBM provides to Licensee in tangible form, IBM fulfills its shipping and delivery obligations upon the delivery of such Programs to the IBM-designated carrier, unless otherwise agreed to in writing by Licensee and IBM. + +c. 
If any provision of this Agreement is held to be invalid or unenforceable, the remaining provisions of this Agreement remain in full force and effect. + +d. Licensee agrees to comply with all applicable export and import laws and regulations, including U.S. embargo and sanctions regulations and prohibitions on export for certain end uses or to certain users. + +e. Licensee authorizes International Business Machines Corporation and its subsidiaries (and their successors and assigns, contractors and IBM Business Partners) to store and use Licensee's business contact information wherever they do business, in connection with IBM products and services, or in furtherance of IBM's business relationship with Licensee. + +f. Each party will allow the other reasonable opportunity to comply before it claims that the other has not met its obligations under this Agreement. The parties will attempt in good faith to resolve all disputes, disagreements, or claims between the parties relating to this Agreement. + +g. Unless otherwise required by applicable law without the possibility of contractual waiver or limitation: 1) neither party will bring a legal action, regardless of form, for any claim arising out of or related to this Agreement more than two years after the cause of action arose; and 2) upon the expiration of such time limit, any such claim and all respective rights related to the claim lapse. + +h. Neither Licensee nor IBM is responsible for failure to fulfill any obligations due to causes beyond its control. + +i. No right or cause of action for any third party is created by this Agreement, nor is IBM responsible for any third party claims against Licensee, except as permitted in Subsection 10.1 (Items for Which IBM May Be Liable) above for bodily injury (including death) or damage to real or tangible personal property for which IBM is legally liable to that third party. + +j. In entering into this Agreement, neither party is relying on any representation not specified in this Agreement, including but not limited to any representation concerning: 1) the performance or function of the Program, other than as expressly warranted in Section 8 (Warranty and Exclusions) above; 2) the experiences or recommendations of other parties; or 3) any results or savings that Licensee may achieve. + +k. IBM has signed agreements with certain organizations (called "IBM Business Partners") to promote, market, and support certain Programs. IBM Business Partners remain independent and separate from IBM. IBM is not responsible for the actions or statements of IBM Business Partners or obligations they have to Licensee. + +l. The license and intellectual property indemnification terms of Licensee's other agreements with IBM (such as the IBM Customer Agreement) do not apply to Program licenses granted under this Agreement. + +14. Geographic Scope and Governing Law + +14.1 Governing Law + +Both parties agree to the application of the laws of the country in which Licensee obtained the Program license to govern, interpret, and enforce all of Licensee's and IBM's respective rights, duties, and obligations arising from, or relating in any manner to, the subject matter of this Agreement, without regard to conflict of law principles. + +The United Nations Convention on Contracts for the International Sale of Goods does not apply. + +14.2 Jurisdiction + +All rights, duties, and obligations are subject to the courts of the country in which Licensee obtained the Program license. 
+ +Part 2 - Country-unique Terms + +For licenses granted in the countries specified below, the following terms replace or modify the referenced terms in Part 1. All terms in Part 1 that are not changed by these amendments remain unchanged and in effect. This Part 2 is organized as follows: + +* Multiple country amendments to Part 1, Section 14 (Governing Law and Jurisdiction); + +* Americas country amendments to other Agreement terms; + +* Asia Pacific country amendments to other Agreement terms; and + +* Europe, Middle East, and Africa country amendments to other Agreement terms. + +Multiple country amendments to Part 1, Section 14 (Governing Law and Jurisdiction) + +14.1 Governing Law + +The phrase "the laws of the country in which Licensee obtained the Program license" in the first paragraph of 14.1 Governing Law is replaced by the following phrases in the countries below: + +AMERICAS + +(1) In Canada: the laws in the Province of Ontario; + +(2) in Mexico: the federal laws of the Republic of Mexico; + +(3) in the United States, Anguilla, Antigua/Barbuda, Aruba, British Virgin Islands, Cayman Islands, Dominica, Grenada, Guyana, Saint Kitts and Nevis, Saint Lucia, Saint Maarten, and Saint Vincent and the Grenadines: the laws of the State of New York, United States; + +(4) in Venezuela: the laws of the Bolivarian Republic of Venezuela; + +ASIA PACIFIC + +(5) in Cambodia and Laos: the laws of the State of New York, United States; + +(6) in Australia: the laws of the State or Territory in which the transaction is performed; + +(7) in Hong Kong SAR and Macau SAR: the laws of Hong Kong Special Administrative Region ("SAR"); + +(8) in Taiwan: the laws of Taiwan; + +EUROPE, MIDDLE EAST, AND AFRICA + +(9) in Albania, Armenia, Azerbaijan, Belarus, Bosnia-Herzegovina, Bulgaria, Croatia, Former Yugoslav Republic of Macedonia, Georgia, Hungary, Kazakhstan, Kyrgyzstan, Moldova, Montenegro, Poland, Romania, Russia, Serbia, Slovakia, Tajikistan, Turkmenistan, Ukraine, and Uzbekistan: the laws of Austria; + +(10) in Algeria, Andorra, Benin, Burkina Faso, Cameroon, Cape Verde, Central African Republic, Chad, Comoros, Congo Republic, Djibouti, Democratic Republic of Congo, Equatorial Guinea, French Guiana, French Polynesia, Gabon, Gambia, Guinea, Guinea-Bissau, Ivory Coast, Lebanon, Madagascar, Mali, Mauritania, Mauritius, Mayotte, Morocco, New Caledonia, Niger, Reunion, Senegal, Seychelles, Togo, Tunisia, Vanuatu, and Wallis and Futuna: the laws of France; + +(11) in Estonia, Latvia, and Lithuania: the laws of Finland; + +(12) in Angola, Bahrain, Botswana, Burundi, Egypt, Eritrea, Ethiopia, Ghana, Jordan, Kenya, Kuwait, Liberia, Malawi, Malta, Mozambique, Nigeria, Oman, Pakistan, Qatar, Rwanda, Sao Tome and Principe, Saudi Arabia, Sierra Leone, Somalia, Tanzania, Uganda, United Arab Emirates, the United Kingdom, West Bank/Gaza, Yemen, Zambia, and Zimbabwe: the laws of England; and + +(13) in South Africa, Namibia, Lesotho, and Swaziland: the laws of the Republic of South Africa. 
+ +14.2 Jurisdiction + +The following paragraph pertains to jurisdiction and replaces Subsection 14.2 (Jurisdiction) as it applies for those countries identified below: + +All rights, duties, and obligations are subject to the courts of the country in which Licensee obtained the Program license except that in the countries identified below all disputes arising out of or related to this Agreement, including summary proceedings, will be brought before and subject to the exclusive jurisdiction of the following courts of competent jurisdiction: + +AMERICAS + +(1) In Argentina: the Ordinary Commercial Court of the city of Buenos Aires; + +(2) in Brazil: the court of Rio de Janeiro, RJ; + +(3) in Chile: the Civil Courts of Justice of Santiago; + +(4) in Ecuador: the civil judges of Quito for executory or summary proceedings (as applicable); + +(5) in Mexico: the courts located in Mexico City, Federal District; + +(6) in Peru: the judges and tribunals of the judicial district of Lima, Cercado; + +(7) in Uruguay: the courts of the city of Montevideo; + +(8) in Venezuela: the courts of the metropolitan area of the city of Caracas; + +EUROPE, MIDDLE EAST, AND AFRICA + +(9) in Austria: the court of law in Vienna, Austria (Inner-City); + +(10) in Algeria, Andorra, Benin, Burkina Faso, Cameroon, Cape Verde, Central African Republic, Chad, Comoros, Congo Republic, Djibouti, Democratic Republic of Congo, Equatorial Guinea, France, French Guiana, French Polynesia, Gabon, Gambia, Guinea, Guinea-Bissau, Ivory Coast, Lebanon, Madagascar, Mali, Mauritania, Mauritius, Mayotte, Monaco, Morocco, New Caledonia, Niger, Reunion, Senegal, Seychelles, Togo, Tunisia, Vanuatu, and Wallis and Futuna: the Commercial Court of Paris; + +(11) in Angola, Bahrain, Botswana, Burundi, Egypt, Eritrea, Ethiopia, Ghana, Jordan, Kenya, Kuwait, Liberia, Malawi, Malta, Mozambique, Nigeria, Oman, Pakistan, Qatar, Rwanda, Sao Tome and Principe, Saudi Arabia, Sierra Leone, Somalia, Tanzania, Uganda, United Arab Emirates, the United Kingdom, West Bank/Gaza, Yemen, Zambia, and Zimbabwe: the English courts; + +(12) in South Africa, Namibia, Lesotho, and Swaziland: the High Court in Johannesburg; + +(13) in Greece: the competent court of Athens; + +(14) in Israel: the courts of Tel Aviv-Jaffa; + +(15) in Italy: the courts of Milan; + +(16) in Portugal: the courts of Lisbon; + +(17) in Spain: the courts of Madrid; and + +(18) in Turkey: the Istanbul Central Courts and Execution Directorates of Istanbul, the Republic of Turkey. + +14.3 Arbitration + +The following paragraph is added as a new Subsection 14.3 (Arbitration) as it applies for those countries identified below. The provisions of this Subsection 14.3 prevail over those of Subsection 14.2 (Jurisdiction) to the extent permitted by the applicable governing law and rules of procedure: + +ASIA PACIFIC + +(1) In Cambodia, India, Laos, Philippines, and Vietnam: + +Disputes arising out of or in connection with this Agreement will be finally settled by arbitration which will be held in Singapore in accordance with the Arbitration Rules of Singapore International Arbitration Center ("SIAC Rules") then in effect. The arbitration award will be final and binding for the parties without appeal and will be in writing and set forth the findings of fact and the conclusions of law. + +The number of arbitrators will be three, with each side to the dispute being entitled to appoint one arbitrator. 
The two arbitrators appointed by the parties will appoint a third arbitrator who will act as chairman of the proceedings. Vacancies in the post of chairman will be filled by the president of the SIAC. Other vacancies will be filled by the respective nominating party. Proceedings will continue from the stage they were at when the vacancy occurred. + +If one of the parties refuses or otherwise fails to appoint an arbitrator within 30 days of the date the other party appoints its arbitrator, the first appointed arbitrator will be the sole arbitrator, provided that the arbitrator was validly and properly appointed. + +All proceedings will be conducted, including all documents presented in such proceedings, in the English language. The English language version of this Agreement prevails over any other language version. + +(2) In the People's Republic of China: + +In case no settlement can be reached, the disputes will be submitted to the China International Economic and Trade Arbitration Commission for arbitration according to the then effective rules of the said Arbitration Commission. The arbitration will take place in Beijing and be conducted in Chinese. The arbitration award will be final and binding on both parties. During the course of arbitration, this Agreement will continue to be performed except for the part which the parties are disputing and which is undergoing arbitration. + +(3) In Indonesia: + +Each party will allow the other reasonable opportunity to comply before it claims that the other has not met its obligations under this Agreement. The parties will attempt in good faith to resolve all disputes, disagreements, or claims between the parties relating to this Agreement. Unless otherwise required by applicable law without the possibility of contractual waiver or limitation, i) neither party will bring a legal action, regardless of form, arising out of or related to this Agreement or any transaction under it more than two years after the cause of action arose; and ii) after such time limit, any legal action arising out of this Agreement or any transaction under it and all respective rights related to any such action lapse. + +Disputes arising out of or in connection with this Agreement shall be finally settled by arbitration that shall be held in Jakarta, Indonesia in accordance with the rules of the Indonesian National Board of Arbitration (Badan Arbitrase Nasional Indonesia or "BANI") then in effect. The arbitration award shall be final and binding for the parties without appeal and shall be in writing and set forth the findings of fact and the conclusions of law. + +The number of arbitrators shall be three, with each side to the dispute being entitled to appoint one arbitrator. The two arbitrators appointed by the parties shall appoint a third arbitrator who shall act as chairman of the proceedings. Vacancies in the post of chairman shall be filled by the chairman of the BANI. Other vacancies shall be filled by the respective nominating party. Proceedings shall continue from the stage they were at when the vacancy occurred. + +If one of the parties refuses or otherwise fails to appoint an arbitrator within 30 days of the date the other party appoints its arbitrator, the first appointed arbitrator shall be the sole arbitrator, provided that the arbitrator was validly and properly appointed. + +All proceedings shall be conducted, including all documents presented in such proceedings, in the English and/or Indonesian language.
+ +EUROPE, MIDDLE EAST, AND AFRICA + +(4) In Albania, Armenia, Azerbaijan, Belarus, Bosnia-Herzegovina, Bulgaria, Croatia, Former Yugoslav Republic of Macedonia, Georgia, Hungary, Kazakhstan, Kyrgyzstan, Moldova, Montenegro, Poland, Romania, Russia, Serbia, Slovakia, Tajikistan, Turkmenistan, Ukraine, and Uzbekistan: + +All disputes arising out of this Agreement or related to its violation, termination or nullity will be finally settled under the Rules of Arbitration and Conciliation of the International Arbitral Center of the Federal Economic Chamber in Vienna (Vienna Rules) by three arbitrators appointed in accordance with these rules. The arbitration will be held in Vienna, Austria, and the official language of the proceedings will be English. The decision of the arbitrators will be final and binding upon both parties. Therefore, pursuant to paragraph 598 (2) of the Austrian Code of Civil Procedure, the parties expressly waive the application of paragraph 595 (1) figure 7 of the Code. IBM may, however, institute proceedings in a competent court in the country of installation. + +(5) In Estonia, Latvia, and Lithuania: + +All disputes arising in connection with this Agreement will be finally settled in arbitration that will be held in Helsinki, Finland in accordance with the arbitration laws of Finland then in effect. Each party will appoint one arbitrator. The arbitrators will then jointly appoint the chairman. If arbitrators cannot agree on the chairman, then the Central Chamber of Commerce in Helsinki will appoint the chairman. + +AMERICAS COUNTRY AMENDMENTS + +CANADA + +10.1 Items for Which IBM May Be Liable + +The following replaces Item 1 in the first paragraph of this Subsection 10.1 (Items for Which IBM May Be Liable): + +1) damages for bodily injury (including death) and physical harm to real property and tangible personal property caused by IBM's negligence; and + +13. General + +The following replaces Item 13.d: + +d. Licensee agrees to comply with all applicable export and import laws and regulations, including those that apply to goods of United States origin and that prohibit or limit export for certain uses or to certain users. + +The following replaces Item 13.i: + +i. No right or cause of action for any third party is created by this Agreement or any transaction under it, nor is IBM responsible for any third party claims against Licensee except as permitted by the Limitation of Liability section above for bodily injury (including death) or physical harm to real or tangible personal property caused by IBM's negligence for which IBM is legally liable to that third party. + +The following is added as Item 13.m: + +m. For purposes of this Item 13.m, "Personal Data" refers to information relating to an identified or identifiable individual made available by one of the parties, its personnel or any other individual to the other in connection with this Agreement. The following provisions apply in the event that one party makes Personal Data available to the other: + +(1) General + +(a) Each party is responsible for complying with any obligations applying to it under applicable Canadian data privacy laws and regulations ("Laws"). + +(b) Neither party will request Personal Data beyond what is necessary to fulfill the purpose(s) for which it is requested. The purpose(s) for requesting Personal Data must be reasonable. Each party will agree in advance as to the type of Personal Data that is required to be made available.
+ +(2) Security Safeguards + +(a) Each party acknowledges that it is solely responsible for determining and communicating to the other the appropriate technological, physical and organizational security measures required to protect Personal Data. + +(b) Each party will ensure that Personal Data is protected in accordance with the security safeguards communicated and agreed to by the other. + +(c) Each party will ensure that any third party to whom Personal Data is transferred is bound by the applicable terms of this section. + +(d) Additional or different services required to comply with the Laws will be deemed a request for new services. + +(3) Use + +Each party agrees that Personal Data will only be used, accessed, managed, transferred, disclosed to third parties or otherwise processed to fulfill the purpose(s) for which it was made available. + +(4) Access Requests + +(a) Each party agrees to reasonably cooperate with the other in connection with requests to access or amend Personal Data. + +(b) Each party agrees to reimburse the other for any reasonable charges incurred in providing each other assistance. + +(c) Each party agrees to amend Personal Data only upon receiving instructions to do so from the other party or its personnel. + +(5) Retention + +Each party will promptly return to the other or destroy all Personal Data that is no longer necessary to fulfill the purpose(s) for which it was made available, unless otherwise instructed by the other or its personnel or required by law. + +(6) Public Bodies Who Are Subject to Public Sector Privacy Legislation + +For Licensees who are public bodies subject to public sector privacy legislation, this Item 13.m applies only to Personal Data made available to Licensee in connection with this Agreement, and the obligations in this section apply only to Licensee, except that: 1) section (2)(a) applies only to IBM; 2) sections (1)(a) and (4)(a) apply to both parties; and 3) section (4)(b) and the last sentence in (1)(b) do not apply. + +PERU + +10. Limitation of Liability + +The following is added to the end of this Section 10 (Limitation of Liability): + +Except as expressly required by law without the possibility of contractual waiver, Licensee and IBM intend that the limitation of liability in this Limitation of Liability section applies to damages caused by all types of claims and causes of action. If any limitation on or exclusion from liability in this section is held by a court of competent jurisdiction to be unenforceable with respect to a particular claim or cause of action, the parties intend that it nonetheless apply to the maximum extent permitted by applicable law to all other claims and causes of action. + +10.1 Items for Which IBM May Be Liable + +The following is added at the end of this Subsection 10.1: + +In accordance with Article 1328 of the Peruvian Civil Code, the limitations and exclusions specified in this section will not apply to damages caused by IBM's willful misconduct ("dolo") or gross negligence ("culpa inexcusable"). + +UNITED STATES OF AMERICA + +5. Taxes + +The following is added at the end of this Section 5 (Taxes): + +For Programs delivered electronically in the United States for which Licensee claims a state sales and use tax exemption, Licensee agrees not to receive any tangible personal property (e.g., media and publications) associated with the electronic program.
+ +Licensee agrees to be responsible for any sales and use tax liabilities that may arise as a result of Licensee's subsequent redistribution of Programs after delivery by IBM. + +13. General + +The following is added to Section 13 as Item 13.m: + +U.S. Government Users Restricted Rights - Use, duplication or disclosure is restricted by the GSA IT Schedule 70 Contract with the IBM Corporation. + +The following is added to Item 13.f: + +Each party waives any right to a jury trial in any proceeding arising out of or related to this Agreement. + +ASIA PACIFIC COUNTRY AMENDMENTS + +AUSTRALIA + +5. Taxes + +The following sentences replace the first two sentences of Section 5 (Taxes): + +If any government or authority imposes a duty, tax (other than income tax), levy, or fee, on this Agreement or on the Program itself, that is not otherwise provided for in the amount payable, Licensee agrees to pay it when IBM invoices Licensee. If the rate of GST changes, IBM may adjust the charge or other amount payable to take into account that change from the date the change becomes effective. + +8.1 Limited Warranty + +The following is added to Subsection 8.1 (Limited Warranty): + +The warranties specified in this Section are in addition to any rights Licensee may have under the Competition and Consumer Act 2010 or other legislation and are only limited to the extent permitted by the applicable legislation. + +10.1 Items for Which IBM May be Liable + +The following is added to Subsection 10.1 (Items for Which IBM May be Liable): + +Where IBM is in breach of a condition or warranty implied by the Competition and Consumer Act 2010, IBM's liability is limited to the repair or replacement of the goods, or the supply of equivalent goods. Where that condition or warranty relates to right to sell, quiet possession or clear title, or the goods are of a kind ordinarily obtained for personal, domestic or household use or consumption, then none of the limitations in this paragraph apply. + +HONG KONG SAR, MACAU SAR, AND TAIWAN + +As applies to licenses obtained in Taiwan and the special administrative regions, phrases throughout this Agreement containing the word "country" (for example, "the country in which the original Licensee was granted the license" and "the country in which Licensee obtained the Program license") are replaced with the following: + +(1) In Hong Kong SAR: "Hong Kong SAR" + +(2) In Macau SAR: "Macau SAR" except in the Governing Law clause (Section 14.1) + +(3) In Taiwan: "Taiwan." + +INDIA + +10.1 Items for Which IBM May be Liable + +The following replaces the terms of Items 1 and 2 of the first paragraph: + +1) liability for bodily injury (including death) or damage to real property and tangible personal property will be limited to that caused by IBM's negligence; and 2) as to any other actual damage arising in any situation involving nonperformance by IBM pursuant to, or in any way related to the subject of this Agreement, IBM's liability will be limited to the charge paid by Licensee for the individual Program that is the subject of the claim. + +13. General + +The following replaces the terms of Item 13.g: + +If no suit or other legal action is brought, within three years after the cause of action arose, in respect of any claim that either party may have against the other, the rights of the concerned party in respect of such claim will be forfeited and the other party will stand released from its obligations in respect of such claim.
+ +INDONESIA + +3.3 Term and Termination + +The following is added to the last paragraph: + +Both parties waive the provision of article 1266 of the Indonesian Civil Code, to the extent the article provision requires such court decree for the termination of an agreement creating mutual obligations. + +JAPAN + +13. General + +The following is inserted after Item 13.f: + +Any doubts concerning this Agreement will be initially resolved between us in good faith and in accordance with the principle of mutual trust. + +MALAYSIA + +10.2 Items for Which IBM Is not Liable + +The word "SPECIAL" in Item 10.2b is deleted. + +NEW ZEALAND + +8.1 Limited Warranty + +The following is added: + +The warranties specified in this Section are in addition to any rights Licensee may have under the Consumer Guarantees Act 1993 or other legislation which cannot be excluded or limited. The Consumer Guarantees Act 1993 will not apply in respect of any goods which IBM provides, if Licensee requires the goods for the purposes of a business as defined in that Act. + +10. Limitation of Liability + +The following is added: + +Where Programs are not obtained for the purposes of a business as defined in the Consumer Guarantees Act 1993, the limitations in this Section are subject to the limitations in that Act. + +PEOPLE'S REPUBLIC OF CHINA + +4. Charges + +The following is added: + +All banking charges incurred in the People's Republic of China will be borne by Licensee and those incurred outside the People's Republic of China will be borne by IBM. + +PHILIPPINES + +10.2 Items for Which IBM Is not Liable + +The following replaces the terms of Item 10.2b: + +b. special (including nominal and exemplary damages), moral, incidental, or indirect damages or for any economic consequential damages; or + +SINGAPORE + +10.2 Items for Which IBM Is not Liable + +The words "SPECIAL" and "ECONOMIC" are deleted from Item 10.2b. + +13. General + +The following replaces the terms of Item 13.i: + +Subject to the rights provided to IBM's suppliers and Program developers as provided in Section 10 above (Limitation of Liability), a person who is not a party to this Agreement will have no right under the Contracts (Rights of Third Parties) Act to enforce any of its terms. + +TAIWAN + +8.1 Limited Warranty + +The last paragraph is deleted. + +10.1 Items for Which IBM May Be Liable + +The following sentences are deleted: + +This limit also applies to any of IBM's subcontractors and Program developers. It is the maximum for which IBM and its subcontractors and Program developers are collectively responsible. + +EUROPE, MIDDLE EAST, AFRICA (EMEA) COUNTRY AMENDMENTS + +EUROPEAN UNION MEMBER STATES + +8. Warranty and Exclusions + +The following is added to Section 8 (Warranty and Exclusions): + +In the European Union ("EU"), consumers have legal rights under applicable national legislation governing the sale of consumer goods. Such rights are not affected by the provisions set out in this Section 8 (Warranty and Exclusions). The territorial scope of the Limited Warranty is worldwide. + +EU MEMBER STATES AND THE COUNTRIES IDENTIFIED BELOW + +Iceland, Liechtenstein, Norway, Switzerland, Turkey, and any other European country that has enacted local data privacy or protection legislation similar to the EU model. + +13.
General + +The following replaces Item 13.e: + +(1) Definitions - For the purposes of this Item 13.e, the following additional definitions apply: + +(a) Business Contact Information - business-related contact information disclosed by Licensee to IBM, including names, job titles, business addresses, telephone numbers and email addresses of Licensee's employees and contractors. For Austria, Italy and Switzerland, Business Contact Information also includes information about Licensee and its contractors as legal entities (for example, Licensee's revenue data and other transactional information). + +(b) Business Contact Personnel - Licensee employees and contractors to whom the Business Contact Information relates. + +(c) Data Protection Authority - the authority established by the Data Protection & Electronic Communications Legislation in the applicable country or, for non-EU countries, the authority responsible for supervising the protection of personal data in that country, or (for any of the foregoing) any duly appointed successor entity thereto. + +(d) Data Protection & Electronic Communications Legislation - (i) the applicable local legislation and regulations in force implementing the requirements of EU Directive 95/46/EC (on the protection of individuals with regard to the processing of personal data and on the free movement of such data) and of EU Directive 2002/58/EC (concerning the processing of personal data and the protection of privacy in the electronic communications sector); or (ii) for non-EU countries, the legislation and/or regulations passed in the applicable country relating to the protection of personal data and the regulation of electronic communications involving personal data, including (for any of the foregoing) any statutory replacement or modification thereof. + +(e) IBM Group - International Business Machines Corporation of Armonk, New York, USA, its subsidiaries, and their respective Business Partners and subcontractors. + +(2) Licensee authorizes IBM: + +(a) to process and use Business Contact Information within IBM Group in support of Licensee including the provision of support services, and for the purpose of furthering the business relationship between Licensee and IBM Group, including, without limitation, contacting Business Contact Personnel (by email or otherwise) and marketing IBM Group products and services (the "Specified Purpose"); and + +(b) to disclose Business Contact Information to other members of IBM Group in pursuit of the Specified Purpose only. + +(3) IBM agrees that all Business Contact Information will be processed in accordance with the Data Protection & Electronic Communications Legislation and will be used only for the Specified Purpose. + +(4) To the extent required by the Data Protection & Electronic Communications Legislation, Licensee represents that it has obtained (or will obtain) any consents from (and has issued (or will issue) any notices to) the Business Contact Personnel as are necessary in order to enable IBM Group to process and use the Business Contact Information for the Specified Purpose. + +(5) Licensee authorizes IBM to transfer Business Contact Information outside the European Economic Area, provided that the transfer is made on contractual terms approved by the Data Protection Authority or the transfer is otherwise permitted under the Data Protection & Electronic Communications Legislation.
+ +AUSTRIA + +8.2 Exclusions + +The following is deleted from the first paragraph: + +MERCHANTABILITY, SATISFACTORY QUALITY + +10. Limitation of Liability + +The following is added: + +The following limitations and exclusions of IBM's liability do not apply for damages caused by gross negligence or willful misconduct. + +10.1 Items for Which IBM May Be Liable + +The following replaces the first sentence in the first paragraph: + +Circumstances may arise where, because of a default by IBM in the performance of its obligations under this Agreement or other liability, Licensee is entitled to recover damages from IBM. + +In the second sentence of the first paragraph, delete entirely the parenthetical phrase: + +"(including fundamental breach, negligence, misrepresentation, or other contract or tort claim)". + +10.2 Items for Which IBM Is Not Liable + +The following replaces Item 10.2b: + +b. indirect damages or consequential damages; or + +BELGIUM, FRANCE, ITALY, AND LUXEMBOURG + +10. Limitation of Liability + +The following replaces the terms of Section 10 (Limitation of Liability) in its entirety: + +Except as otherwise provided by mandatory law: + +10.1 Items for Which IBM May Be Liable + +IBM's entire liability for all claims in the aggregate for any damages and losses that may arise as a consequence of the fulfillment of its obligations under or in connection with this Agreement or due to any other cause related to this Agreement is limited to the compensation of only those damages and losses proved and actually arising as an immediate and direct consequence of the non-fulfillment of such obligations (if IBM is at fault) or of such cause, for a maximum amount equal to the charges (if the Program is subject to fixed term charges, up to twelve months' charges) Licensee paid for the Program that has caused the damages. + +The above limitation will not apply to damages for bodily injuries (including death) and damages to real property and tangible personal property for which IBM is legally liable. + +10.2 Items for Which IBM Is Not Liable + +UNDER NO CIRCUMSTANCES IS IBM OR ANY OF ITS PROGRAM DEVELOPERS LIABLE FOR ANY OF THE FOLLOWING, EVEN IF INFORMED OF THEIR POSSIBILITY: 1) LOSS OF, OR DAMAGE TO, DATA; 2) INCIDENTAL, EXEMPLARY OR INDIRECT DAMAGES, OR FOR ANY ECONOMIC CONSEQUENTIAL DAMAGES; AND / OR 3) LOST PROFITS, BUSINESS, REVENUE, GOODWILL, OR ANTICIPATED SAVINGS, EVEN IF THEY ARISE AS AN IMMEDIATE CONSEQUENCE OF THE EVENT THAT GENERATED THE DAMAGES. + +10.3 Suppliers and Program Developers + +The limitation and exclusion of liability herein agreed applies not only to the activities performed by IBM but also to the activities performed by its suppliers and Program developers, and represents the maximum amount for which IBM as well as its suppliers and Program developers are collectively responsible. + +GERMANY + +8.1 Limited Warranty + +The following is inserted at the beginning of Section 8.1: + +The Warranty Period is twelve months from the date of delivery of the Program to the original Licensee. + +8.2 Exclusions + +Section 8.2 is deleted in its entirety and replaced with the following: + +Section 8.1 defines IBM's entire warranty obligations to Licensee except as otherwise required by applicable statutory law. + +10. Limitation of Liability + +The following replaces the Limitation of Liability section in its entirety: + +a. 
IBM will be liable without limit for 1) loss or damage caused by a breach of an express guarantee; 2) damages or losses resulting in bodily injury (including death); and 3) damages caused intentionally or by gross negligence. + +b. In the event of loss, damage and frustrated expenditures caused by slight negligence or in breach of essential contractual obligations, IBM will be liable, regardless of the basis on which Licensee is entitled to claim damages from IBM (including fundamental breach, negligence, misrepresentation, or other contract or tort claim), per claim only up to the greater of 500,000 euro or the charges (if the Program is subject to fixed term charges, up to 12 months' charges) Licensee paid for the Program that caused the loss or damage. A number of defaults which together result in, or contribute to, substantially the same loss or damage will be treated as one default. + +c. In the event of loss, damage and frustrated expenditures caused by slight negligence, IBM will not be liable for indirect or consequential damages, even if IBM was informed about the possibility of such loss or damage. + +d. In case of delay on IBM's part: 1) IBM will pay to Licensee an amount not exceeding the loss or damage caused by IBM's delay and 2) IBM will be liable only in respect of the resulting damages that Licensee suffers, subject to the provisions of Items a and b above. + +13. General + +The following replaces the provisions of 13.g: + +Any claims resulting from this Agreement are subject to a limitation period of three years, except as stated in Section 8.1 (Limited Warranty) of this Agreement. + +The following replaces the provisions of 13.i: + +No right or cause of action for any third party is created by this Agreement, nor is IBM responsible for any third party claims against Licensee, except (to the extent permitted in Section 10 (Limitation of Liability)) for: i) bodily injury (including death); or ii) damage to real or tangible personal property for which (in either case) IBM is legally liable to that third party. + +IRELAND + +8.2 Exclusions + +The following paragraph is added: + +Except as expressly provided in these terms and conditions, or Section 12 of the Sale of Goods Act 1893 as amended by the Sale of Goods and Supply of Services Act, 1980 (the "1980 Act"), all conditions or warranties (express or implied, statutory or otherwise) are hereby excluded including, without limitation, any warranties implied by the Sale of Goods Act 1893 as amended by the 1980 Act (including, for the avoidance of doubt, Section 39 of the 1980 Act). + +IRELAND AND UNITED KINGDOM + +2. Agreement Structure + +The following sentence is added: + +Nothing in this paragraph shall have the effect of excluding or limiting liability for fraud. + +10.1 Items for Which IBM May Be Liable + +The following replaces the first paragraph of the Subsection: + +For the purposes of this section, a "Default" means any act, statement, omission or negligence on the part of IBM in connection with, or in relation to, the subject matter of an Agreement in respect of which IBM is legally liable to Licensee, whether in contract or in tort. A number of Defaults which together result in, or contribute to, substantially the same loss or damage will be treated as one Default. + +Circumstances may arise where, because of a Default by IBM in the performance of its obligations under this Agreement or other liability, Licensee is entitled to recover damages from IBM. 
Regardless of the basis on which Licensee is entitled to claim damages from IBM and except as expressly required by law without the possibility of contractual waiver, IBM's entire liability for any one Default will not exceed the amount of any direct damages, to the extent actually suffered by Licensee as an immediate and direct consequence of the default, up to the greater of (1) 500,000 euro (or the equivalent in local currency) or (2) 125% of the charges (if the Program is subject to fixed term charges, up to 12 months' charges) for the Program that is the subject of the claim. Notwithstanding the foregoing, the amount of any damages for bodily injury (including death) and damage to real property and tangible personal property for which IBM is legally liable is not subject to such limitation. + +10.2 Items for Which IBM is Not Liable + +The following replaces Items 10.2b and 10.2c: + +b. special, incidental, exemplary, or indirect damages or consequential damages; or + +c. wasted management time or lost profits, business, revenue, goodwill, or anticipated savings. + +Z125-3301-14 (07/2011) diff --git a/ODM/README_config.md b/ODM/README_config.md deleted file mode 100644 index a01a1828..00000000 --- a/ODM/README_config.md +++ /dev/null @@ -1,72 +0,0 @@ -# Configuring IBM Operational Decision Manager 8.10.3 - -These instructions cover the basic configuration of ODM. - -The following architectures are supported for Operational Decision Manager 8.10.3: -- AMD64 (or x86_64), which is the 64-bit edition for Linux x86. - -> **Note**: Rule Designer is installed as an update site from the [Eclipse Marketplace](https://marketplace.eclipse.org/content/ibm-operational-decision-manager-developers-v-8103-rule-designer) into an existing version of Eclipse. - -ODM for production includes five containers corresponding to the following services: - - Decision Center Business Console and Enterprise Console - - Decision Server Console - - Decision Server Runtime - - Decision Server Runner - - (Optional) Internal PostgreSQL DB - -The services require CPU and memory resources. The following table lists the minimum requirements that are used as default values. - -| Service | CPU Minimum (m) | Memory Minimum (Mi) | -| ---------- | ----------- | ------------------- | -| Decision Center | 500 | 1500 | -| Decision Runner | 500 | 512 | -| Decision Server Console | 500 | 512 | -| Decision Server Runtime | 500 | 512 | -| **Total** | **2000** (2 CPUs) | **3036** (about 3 Gi) | -| (Optional) Internal DB | 500 | 512 | - -### Step 1: Customize a production-ready ODM (*Optional*) - -The installation of Operational Decision Manager 8.10.3 can be customized by changing and adding configuration parameters. The default values are appropriate for a production environment, but it is likely that you want to configure at least the security of your Kubernetes deployment. - -Make a note of the name and value of each parameter that you want to configure so that they are at hand when you enter them in the custom resource YAML file. - -Go to the [IBM Cloud Pak for Automation 20.0.x](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/topics/con_odm_prod.html) Knowledge Center and choose which customizations you want to apply.
- * [Defining the security certificate](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/topics/tsk_replace_security_certificate.html) - * [Configuring user access](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/topics/tsk_config_user_access.html) - * [Configuring a custom external database](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/topics/tsk_custom_external_db.html) - * [Configuring the ODM event emitter](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/topics/tsk_custom_emitters.html) - * [Configuring Decision Center customization](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/topics/tsk_custom_dc.html) - * [Configuring Decision Center time zone](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/topics/tsk_set_jvmargs.html) - * [Configuring the execution unit (XU)](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/topics/tsk_configuring_xu.html) - -> **Note**: The [configuration](configuration) folder provides sample configuration files that you might find useful. Download the files and edit them for your own customizations. - -### Step 2: Configure the custom resource YAML file for your ODM instance - -Before you configure, make sure that you have prepared your environment. For more information, see [Preparing to install ODM for production](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_preparing_odmk8s.html). - -In your `descriptors/my_icp4a_cr.yaml` file, update the `odm_configuration` section with the configuration parameters from *Step 1*. You can refer to the [`default-values.yaml`](configuration/default-values.yaml) file to find the default values for each ODM parameter and customize these values in your file. - -### Step 3: Complete the installation - -When you have finished editing the configuration file, go back to the relevant install or update page to configure other components and complete the deployment with the operator. - -Install pages: - - [Managed OpenShift installation page](../platform/roks/install.md#step-6-configure-the-software-that-you-want-to-install) - - [OpenShift installation page](../platform/ocp/install.md#step-6-configure-the-software-that-you-want-to-install) - - [Certified Kubernetes installation page](../platform/k8s/install.md#step-6-configure-the-software-that-you-want-to-install) - -Update pages: - - [Managed OpenShift update page](../platform/roks/update.md) - - [OpenShift update page](../platform/ocp/update.md#step-1-modify-the-software-that-is-installed) - - [Certified Kubernetes update page](../platform/k8s/update.md) - -### Step 4: Manage your Operational Decision Manager deployment - -If you customized the default user registry, you must synchronize the registry with the Decision Center database. For more information, see -[Synchronizing users and groups in Decision Center](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/topics/tsk_synchronize_users.html). - -You might need to update an ODM deployment after it is installed. Use the following tasks in IBM Knowledge Center to update a deployment whenever you need, and as many times as you need.
- * [Customizing JVM arguments](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/op_topics/tsk_set_jvmargs.html) - * [Customizing log levels](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/op_topics/tsk_odm_custom_logging.html) diff --git a/ODM/README_migrate.md b/ODM/README_migrate.md deleted file mode 100644 index dd01b563..00000000 --- a/ODM/README_migrate.md +++ /dev/null @@ -1,46 +0,0 @@ -# Migrating IBM Operational Decision Manager 8.10.x data to 8.10.3 - -## Step 1: Review the database configuration parameters - -Operational Decision Manager persists data in a database. An external Db2 or PostgreSQL database uses the following configuration parameters: - - - Server type: **externalDatabase.type** - - Server name: **externalDatabase.serverName** - - Port: **externalDatabase.port** - - Database name: **externalDatabase.databaseName** - - Secret credentials: **externalDatabase.secretCredentials** - -Note the name of the secret that holds the database user and password that are used to secure access to the database. - -A customized database uses the following configuration parameters: - - - Data source secret: **externalCustomDatabase.datasourceRef** - - Persistent Volume Claim to access the JDBC database driver: **externalCustomDatabase.driverPvc** - -If you customized the Decision Center Business console with your own implementation of dynamic domains, custom value editors, or custom ruleset extractors, you must note the name of the YAML file you previously created, for example *custom-dc-libs-pvc.yaml*. - -An internal database uses a predefined persistent volume claim (PVC) or Kubernetes dynamic provisioning. You must have a persistent volume (PV) already created with the accessModes attribute set to ReadWriteOnce for Operational Decision Manager containers. Dynamic provisioning uses the default storageClass defined by the Kubernetes admin or a custom storageClass that overrides the default. - -Predefined PVC - - - **internalDatabase.persistence.enabled**: true (default) - - **internalDatabase.persistence.useDynamicProvisioning**: false (default) - -Kubernetes dynamic provisioning - - - **internalDatabase.persistence.enabled**: true (default) - - **internalDatabase.persistence.useDynamicProvisioning**: true - -A sketch showing where the values from this step and Step 2 go in the custom resource follows Step 3. - -## Step 2: Review LDAP settings - -Make a note of the Lightweight Directory Access Protocol (LDAP) parameters that are used to connect to the LDAP server to validate users. The directory server has a number of mandatory configuration parameters, so save these values somewhere and refer to them when you configure the custom resource YAML file. For more information, see [LDAP configuration parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_k8s_ldap.html). - -## Step 3: Review other customizations you applied - -If you customized your Operational Decision Manager installation, go to the [IBM Cloud Pak for Automation 20.0.x](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/topics/con_odm_prod.html) Knowledge Center and remind yourself of the customizations you applied and need to apply again in the new ODM instance.
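Before you go back to the platform readme, it can help to see where the values you noted in Steps 1 and 2 land in the custom resource. The following is a minimal, illustrative sketch only: the parameter names come from the lists above and from `default-values.yaml`, but every server name, port, and secret name is a placeholder assumption, not a value from your environment.

```yaml
# Illustrative sketch only: replace all placeholder values with your own.
spec:
  odm_configuration:
    externalDatabase:
      type: "db2"                         # server type noted in Step 1
      serverName: "db2.example.com"       # placeholder server name
      port: "50000"                       # placeholder port
      databaseName: "odmdb"               # placeholder database name
      secretCredentials: "odm-db-secret"  # secret that holds the database user and password
  ldap_configuration:
    lc_selected_ldap_type: "Microsoft Active Directory"  # LDAP values noted in Step 2
    lc_ldap_server: "ldap.example.com"                   # placeholder LDAP host
    lc_ldap_port: "389"
    lc_bind_secret: odm-ldap-secret  # secret expected to have ldapUsername and ldapPassword keys
```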
- -## Step 4: Go back to the platform readme to migrate other components - -- [Managed OpenShift migrate page](../platform/roks/migrate.md) -- [OpenShift migrate page](../platform/ocp/migrate.md) -- [Kubernetes migrate page](../platform/k8s/migrate.md) diff --git a/ODM/README_upgrade.md b/ODM/README_upgrade.md deleted file mode 100644 index fb505f4b..00000000 --- a/ODM/README_upgrade.md +++ /dev/null @@ -1,102 +0,0 @@ -# Upgrade IBM Operational Decision Manager from 19.0.3 to 20.0.1 - -## Update the custom resource YAML file used in 19.0.3 - -This document describes the configuration parameter changes between 19.0.3 and 20.0.1 that might affect your ODM upgrade. - -### Shared configuration parameter changes - -In the custom resource file, make sure that the `spec` section includes the following parameters: - -| Custom Resource parameter | Comment | -| ------------------------------------------------------------------------------ | ------------------| -| shared_configuration.sc_deployment_type | production or non-production | -| shared_configuration.sc_deployment_platform | OCP, ROKS, or empty. | -| shared_configuration.images.keytool_init_container.tag | New in 20.0.1 for ODM. Used to manage certificates. | -| shared_configuration.images.keytool_init_container.repository | New in 20.0.1 for ODM. Used to manage certificates. | -| shared_configuration.images.pull_policy | New in 20.0.1 for ODM. Used to manage certificates. | -| shared_configuration.image_pull_secrets | New in 20.0.1 for ODM. Used to manage certificates. | -| shared_configuration.root_ca_secret | New in 20.0.1 for ODM. Used to manage certificates. | - -For more information, see [Shared configuration parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_shared_config_params.html). - -### Datasource configuration parameter changes - -The following ODM configuration parameters are new in 20.0.1. -If you use an external Db2 database, you must add the following parameters, with their values, to your custom resource to replace the `odm_configuration.externalDatabase` parameters. - -| Custom Resource parameter | -| ------------------------------------------------------------------------------ | -| datasource_configuration.dc_odm_datasource.dc_database_type | -| datasource_configuration.dc_odm_datasource.database_servername | -| datasource_configuration.dc_odm_datasource.dc_common_database_name | -| datasource_configuration.dc_odm_datasource.dc_common_database_port | -| datasource_configuration.dc_odm_datasource.dc_common_database_instance_secret | - -These parameters are used to configure the datasource for ODM. For more information, see [ODM datasource parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_parameters_prod.html). - -### LDAP configuration parameters - -The following LDAP configuration parameters can be used for ODM in 20.0.1. If you want to use LDAP with ODM, you need to set values for them. - -If your custom resource contains a `ldap_configuration` section and if you have not set the `odm_configuration.customization.authSecretRef`, the Basic Registry and LDAP authentication will be used by ODM in 20.0.1.
If you want fine-grained authentication, follow the instructions in [Configuring User Access](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/topics/tsk_config_user_access.html). - -| Custom Resource parameter | -| ------------------------------------------------------------------------------ | -| ldap_configuration.lc_selected_ldap_type | -| ldap_configuration.lc_ldap_server | -| ldap_configuration.lc_ldap_port | -| ldap_configuration.lc_bind_secret | -| ldap_configuration.lc_ldap_base_dn | -| ldap_configuration.lc_ldap_ssl_enabled | -| ldap_configuration.lc_ldap_ssl_secret_name | -| ldap_configuration.lc_ldap_user_name_attribute | -| ldap_configuration.lc_ldap_user_display_name_attr | -| ldap_configuration.lc_ldap_group_base_dn | -| ldap_configuration.lc_ldap_group_name_attribute | -| ldap_configuration.lc_ldap_group_display_name_attr | -| ldap_configuration.lc_ldap_group_membership_search_filter | -| ldap_configuration.lc_ldap_group_member_id_map | -| ldap_configuration.ad.lc_user_filter | -| ldap_configuration.ad.lc_group_filter | -| ldap_configuration.tds.lc_user_filter | -| ldap_configuration.tds.lc_group_filter | - - -For more information, see [LDAP configuration parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_k8s_ldap.html). - - -### Update ODM Docker images - -Update the ODM Docker image tags to point to the new 20.0.1 images. - -| Custom Resource parameter | Comment | -| ------------------------------------------------------------------------------ | ------------------| -| odm_configuration.images.tag | update to 8.10.3.0_ICP2001 | -| odm_configuration.version | update to 20.0.1 | - - -### New parameters in ODM configuration -In 20.0.1, you can set the following new parameters in `odm_configuration` if needed. - -| Custom Resource parameter | -| --------------------------------------------- | -| odm_configuration.decisionServerRuntime.xuConfigRef | -| odm_configuration.oidc.enabled | -| odm_configuration.oidc.serverUrl | -| odm_configuration.oidc.adminRef | -| odm_configuration.oidc.redirectUrisRef | -| odm_configuration.oidc.clientRef | -| odm_configuration.oidc.provider | -| odm_configuration.oidc.allowedDomains | - -For more information, see [Optimizing the execution unit (XU)](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/topics/tsk_configuring_xu.html) and [Configuring user access with UMS documentation](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/topics/tsk_config_odm_ums.html). - - -## Complete the upgrade -Return to the appropriate upgrade page to configure other components and complete the deployment using the operator. - -Upgrade pages: - - [Managed OpenShift upgrade page](../platform/roks/upgrade.md) - - [OpenShift upgrade page](../platform/ocp/upgrade.md) - - [Certified Kubernetes upgrade page](../platform/k8s/upgrade.md) diff --git a/ODM/configuration/default-values.yaml b/ODM/configuration/default-values.yaml deleted file mode 100644 index 8f6784ae..00000000 --- a/ODM/configuration/default-values.yaml +++ /dev/null @@ -1,170 +0,0 @@ -# Default values for odm installation. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates.
-apiVersion: icp4a.ibm.com/v1 -kind: ICP4ACluster -metadata: - name: odm-demo - labels: - app.kubernetes.io/instance: ibm-dba - app.kubernetes.io/managed-by: ibm-dba - app.kubernetes.io/name: ibm-dba -spec: - appVersion: 20.0.1 - odm_configuration: - image: - repository: "" - pullPolicy: IfNotPresent - tag: 8.10.3.0_ICP2001 - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod - ## Ex : pullSecrets: admin.registrykey - pullSecrets: - - ## Architecture - e.g. amd64, ppc64le. If left empty, the architecture will be determined automatically. - ## You can use kubectl version command to determine the architecture on the desired worker node. - arch: "" - - service: - enableTLS: true - type: NodePort - - decisionServerRuntime: - enabled: true - replicaCount: 1 - resources: - requests: - cpu: 500m - memory: 512Mi - limits: - cpu: 2 - memory: 4096Mi - - decisionServerConsole: - resources: - requests: - cpu: 500m - memory: 512Mi - limits: - cpu: 2 - memory: 1024Mi - - decisionCenter: - enabled: true - persistenceLocale: en_US - replicaCount: 1 - resources: - requests: - cpu: 500m - memory: 512Mi - limits: - cpu: 2 - memory: 4096Mi - - decisionRunner: - enabled: true - replicaCount: 1 - resources: - requests: - cpu: 500m - memory: 512Mi - limits: - cpu: 2 - memory: 4096Mi - - internalDatabase: - databaseName: odmdb - secretCredentials: "TOBEFILL" - persistence: - enabled: true - useDynamicProvisioning: false - storageClassName: "" - resources: - requests: - storage: 5Gi - securityContext: - runAsUser: 0 - resources: - requests: - cpu: 500m - memory: 512Mi - limits: - cpu: 2 - memory: 4096Mi - - externalDatabase: - type: "" - serverName: "" - databaseName: "" - user: "" - password: "" - port: "" - - externalCustomDatabase: - datasourceRef: - driverPvc: - - readinessProbe: - initialDelaySeconds: 5 - periodSeconds: 5 - failureThreshold: 45 - timeoutSeconds: 5 - - livenessProbe: - initialDelaySeconds: 300 - periodSeconds: 10 - failureThreshold: 10 - timeoutSeconds: 5 - - customization: - securitySecretRef: - baiEmitterSecretRef: - authSecretRef: - dedicatedNodeLabel: - - productName: IBM Cloud Pak for Automation - productID: 5737-I23 - kubeVersion: DBAMC - - # Shared parameters LDAP - customization section required -# customization: -# securitySecretRef: -# authSecretRef: odm-demo-nopv-odm-ldap-secret - - # Shared parameters external Database - section required ONLY for Oracle -# externalCustomDatabase: -# datasourceRef: odm-demo-nopv-odm-oracle-secret -# driverPvc: customdatasource-pvc - - # Shared parameters external Database - Datasource configuration for DB2 -# datasource_configuration: -# dc_odm_datasource: -# dc_database_type: "db2" -# database_servername: "9.30.222.169" -# dc_common_database_port: "50000" -# dc_common_database_name: "testdb" -# dc_common_database_instance_secret: odm-db-secret - - # Shared parameters LDAP - Example for Microsoft Active Directory -# ldap_configuration: -# ## the candidate value is "IBM Security Directory Server" or "Microsoft Active Directory" -# lc_selected_ldap_type: "Microsoft Active Directory" -# lc_ldap_server: "itdctest01w.fr.eurolabs.ibm.com" -# lc_ldap_port: "389" -# lc_ldap_base_dn: "OU=Administrator_Users,OU=Users,OU=SWG Lab France,DC=jade,DC=test" -# lc_bind_secret: odm-ldap-ad-secret # secret is expected to have ldapUsername and ldapPassword keys -# lc_ldap_ssl_enabled: false -# lc_ldap_ssl_secret_name: "" -# lc_ldap_user_name_attribute: "*:sAMAccountName" -# lc_ldap_user_display_name_attr: "cn" -# lc_ldap_group_base_dn: "OU=Groups,OU=SWG Lab France,DC=jade,DC=test" -# lc_ldap_group_name_attribute: "*:cn" -# lc_ldap_group_display_name_attr: "cn" -# lc_ldap_group_membership_search_filter: "" -# lc_ldap_group_member_id_map: "memberOf:member" -# ad: -# lc_user_filter: "(&(sAMAccountName=%v)(objectClass=user))" -# lc_group_filter: "(&(sAMAccountName=%v)(objectClass=group))" -# tds: -# lc_user_filter: -# lc_group_filter: diff --git a/ODM/configuration/sample-values-custom-configuration.yaml b/ODM/configuration/sample-values-custom-configuration.yaml deleted file mode 100644 index 628b7067..00000000 --- a/ODM/configuration/sample-values-custom-configuration.yaml +++ /dev/null @@ -1,51 +0,0 @@ -# Sample values for odm installation using custom configuration. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. -apiVersion: icp4a.ibm.com/v1 -kind: ICP4ACluster -metadata: - name: odm-demo-external-custom-db - labels: - app.kubernetes.io/instance: ibm-dba - app.kubernetes.io/managed-by: ibm-dba - app.kubernetes.io/name: ibm-dba -spec: - appVersion: 20.0.1 - odm_configuration: - image: - repository: "" - pullPolicy: IfNotPresent - tag: 8.10.3.0_ICP2001 - pullSecrets: "" - decisionServerRuntime: - # Configuring the execution unit (XU) - # Following instructions at https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.offerings/topics/tsk_configuring_xu.html - xuConfigRef: my-odm-xu-configmap - - decisionCenter: - # Configuring Decision Center customization - # Following instructions at https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.offerings/topics/tsk_custom_dc.html - customlibPvc: - - # Customizing a Decision Center time zone - # Following instructions at https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.managing/op_topics/tsk_set_jvmargs.html - jvmOptionsRef: my-odm-dc-jvm-options-configmap - - # Configuring a custom external database - # Following instructions at https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.offerings/topics/tsk_custom_external_db.html - externalCustomDatabase: - datasourceRef: customdatasource-secret - driverPvc: customdatasource-pvc - - customization: - # Defining the security certificate - # Following instructions at https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.offerings/topics/tsk_replace_security_certificate.html - securitySecretRef: mysecuritysecret - - # Configuring the ODM event emitter - # Following instructions at https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.offerings/topics/tsk_custom_emitters.html - baiEmitterSecretRef: mybaieventsecret - - # Configuring the LDAP and user registry - # Following instructions at https://www.ibm.com/support/knowledgecenter/SSYHZ8_19.0.x/com.ibm.dba.offerings/topics/con_config_user_registry.html - authSecretRef: my-auth-secret diff --git a/ODM/configuration/security/openid/sample-openid-configuration.yaml b/ODM/configuration/security/openid/sample-openid-configuration.yaml index 5e0adb7b..fe6661de 100644 --- a/ODM/configuration/security/openid/sample-openid-configuration.yaml +++ b/ODM/configuration/security/openid/sample-openid-configuration.yaml @@ -14,7 +14,6 @@ spec: image: repository: "" pullPolicy: IfNotPresent - tag: 8.10.3.0_ICP2001-amd64 oidc: # Configuring the OPENID parameters diff --git a/ODM/configuration/security/sample-webSecurity-OIDC.xml
b/ODM/configuration/security/sample-webSecurity-OIDC.xml index 0407a66e..dcd5b918 100644 --- a/ODM/configuration/security/sample-webSecurity-OIDC.xml +++ b/ODM/configuration/security/sample-webSecurity-OIDC.xml @@ -1,47 +1,32 @@ [The XML body of this diff was lost in extraction; the change reworks the sample web security OIDC configuration from 47 to 32 lines.] diff --git a/ODM/configuration/security/sample-webSecurity-role-extension.xml b/ODM/configuration/security/sample-webSecurity-role-extension.xml new file mode 100644 index 00000000..279074a6 --- /dev/null +++ b/ODM/configuration/security/sample-webSecurity-role-extension.xml @@ -0,0 +1,88 @@ [The XML content of this new 88-line sample web security role-extension file was lost in extraction.] diff --git a/README.md b/README.md index 9a8b3a4a..44614e95 100644 --- a/README.md +++ b/README.md @@ -1,64 +1,7 @@ -# IBM Cloud Pak for Automation 20.0.1 on Certified Kubernetes +# IBM Cloud Pak for Automation 20.0.2 on Certified Kubernetes -This repository includes folders and resources to help you install the Cloud Pak for Automation software. Installation of the software components is done with the Cloud Pak operator. For demonstration purposes or to get started with the Cloud Pak, you can install a single capability of Digital Business Automation (DBA). For enterprise purposes, you must enable one or more components in a custom resource file and install them in a specified namespace. +This repository includes folders and resources to help you install the IBM Cloud Pak for Automation capabilities. Installation of the capabilities is done with the Cloud Pak operator. - -This README is divided into the following sections. - -- [Install the Cloud Pak for demonstration purposes](README.md#install-the-cloud-pak-for-demonstration-purposes) - [Install the Cloud Pak for enterprise purposes](README.md#install-the-cloud-pak-for-enterprise-purposes) - [Legal Notice](README.md#legal-notice) - -## Install the Cloud Pak for demonstration purposes - -> **Important:** The Cloud Pak capabilities are presented as patterns. A single pattern is installed in a specified namespace. You cannot install more than one pattern in a single namespace. In 20.0.1, patterns cannot be used to install the Cloud Pak for enterprise purposes. - -The "demo" deployment type reduces the number of steps that you need to do as it uses an Ansible role to create persistent storage that is allocated to a node. If you want to install the Cloud Pak components into a cluster with external storage, see [Install the Cloud Pak for enterprise purposes](README.md#install-the-cloud-pak-for-enterprise-purposes). - -Click [Next](demo/README.md) to follow the instructions. - -## Install the Cloud Pak for enterprise purposes - -The following software can be installed by the Cloud Pak operator. It is important that you **take note of the dependencies** before you proceed to the platform instructions.
- -| Folder | Component name | Version in 20.0.1 | -| :--- | :--- | ---: | -| AAE | IBM Business Automation Application Engine | 20.0.1 | -| ACA | IBM Business Automation Content Analyzer | 20.0.1 | -| ADW | IBM Automation Digital Worker | 20.0.1 | -| BAI | IBM Business Automation Insights | 20.0.1 | -| BAN | IBM Business Automation Navigator | 20.0.1 | -| BAS | IBM Business Automation Studio | 20.0.1 | -| FNCM | IBM FileNet Content Manager | 5.5.4 | -| IAWS | IBM Automation Workstream Services | 20.0.1 | -| ODM | IBM Operational Decision Manager | 8.10.3 | -| UMS | User Management Service | 20.0.1 | - -The following table shows dependencies between the components. A mandatory component is indicated in each column with an "M". Optional installation is indicated with an "O". - -| | ACA needs | ADW needs | BAN needs | BAS needs | FNCM needs | IAWS needs | ODM needs | -| :--- | :---: | :---: | :---: | :---: | :---: | :---: | :---: | -| AAE | | | | M(8,9) | | M(8) | | -| ACA | - | O(6) | | | | | | -| BAI | | O(3) | | | O(3) | | O(3) | -| BAN | | | - | | M(7) | M(7) | | -| BAS | M(4) | M(2,4) | | - | | M(4) | O(2,5) | -| FNCM | | | | | - | M(CMIS/CPE only) | | -| ODM | | O(6) | | | | | - | -| UMS | M(1) | M(1) | O(1) | M(1) | O(1) | M(1) | O(1) | - -The type of integration is indicated with the following numbers: - -| 1. SSO/Authentication | 4. Designer integration in Studio | 7. Runtime view | -| :--- | :--- | :--- | -| **2. Registration to Resource Registry** | **5. Toolkit for App Designer** | **8. App execution** | -| **3. Event emitter/dashboard** | **6. Skill execution** | **9. Test and deploy** | - -Use the following links to go to the platform on which you want to use the Cloud Pak. On each platform you must configure the operator manifest files to set up an operator instance on your cluster. You can then select and add configuration parameters for the software that you want to install in a custom resource file. - -- [IBM Cloud Public](platform/roks/README.md) - [Red Hat OpenShift](platform/ocp/README.md) - [Other Certified Kubernetes platforms](platform/k8s/README.md) - -## Legal Notice +For information and instructions to install, upgrade, manage, and administer Cloud Pak for Automation, go to [IBM Knowledge Center](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/topics/con_installing.html). The legal notice for users of this repository is in [legal-notice.md](legal-notice.md). diff --git a/UMS/README_config.md b/UMS/README_config.md deleted file mode 100644 index 6de9f391..00000000 --- a/UMS/README_config.md +++ /dev/null @@ -1,213 +0,0 @@ -# Configuring User Management Service 20.0.1 - -## Planning UMS installation - -| Environment size | CPU Minimum (m) | Memory Minimum (Mi) | Recommended number of pods | -| ---------- | ----------- | ------------------- | -------------------------- | -| Small | 500 | 512 | 2 | -| Medium | 1000 | 1024 | 2 | -| Large | 2000 | 2048 | 3 | - - -## Prerequisites - -Make sure that you specified the mandatory configuration parameters `appVersion: 20.0.1` in `spec` and `sc_deployment_platform` in `shared_configuration`.
-If you deploy on Red Hat OpenShift, specify: - -```yaml -spec: - appVersion: 20.0.1 - shared_configuration: - sc_deployment_platform: OCP -``` - -Otherwise, specify: - -```yaml -spec: - appVersion: 20.0.1 - shared_configuration: - sc_deployment_platform: NonOCP -``` - -For information about shared configuration parameters and sample values, refer to -[Shared configuration parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_shared_images_params.html). - -## Step 1: Generate the UMS secret - -To avoid passing sensitive information in configuration files, you must create a secret manually before you deploy UMS. -Copy the following as ums-secret.yaml, then edit it to specify the required user identifiers and passwords. - -**Note:** The sample below includes sample values for passwords. For `ibm-dba-ums-secret` choose passwords that reflect your security requirements. - -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: ibm-dba-ums-secret -type: Opaque -stringData: - adminUser: "umsadmin" - adminPassword: "password" - oauthDBUser: "oauthDBUser" - oauthDBPassword: "oauthDBPassword" - tsDBUser: "tsDBUser" - tsDBPassword: "tsDBPassword" -``` - - -| Parameter | Description | -| ------------------------------- | --------------------------------------------- | -| `adminUser` | User ID of the UMS admin user to create | -| `adminPassword` | Password for the UMS admin user | -| `oauthDBUser` | User ID for the OAuth database | -| `oauthDBPassword` | Password for the OAuth database | -| `tsDBUser` | User ID for the TeamServer database | -| `tsDBPassword` | Password for the TeamServer database | - -Only specify the database settings if you are not using the internal Derby database. -**Note:** Derby can only be used for test scenarios where persistence and failover are not required. It will not work in scenarios with more than one UMS pod. Data is lost when the pod is restarted. - -Apart from the database values that relate to your specific database setup, you can choose all secret values freely. - -After modifying the values, save ums-secret.yaml and create the secrets by running the following command: - -```bash -oc create -f ums-secret.yaml -``` - -**Note:** `ibm-dba-ums-secret` is passed to the Operator by specifying the `ums_configuration.admin_secret_name` property, as described in section [Configure UMS](#Step-4). - -## Step 2: Configure the UMS datasource - -Follow the instructions in [Configure Db2 as the UMS datasource](README_config_db2.md) to configure Db2. - -Follow the instructions in [Configure Oracle as the UMS datasource](README_config_oracle.md) to configure Oracle. - -## Step 3: Configure LDAP - -In section `ldap_configuration`, adapt the LDAP configuration parameter values to match your LDAP. - -For information about LDAP configuration parameters and sample values, refer to -[Configuring the LDAP and user registry](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_k8s_ldap.html). A minimal sketch follows Step 3a below. - -## Step 3a (optional): Configure LDAP over SSL - -Follow the instructions in [Configure LDAP over SSL](README_config_ldap_ssl.md).
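As an illustration of Step 3, here is a minimal `ldap_configuration` sketch. The parameter names are the shared LDAP parameters referenced above; every host name, port, base DN, and secret name is an illustrative placeholder assumption that you must replace with values from your own directory.

```yaml
# Illustrative sketch only: replace all placeholder values with your own.
ldap_configuration:
  lc_selected_ldap_type: "IBM Security Directory Server"  # or "Microsoft Active Directory"
  lc_ldap_server: "ldap.example.com"    # placeholder LDAP host
  lc_ldap_port: "389"
  lc_bind_secret: ldap-bind-secret      # placeholder secret with the bind credentials
  lc_ldap_base_dn: "dc=example,dc=com"  # placeholder base DN
  lc_ldap_ssl_enabled: false            # see Step 3a to enable LDAP over SSL
```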
- - -## Step 4: Configure UMS -In section `ums_configuration`, adapt the UMS-specific configuration: - -```yaml -ums_configuration: - existing_claim_name: - replica_count: 2 - service_type: Route - hostname: - port: 443 - images: - ums: - repository: cp.icr.io/cp/cp4a/ums/ums - tag: 20.0.1 - admin_secret_name: ibm-dba-ums-secret - # optional for secure communication with UMS - external_tls_secret_name: ibm-dba-ums-external-tls-secret - # optional for secure communication with UMS - external_tls_ca_secret_name: ibm-dba-ums-external-tls-ca-secret - # optional for secure communication with UMS - external_tls_teams_secret_name: ibm-dba-ums-external-tls-teams-secret - # optional for secure communication with UMS - external_tls_scim_secret_name: ibm-dba-ums-external-tls-scim-secret - # optional for secure communication with UMS - external_tls_sso_secret_name: ibm-dba-ums-external-tls-sso-secret - oauth: - # optional: full DN of an LDAP group that is authorized to manage OIDC clients, in addition to primary admin from admin secret - client_manager_group: - # optional: full DN of an LDAP group that is authorized to manage app_tokens, in addition to primary admin from admin secret - token_manager_group: - # optional: lifetime of OAuth access_tokens. default is 7200s - access_token_lifetime: - # optional: lifetime of app-tokens. default is 366d - app_token_lifetime: - # optional: lifetime of app-passwords. default is 366d - app_password_lifetime: - # optional: maximum number of app-tokens or app-passwords per client. default is 100 - app_token_or_password_limit: - # optional: encoding / encryption when storing client secrets in OAuth database. Default is xor for compatibility. Recommended value is PBKDF2WithHmacSHA512 - client_secret_encoding: - resources: - limits: - cpu: 500m - memory: 512Mi - requests: - cpu: 200m - memory: 256Mi - ## Horizontal Pod Autoscaler - autoscaling: - enabled: true - min_replicas: 2 - max_replicas: 5 - target_average_utilization: 98 - use_custom_jdbc_drivers: false - use_custom_binaries: false - custom_secret_name: - custom_xml: - logs: - console_format: json - console_log_level: INFO - console_source: message,trace,accessLog,ffdc,audit - trace_format: ENHANCED - trace_specification: "*=info" -``` - -For information about UMS configuration parameters and their default values, see -[UMS Configuration Parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_ums_params_ums.html). - -**Note:** Either `ums_configuration.hostname` or `shared_configuration.sc_deployment_hostname_suffix` must be configured. To avoid defining explicit hostnames for different roles, the recommendation is to configure `shared_configuration.sc_deployment_hostname_suffix` and leave `ums_configuration.hostname` blank. In this case, `ums_configuration.hostname` will be generated from `shared_configuration.sc_deployment_hostname_suffix` (see the sketch at the end of this step). - -**Note:** There are several options for the `external_tls_secret_name`, `external_tls_ca_secret_name`, `external_tls_sso_secret_name`, `external_tls_teams_secret_name`, `external_tls_scim_secret_name`: -* Option 1: Do not create the secret and remove the property from the Custom Resource. In this case, the secret will be generated. -If you do not create the secret and do not remove the property from the Custom Resource, an error will be reported during deployment. -* Option 2: Create the secret and specify the name of the secret in the Custom Resource. In this case, the secret that you provide will be used.
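To illustrate the hostname note above, this minimal sketch follows the recommended approach of setting only the shared suffix and leaving the UMS hostname blank. The domain below is a placeholder assumption, not a value from your cluster.

```yaml
# Illustrative sketch only: the suffix below is a placeholder domain.
shared_configuration:
  sc_deployment_hostname_suffix: apps.mycluster.example.com
ums_configuration:
  hostname: ""  # intentionally blank: generated from sc_deployment_hostname_suffix
```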
- - -## Step 5a (optional): Configure secure communication with UMS - -See [Configuring secure communication with UMS](README_config_SSL.md). - - -## Step 6 (optional): Configure UMS to delegate authentication to an IdP - -The User Management Service (UMS) provides single sign-on for Cloud Pak for Automation components. -The single sign-on is based on OpenID Connect (OIDC). UMS can also be configured to delegate authentication to a third-party Identity Provider (IdP). - -The following figure summarizes the authentication flows that are supported by UMS: -1. A browser attempts to access any of the IBM Cloud Pak for Automation components. -1. Depending on the OpenID Connect (OIDC) configuration, there is a redirect to UMS. -1. Depending on the UMS configuration, one of the following takes place: - - a. UMS authenticates the user against LDAP - - b. UMS delegates authentication to IAM (OIDC) provided by IBM Common Services - - c. UMS delegates authentication to an IdP (OIDC or SAML) -4. Upon completing the OIDC flow, the platform component has a session with the browser and the necessary tokens that allow invoking other platform components. - -![Authentication Flows](images/authentication-flows.jpg) - -To connect UMS to your preferred Identity Provider, follow the corresponding instructions: -* [Configure UMS to delegate authentication to IAM provided by IBM Common Services](README_config_IAM.md) -* [Configure UMS to delegate authentication to an OIDC Identity Provider](README_config_OIDC.md) - -## Step 7: Complete the installation - -Return to the appropriate install or update page to configure other components and complete the deployment with the operator. - -Install pages: - - [Managed OpenShift installation page](../platform/roks/install.md) - - [OpenShift installation page](../platform/ocp/install.md) - - [Certified Kubernetes installation page](../platform/k8s/install.md) - -Update pages: - - [Managed OpenShift update page](../platform/roks/update.md) - - [OpenShift update page](../platform/ocp/update.md) - - [Certified Kubernetes update page](../platform/k8s/update.md) diff --git a/UMS/README_config_IAM.md b/UMS/README_config_IAM.md deleted file mode 100644 index 7dc0f7ec..00000000 --- a/UMS/README_config_IAM.md +++ /dev/null @@ -1,234 +0,0 @@ -# Configure the User Management Service to delegate authentication to Identity and Access Manager (IAM) provided by IBM Common Services - -## Prerequisites -IAM is accessible. - -IAM is connected to an LDAP that will be used to authenticate users. - -## Introduction -To configure UMS to delegate authentication to IAM, you must perform the following steps: -1. Register UMS as an OIDC client with IAM -2. Create IAM secrets in OpenShift -3. Configure UMS to delegate authentication - - -## Register UMS as an OIDC client of IAM -Log in to your OpenShift environment where IAM is deployed.
- -### Determine the OAUTH client registration secret - -Determine the OAUTH2_CLIENT_REGISTRATION_SECRET by running the following command: -``` -oc -n kube-system get secret platform-oidc-credentials -o yaml -``` - -As output you will see something like the following: - -``` -apiVersion: v1 -data: - OAUTH2_CLIENT_REGISTRATION_SECRET: <base64-encoded-value> - WLP_CLIENT_ID: <base64-encoded-value> - WLP_CLIENT_SECRET: <base64-encoded-value> - WLP_SCOPE: <base64-encoded-value> -kind: Secret -metadata: - creationTimestamp: "2019-12-18T16:02:32Z" - labels: - app: auth-idp - chart: auth-idp-3.4.0 - component: auth-idp - heritage: Tiller - release: auth-idp - name: platform-oidc-credentials - namespace: kube-system - resourceVersion: "29985" - selfLink: /api/v1/namespaces/kube-system/secrets/platform-oidc-credentials - uid: cccf8d27-21af-11ea-af97-0050569bd162 -type: Opaque - -``` - -Decode the value of the ```OAUTH2_CLIENT_REGISTRATION_SECRET```. You will need it later to authenticate the ```oauthadmin``` when the OIDC client is being registered. - - -### Generate client id and client secret -Generate a unique client id, for example, a random 32-character alphanumeric string. The client id will be the identifier of UMS. -It must be unique across all clients that IAM manages. - -Generate a sufficiently random client secret. The client secret will be used by UMS to authenticate against IAM. - - - -### Construct the client registration payload - -To construct the client registration payload, execute: - -``` -oc get configmaps -n kube-system registration-json -o jsonpath='{.data.*}' > registration.json -``` - -Edit the file registration.json: -* Replace the ```client_id``` with the id value that you generated in the previous step. -* Replace the ```client_secret``` with the secret value that you generated in the previous step. -* Add the UMS URL, for example https://<ums-host>, to the list of ```trusted_uri_prefixes```. -* Add the URL https://<ums-host>/oidcclient/redirect/<client-id> to the list of ```redirect_uris```. - - -``` -{ -"token_endpoint_auth_method":"client_secret_basic", -"client_id": "<client-id>", -"client_secret": "<client-secret>", -"scope":"openid profile email", -"grant_types":[ - "authorization_code", - "client_credentials", - "password", - "implicit", - "refresh_token", - "urn:ietf:params:oauth:grant-type:jwt-bearer" -], -"response_types":[ - "code", - "token", - "id_token token" -], -"application_type":"web", -"subject_type":"public", -"post_logout_redirect_uris":[ - "https://<icp-console-host>:<port>" ], -"preauthorized_scope":"openid profile email general", -"introspect_tokens":true, -"trusted_uri_prefixes":[ - "https://<icp-console-host>:<port>", "https://<ums-host>" ], -"redirect_uris":[ - "https://<icp-console-host>:<port>/auth/liberty/callback", "https://127.0.0.1:443/idauth/oidc/endpoint/OP", "https://<icp-console-host>:<port>/oidc/endpoint/OP/authorize", "https://<ums-host>/oidcclient/redirect/<client-id>"] -} -``` - -### Register the OIDC client - -Run the following command to register the OIDC client: -``` -curl -i -k -X POST -u oauthadmin:<OAUTH2_CLIENT_REGISTRATION_SECRET> -H "Content-Type: application/json" --data @registration.json https://<icp-console-host>:<port>/idauth/oidc/endpoint/OP/registration -``` - -UMS is now registered as an OIDC client with IAM. - - -## Create IAM secrets - -### Obtain the IAM signer certificate -* Log in to the OpenShift Administrator UI. -* Select the project ```kube-system``` -* Navigate to Workloads > Secrets -* Select the ```icp-management-ingress-tls-secret``` -* In the Data section, copy the contents of ```tls.crt```. - -### Create a secret that contains the IAM signer certificate - -Create a configuration file for the secret. -Specify a name for the secret, for example, ```iam-tls```.
-In section ```stringData``` add the IAM signer certificate as the value of the property ```tls.crt```.
-
-```
-apiVersion: v1
-kind: Secret
-metadata:
-  name: iam-tls
-type: Opaque
-stringData:
-  tls.crt: |+
-    -----BEGIN CERTIFICATE-----
-    MIIFMDCCAxigAwIBAgIRAJP7QsFhLJkEv6a8TFmV9NwwDQYJKoZIhvcNAQELBQAw
-    YzELMAkGA1UEBhMCVVMxETAPBgNVBAgMCE5ldyBZb3JrMQ8wDQYDVQQHDAZBcm1v
-    bnsxGjAYBgNVBAoMEUlCTSBDbG91ZCBQcml2YXRlMRQwEgYDVQQDDAt3d3cuaWJt
-    LmNvbTAeFw0xOTEyMTgxNjAyMTdaFw0yMDAzMTcxNjAyMTdaMDQxFTATBgNVBAoT
-    DGNlcnQtbWFuYWdlcjEbMBkGA1UEAxMSbWFuYWdlbWVudC1pbmdyZXNzMIIBIjAN
-    BgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA5BcGDQUxzcFIq9UhiMSo7G/BNUQX
-    sUgtpbVyrrfKAp73Tg/Y+HVduok1GOkdDhDjNGikuQtXFrudehvzKzcpS/WWI9t9
-    BJFxHS39X82UxxH6rRzOJIHWsnsedkFgI8rI99I1347SAYNNtYaZmne+JLMJ/RB9
-    Hhy2UvON3RKiJ/pIxY7UYkmK8f+kMWHw/FbKGqCSR/0TaNvDr+vft4ANLXRF6gXF
-    Ih3Ee0h2BbihjyYU1d0PSj8whquC2V0x5qiyu/dWMYlSvvqJCWZZv5XIxDbm4muI
-    cbLFVR0+8eZ8sjBoRDMUSM4KUsQsT6wdd+iw8RaEGSOYWwWoJS7Rxu8kxQIDAQAB
-    o4IBDDCCAQgwEwYDVR0lBAwwCgYIKwYBBQUHAwEwDAYDVR0TAQH/BAIwADAfBgNV
-    HSMEGDAWgBSKi6oWBNLWS5M/RHLSqVAdLSYepDCBwQYDVR0RBIG5MIG2ghJtYW5h
-    Z2VtZW50LWluZ3Jlc3OCNmljcC1jb25zb2xlLmFwcHMuYXNoMi00MmdhLW9jcy5w
-    dXJwbGUtY2hlc3RlcmZpZWxkLmNvbYIiaWNwLW1hbmFnZW1lbnQtaW5ncmVzcy5r
-    dWJlLXN5c3RlbYImaWNwLW1hbmFnZW1lbnQtaW5ncmVzcy5rdWJlLXN5c3RlbS5z
-    dmOCFmljcC1tYW5hZ2VtZW50LWluZ3Jlc3OHBH8AAAEwDQYJKoZIhvcNAQELBQAD
-    ggIBAB3GoxrF4lcsQvuv8Fwfo7Zln3HlnE64MYBbUR+LOA7o/7vkIV7f0/t3+hQX
-    3zrCoJO2OMzDK90I2Hc9/fLbOeuPQHuEJymPAuJZDFQh7wk6C2/YW3lsUi1H20r9
-    64OKG+kxUvPbA7pXpKu7VbW2U0llAqXCWZSV5Xpd6C4ue4WIrAxENm7mbbUd+X1h
-    0kGrWVafOu2rXC5B4Dt+pUneC0BMdMP2OMMje1Vazpm8M9WTj1xLSgUsYTuvd9mS
-    Co7nzTLSEZ6muQyy+glMa2LqzIXJk313OgZz/58NZyBELOTuaxxkmDKeyxS++Moc
-    iQjS9YtLyglkqUZlePU9M4tVUYHFah/SWa+kyxgqjljREug0qZ8SuY175vTWrxet
-    F5yTyrzDb8ilmVCLpqGmQ0oahtqAS1PspvzeVJqsWCXlYUjEURmr25phbgYNsMRz
-    5EPIXyUsZ1Amv+cJsfB9/qcmlaeePoIXIpahsmJwQFliLiYW++Ckxam7YbllwK1T
-    MViwrfwd9i+5MAGp7us36msIzdH957C2jUMbvqsHxtBW3UoShkcGvTgm4O5t+aOD
-    wVi5jq/I1//W/E6rRqUzEcVOzpXncUATE6umdxuv2nPDajV9Ep+/si1WiHlewotL
-    TTEzzhGZiMIJoqLwHyJ6Rt/fHFg0dyA9KL4x1p+vvediodPV
-    -----END CERTIFICATE-----
-```
-
-Save the configuration file, for example, as ```iam-secret.yaml```
-
-Create the secret by running
-```
-oc apply -f iam-secret.yaml
-```
-
-
-## Configure UMS to delegate authentication
-
-### Specify the IAM secret in the Custom Resource
-
-Edit the Custom Resource.
-
-In section ```shared_configuration.trusted_certificate_list``` add the name of the secret that you created in the previous step.
-```
-  trusted_certificate_list:
-  - iam-tls
-```
-
-**Note:** During deployment, the operator adds the IAM signer certificate to the truststore of the User Management Service.
-
-### Specify the OpenID Client configuration
-
-Navigate to ```https://<icp-console-host>:<port>/idauth/oidc/endpoint/OP/.well-known/openid-configuration```.
-From the response, obtain the ```authorizationEndpointUrl```, ```tokenEndpointUrl```, ```issuerIdentifier```, ```jwkEndpointUrl``` and ```signatureAlgorithm```.
-
-Edit the Custom Resource.
-
-In the ```ums_configuration``` section, for the ```custom_xml``` parameter, specify the ```clientId```, ```clientSecret``` and the values that you obtained in the previous step.
-Specify the ```authFilter``` so that only URLs that point to ```/oidc/endpoint/ums/authorize``` are redirected.
-The following is an illustrative sketch based on the Liberty ```openidConnectClient``` element; replace the placeholder values with the values for your environment:
-
-```
-  custom_xml: |
-    <server>
-      <openidConnectClient id="ums-oidc-client"
-        clientId="<clientId>"
-        clientSecret="<clientSecret>"
-        authorizationEndpointUrl="<authorizationEndpointUrl>"
-        tokenEndpointUrl="<tokenEndpointUrl>"
-        issuerIdentifier="<issuerIdentifier>"
-        jwkEndpointUrl="<jwkEndpointUrl>"
-        signatureAlgorithm="<signatureAlgorithm>"
-        scope="openid profile email">
-        <authFilter>
-          <requestUrl urlPattern="/oidc/endpoint/ums/authorize" matchType="contains"/>
-        </authFilter>
-      </openidConnectClient>
-    </server>
-```
-
-For a detailed explanation of the parameters in the OIDC client configuration see [Configuring an OpenID Connect Client in Liberty](https://www.ibm.com/support/knowledgecenter/SSEQTP_liberty/com.ibm.websphere.wlp.doc/ae/twlp_config_oidc_rp.html).
-
-Save the Custom Resource.
-
-**Note:** During deployment, the operator adds the OIDC client configuration to the server configuration of the User Management Service.
-
-## Continue with UMS configuration
-You configured the User Management Service to delegate authentication to IAM.
-
-Continue with the UMS configuration: [README_config.md](README_config.md)
diff --git a/UMS/README_config_OIDC.md b/UMS/README_config_OIDC.md
deleted file mode 100644
index 8a9424dd..00000000
--- a/UMS/README_config_OIDC.md
+++ /dev/null
@@ -1,97 +0,0 @@
-# Configure the User Management Service to delegate authentication to an OIDC Identity Provider
-
-## Prerequisites
-The OIDC Identity Provider (IdP) is accessible.
-
-## Introduction
-
-This documentation describes the steps to configure UMS to delegate authentication to an OIDC IdP.
-To configure UMS to delegate authentication to an OIDC IdP, perform the following steps:
-1. Register UMS as an OIDC client with the IdP
-2. Create an IdP secret
-3. Configure UMS to delegate authentication
-
-
-## Register UMS as an OIDC client of the OIDC IdP
-
-Follow the instructions of your identity provider to register UMS as an OIDC client.
-
-## Create an OIDC IdP secret
-
-Obtain the signer certificate of your OIDC IdP.
-
-Create a configuration file for the secret.
-Specify a name for the secret, for example, ```idp-tls```.
-In section ```stringData``` add the signer certificate that you obtained in the previous step as the value of the property ```tls.crt```.
-
-```
-apiVersion: v1
-kind: Secret
-metadata:
-  name: idp-tls
-type: Opaque
-stringData:
-  tls.crt: |+
-    -----BEGIN CERTIFICATE-----
-    ....
-    -----END CERTIFICATE-----
-```
-
-Save the configuration file, for example, as ```idp-secret.yaml```
-
-In the namespace where UMS will be deployed, create the secret by running the following OpenShift CLI command:
-```
-oc apply -f idp-secret.yaml
-```
-
-
-## Configure UMS to delegate authentication
-
-### Specify the secret with the signer certificate in the Custom Resource
-
-Edit the Custom Resource.
-In section ```shared_configuration.trusted_certificate_list``` add the name of the secret that you created in the previous step.
-```
-  trusted_certificate_list:
-  - idp-tls
-```
-
-**Note:** During deployment, the operator adds the IdP signer certificate to the truststore of the User Management Service.
-
-### Specify the OpenID Client configuration
-
-Obtain the ```authorizationEndpointUrl```, ```tokenEndpointUrl```, ```issuerIdentifier```, ```jwkEndpointUrl``` and ```signatureAlgorithm``` of your IdP.
-
-Edit the Custom Resource.
-
-In the ```ums_configuration``` section, for the ```custom_xml``` parameter, specify the ```clientId```, ```clientSecret``` and the values that you obtained in the previous step.
-Specify the ```authFilter``` so that only URLs that point to ```/oidc/endpoint/ums/authorize``` are redirected.
-The following is an illustrative sketch based on the Liberty ```openidConnectClient``` element; replace the placeholder values with the values for your environment:
-
-```
-  custom_xml: |
-    <server>
-      <openidConnectClient id="ums-oidc-client"
-        clientId="<clientId>"
-        clientSecret="<clientSecret>"
-        authorizationEndpointUrl="<authorizationEndpointUrl>"
-        tokenEndpointUrl="<tokenEndpointUrl>"
-        issuerIdentifier="<issuerIdentifier>"
-        jwkEndpointUrl="<jwkEndpointUrl>"
-        signatureAlgorithm="<signatureAlgorithm>"
-        scope="openid profile email">
-        <authFilter>
-          <requestUrl urlPattern="/oidc/endpoint/ums/authorize" matchType="contains"/>
-        </authFilter>
-      </openidConnectClient>
-    </server>
-```
-
-For a detailed explanation of the parameters in the OIDC client configuration see [Configuring an OpenID Connect Client in Liberty](https://www.ibm.com/support/knowledgecenter/SSEQTP_liberty/com.ibm.websphere.wlp.doc/ae/twlp_config_oidc_rp.html).
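-
-If your IdP publishes a standard OpenID Connect discovery document, you can usually obtain the endpoint values listed above from its `.well-known` endpoint. A minimal sketch, assuming a hypothetical IdP host:
-```
-curl -k https://<idp-host>/.well-known/openid-configuration
-```
-The JSON response contains, among other values, the authorization endpoint, the token endpoint, the issuer, and the JWKS URL.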
-
-Save the Custom Resource.
-
-**Note:** During deployment, the operator adds the OIDC client configuration to the server configuration of the User Management Service.
-
-## Continue with UMS configuration
-You configured the User Management Service to delegate authentication to an OIDC IdP.
-
-Continue with the UMS configuration: [README_config.md](README_config.md)
diff --git a/UMS/README_config_SSL.md b/UMS/README_config_SSL.md
deleted file mode 100644
index 90ae9c75..00000000
--- a/UMS/README_config_SSL.md
+++ /dev/null
@@ -1,97 +0,0 @@
-# Configuring secure communications with UMS
-To reach UMS from outside of the Kubernetes cluster, the client (for example, a browser or a programmatic client) connects to `ums-route`, `ums-sso-route`, `ums-scim-route` or `ums-teams-route`, which are created during UMS deployment. There are dedicated routes for each of the features that UMS provides, to help separate network traffic.
-Each route, in turn, communicates with its respective service: `ums-service`, `ums-sso-service`, `ums-scim-service` and `ums-teams-service`, which load balance across the UMS pods.
-
-![UMS in k8s](images/ums-in-k8s-v2.png)
-
-In this diagram, BAS (Studio) and IAWS (Workstreams) are sample clients of UMS that invoke one or more UMS features by calling k8s services with generated certificates that are signed by a generated root certificate authority. External clients like browsers or programmatic clients connect to one or more of the routes, which you can optionally configure using your own certificates.
-
-To ensure that sensitive information is protected in transit when communicating with UMS pods, routes only accept encrypted requests and, by default, communicate over encrypted channels with their respective services. Consequently, TLS certificates are required for each of the routes and each of the services. This documentation describes the different options and provides instructions on how to configure secure communications with UMS pods.
-
-**Note:** Communication is secured by using TLS certificates. To simplify the overall configuration, you can leave it up to the operator to generate TLS certificates and secrets. All UMS-related generated certificates are automatically updated before they expire.
-
-## Option 1 - No external certificate
-
-In a test environment, you might only want to test features and functions and might not want to deal with certificates.
-In this case, do not create the secrets for `external_tls_secret_name`, `external_tls_ca_secret_name`, `external_tls_teams_secret_name`, `external_tls_sso_secret_name`, `external_tls_scim_secret_name`, and remove these parameters from the Custom Resource.
-
-By using this configuration option, `root_ca_secret` is used to generate an internal TLS secret for all services and an external TLS secret for each of the routes `ums-route`, `ums-sso-route`, `ums-scim-route`, `ums-teams-route`.
-
-| Custom Resource parameter | Origin |
-| --------------------------------------------- | ------------------|
-| `shared_configuration.root_ca_secret` | Not configured. Secret will be created by operator with a generated signer certificate |
-| `ums_configuration.external_tls_secret_name` | Not configured. Secret will be created by operator with a generated certificate signed by the signer in `shared_configuration.root_ca_secret` |
-| `ums_configuration.external_tls_scim_secret_name` | Not configured.
Secret will be created by operator with a generated certificate signed by the signer in `shared_configuration.root_ca_secret` |
-| `ums_configuration.external_tls_sso_secret_name` | Not configured. Secret will be created by operator with a generated certificate signed by the signer in `shared_configuration.root_ca_secret` |
-| `ums_configuration.external_tls_teams_secret_name` | Not configured. Secret will be created by operator with a generated certificate signed by the signer in `shared_configuration.root_ca_secret` |
-| `ums_configuration.external_tls_ca_secret_name` | Not configured. There are no intermediary certificates required to complete the certificate chain |
-
-**Note:** If you do not provide a [root] signing CA in the `shared_configuration` section of the Custom Resource, `root_ca_secret` is generated by the operator with a self-signed root CA.
-
-## Option 2 - Customer-provided external certificate
-
-In a production environment, communications are secured by using a TLS certificate that represents the hostnames of the routes that your clients connect to. You can provide an external certificate that is signed by an external certificate authority (CA) that is trusted by your clients.
-
-**Note:** You can also generate a certificate using openssl, see section [Creating TLS certificates using openssl](#Creating-TLS-certificates-using-openssl)
-
-Generate a secret (`ibm-dba-ums-external-tls-secret`) to include the key and the external certificate.
-```
-oc create secret tls ibm-dba-ums-external-tls-secret --key=tls.key --cert=tls.crt
-```
-
-Repeat this step to generate `ibm-dba-ums-external-tls-sso-secret`, `ibm-dba-ums-external-tls-scim-secret`, `ibm-dba-ums-external-tls-teams-secret`.
-
-Generate a secret (`ibm-dba-ums-external-tls-ca-secret`) to include any number of signer certificates that are necessary to trust the external certificate.
-This can be required if your external certificate was cross-signed by a second certificate authority or if the tls.crt file does not include all certificates of
-its certification chain.
-```
-oc create secret generic ibm-dba-ums-external-tls-ca-secret --from-file=cacert.crt=<your CA certificate file>
-```
-
-
-Provide secrets to the operator in the `ums_configuration` section of the Custom Resource:
-```yaml
-ums_configuration:
-  ...
-  external_tls_secret_name: ibm-dba-ums-external-tls-secret
-  external_tls_ca_secret_name: ibm-dba-ums-external-tls-ca-secret
-  external_tls_teams_secret_name: ibm-dba-ums-external-tls-teams-secret
-  external_tls_scim_secret_name: ibm-dba-ums-external-tls-scim-secret
-  external_tls_sso_secret_name: ibm-dba-ums-external-tls-sso-secret
-```
-
-**Note:** If the signer certificate is chained in the external certificate, `ibm-dba-ums-external-tls-ca-secret` is not required, and you should leave this parameter empty:
-```yaml
-ums_configuration:
-  ...
-  external_tls_secret_name: ibm-dba-ums-external-tls-secret
-  external_tls_ca_secret_name:
-```
-
-By using this configuration option, the customer-provided external certificate is used as the `ums-route`, `ums-sso-route`, `ums-scim-route`, `ums-teams-route` certificate.
-The operator generates a certificate for the UMS pod that is signed with the `root_ca_secret`.
-Signer certificates are configured for the `ums-route`, `ums-sso-route`, `ums-scim-route`, `ums-teams-route`, so that clients can trust these routes.
-
-| Custom Resource parameter | Origin |
-| --------------------------------------------- | ------------------|
-| `shared_configuration.root_ca_secret` | Not configured. Secret will be created by operator with a generated signer certificate. If you have a custom signer certificate that should be used to sign certificates for k8s services and routes, you can create a secret and provide its name here. |
-| `ums_configuration.external_tls_secret_name` | Customer-provided certificate in TLS secret |
-| `ums_configuration.external_tls_scim_secret_name` | Customer-provided certificate in TLS secret. It can be the same as `external_tls_secret_name`, if valid for all hostnames. |
-| `ums_configuration.external_tls_sso_secret_name` | Customer-provided certificate in TLS secret. It can be the same as `external_tls_secret_name`, if valid for all hostnames. |
-| `ums_configuration.external_tls_teams_secret_name` | Customer-provided certificate in TLS secret. It can be the same as `external_tls_secret_name`, if valid for all hostnames. |
-| `ums_configuration.external_tls_ca_secret_name` | Rarely required. Any intermediary certificates to complete the certificate chain |
-
-### Creating TLS certificates using openssl
-
-You can create a TLS certificate signing request by executing OpenSSL. Note that the final certificate should have a `Subject Alternative Name` (SAN) value that matches the hostname. Many certificate authorities allow you to specify SANs during the ordering process; otherwise, you must provide the SAN directly in the certificate signing request (CSR).
-```
-openssl req -new -newkey rsa:2048 -subj "/CN=UMS" -extensions SAN -days 365 -nodes -out ums.csr -config <(cat /etc/ssl/openssl.cnf <(printf "[SAN]\nsubjectAltName=DNS:ums.mycluster.com"))
-```
-
-Two files are generated: a private key (privkey.pem) and a certificate signing request (ums.csr) that can be sent to your certificate authority for signing.
-Use the private key and your certificate authority's response to generate the secrets `ibm-dba-ums-external-tls-secret`, `ibm-dba-ums-external-tls-teams-secret`, `ibm-dba-ums-external-tls-scim-secret`, `ibm-dba-ums-external-tls-sso-secret`.
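-
-For example, assuming the certificate authority returned the signed certificate in a file named `ums.crt` (a hypothetical name) next to the generated `privkey.pem`:
-```
-oc create secret tls ibm-dba-ums-external-tls-secret --key=privkey.pem --cert=ums.crt
-oc create secret tls ibm-dba-ums-external-tls-sso-secret --key=privkey.pem --cert=ums.crt
-oc create secret tls ibm-dba-ums-external-tls-scim-secret --key=privkey.pem --cert=ums.crt
-oc create secret tls ibm-dba-ums-external-tls-teams-secret --key=privkey.pem --cert=ums.crt
-```
-If one certificate is valid for all route hostnames, the same key and certificate can back all four secrets, as noted in the table above.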
-If the response from your certificate authority does not include all certificates from its signing chain, you can provide them in `ibm-dba-ums-external-tls-ca-secret`.
-
-## Continue with the UMS configuration
-
-Continue with the UMS configuration: [README_config.md](README_config.md)
diff --git a/UMS/README_config_db2.md b/UMS/README_config_db2.md
deleted file mode 100644
index f8c1d13a..00000000
--- a/UMS/README_config_db2.md
+++ /dev/null
@@ -1,85 +0,0 @@
-# Configure Db2 as the UMS datasource
-
-Create the OAuth database, for example, `UMSDB`, by running
-```
-db2 create database UMSDB automatic storage yes using codeset UTF-8 territory US pagesize 32768
-```
-
-In the `dc_ums_datasource` section, adjust the database configuration parameters.
-
-```yaml
-
-datasource_configuration:
-  dc_ums_datasource: # credentials are read from ums_configuration.admin_secret_name
-    # oauth database config
-    dc_ums_oauth_type: db2 # derby (for test), db2, oracle
-    dc_ums_oauth_host:
-    dc_ums_oauth_port: 50000
-    dc_ums_oauth_name: UMSDB
-    dc_ums_oauth_schema:
-    dc_ums_oauth_ssl: false
-    dc_ums_oauth_ssl_secret_name:
-    dc_ums_oauth_driverfiles:
-    dc_ums_oauth_alternate_hosts:
-    dc_ums_oauth_alternate_ports:
-    # teamserver database config
-    dc_ums_teamserver_type: db2 # derby (for test), db2, oracle
-    dc_ums_teamserver_host:
-    dc_ums_teamserver_port: 50000
-    dc_ums_teamserver_name: UMSDB
-    dc_ums_teamserver_ssl: false
-    dc_ums_teamserver_ssl_secret_name:
-    dc_ums_teamserver_driverfiles:
-    dc_ums_teamserver_alternate_hosts:
-    dc_ums_teamserver_alternate_ports:
-```
-For information about UMS configuration parameters and their default values, see
-[UMS datasource parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_ums_params_database.html)
-
-## Configure database failover servers (optional)
-
-To cover the possibility that the primary server is unavailable during the initial connection attempt, you can configure a list of failover servers, as described in [Configuring client reroute for applications that use DB2 databases](https://www.ibm.com/support/knowledgecenter/en/SSEQTP_liberty/com.ibm.websphere.wlp.doc/ae/twlp_config_reroute_db2.html).
-
-In the Custom Resource, provide a comma-separated list of failover servers and failover ports.
-For example, if there are two failover servers
-* server1.db2.company.com on port 50443
-* server2.db2.company.com on port 51443
-
-in the `dc_ums_datasource` section specify:
-```yaml
-datasource_configuration:
-  dc_ums_datasource:
-    ...
-    dc_ums_oauth_alternate_hosts: "server1.db2.company.com, server2.db2.company.com"
-    dc_ums_oauth_alternate_ports: "50443, 51443"
-    ...
-    dc_ums_teamserver_alternate_hosts: "server1.db2.company.com, server2.db2.company.com"
-    dc_ums_teamserver_alternate_ports: "50443, 51443"
-```
-
-
-## Configure SSL between UMS and Db2 (optional)
-To ensure that all communications between UMS and Db2 are encrypted, import the database CA certificate to UMS and create a secret to store the certificate:
-
-```
-oc create secret generic ibm-dba-ums-db2-cacert --from-file=cacert.crt=<your certificate file>
-```
-
-**Note:** The certificate must be in PEM format. Specify `<your certificate file>` to point to the certificate file. Do not change the `--from-file=cacert.crt=` part.
-
-Use the generated secret to configure the Db2 SSL parameters in the Custom Resource:
-```yaml
-datasource_configuration:
-  dc_ums_datasource:
-    ...
-    dc_ums_oauth_ssl_secret_name: ibm-dba-ums-db2-cacert
-    dc_ums_oauth_ssl: true
-    ...
-    dc_ums_teamserver_ssl_secret_name: ibm-dba-ums-db2-cacert
-    dc_ums_teamserver_ssl: true
-```
-
-## Continue with UMS configuration
-You configured Db2 as the UMS datasource.
-
-Continue with the UMS configuration: [README_config.md](README_config.md)
diff --git a/UMS/README_config_ldap_ssl.md b/UMS/README_config_ldap_ssl.md
deleted file mode 100644
index b4243e2f..00000000
--- a/UMS/README_config_ldap_ssl.md
+++ /dev/null
@@ -1,62 +0,0 @@
-# Configure UMS to use LDAP over SSL
-
-## Generate a secret that contains the LDAP certificate
-
-### Obtain the LDAP certificate
-
-There are several options to obtain the LDAP certificate.
-
-1. If you have Java installed, you can obtain the certificate (including all its signers) using `keytool`:
-
-```
-keytool -printcert -sslserver $ldaphost:$ldapport -rfc > ldapcerts.pem
-```
-
-2. You can obtain the certificate (including all its signers) using OpenSSL:
-
-```
-echo | openssl s_client -showcerts -connect $ldaphost:$ldapport > /c/temp/ldapcerts.pem 2>&1
-```
-
-
-### Generate the secret
-
-Create a YAML configuration file, for example, `ldap-ssl-cert.yaml`, containing the certificate.
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: ldap-ssl-cert
-type: Opaque
-stringData:
-  ldap-cacert.crt: |-
-    -----BEGIN CERTIFICATE-----
-    <certificate>
-    -----END CERTIFICATE-----
-```
-
-Generate the secret by running
-```
-oc create -f ldap-ssl-cert.yaml
-```
-
-## Configure ldap_configuration parameters
-
-In the Custom Resource, enable LDAP over SSL by setting `ldap_configuration.lc_ldap_ssl_enabled: true` and configure the
-parameter `ldap_configuration.lc_ldap_ssl_secret_name` to point to the secret containing the signer certificate of the LDAP:
-
-```yaml
-  ldap_configuration:
-    ...
-    lc_ldap_ssl_enabled: true
-    lc_ldap_ssl_secret_name: ldap-ssl-cert
-
-```
-
-**Note:** During deployment, the operator will add the certificate to the truststore of UMS and enable UMS to use SSL for communication with LDAP.
-
-## Continue with UMS configuration
-You enabled UMS to use LDAP over SSL.
-
-Continue with the UMS configuration: [README_config.md](README_config.md)
\ No newline at end of file
diff --git a/UMS/README_config_oracle.md b/UMS/README_config_oracle.md
deleted file mode 100644
index a202f065..00000000
--- a/UMS/README_config_oracle.md
+++ /dev/null
@@ -1,102 +0,0 @@
-# Configure Oracle as the UMS datasource
-
-## Update datasource configuration in the Custom Resource
-In the `dc_ums_datasource` section, adjust the database configuration parameters.
-
-```yaml
-datasource_configuration:
-  dc_ums_datasource: # credentials are read from ums_configuration.admin_secret_name
-    # oauth database config
-    dc_ums_oauth_type: oracle # derby (for test), db2, oracle
-    dc_ums_oauth_host:
-    dc_ums_oauth_port: 1521
-    dc_ums_oauth_name:
-    dc_ums_oauth_schema:
-    dc_ums_oauth_ssl: false
-    dc_ums_oauth_ssl_secret_name:
-    dc_ums_oauth_driverfiles: ojdbc8.jar
-    dc_ums_oauth_alternate_hosts:
-    dc_ums_oauth_alternate_ports:
-
-```
-
-For the mandatory UMS Teams database, only Db2 is supported.
-Follow the instructions in [Configure Db2 as the UMS datasource](README_config_db2.md) to configure Db2.
-
-## Provide Oracle JDBC drivers
-
-Create a persistent volume (PV) and a persistent volume claim (PVC) for that PV.
-Consider the following sample configuration in `my-data-pv.yaml`. Add the hostname or IP address of your NFS server to the configuration.
-
-```yaml
-kind: PersistentVolume
-apiVersion: v1
-metadata:
-  name: data-pv
-  labels:
-    type: icp4a-pv
-spec:
-  capacity:
-    storage: 1Gi
-  volumeMode: Filesystem
-  accessModes:
-  - ReadWriteMany
-  persistentVolumeReclaimPolicy: Retain
-  storageClassName: inf-node
-  mountOptions:
-  - nolock
-  nfs:
-    path: /data
-    server: <NFS server hostname or IP address>
----
-kind: PersistentVolumeClaim
-apiVersion: v1
-metadata:
-  name: data-pvc
-spec:
-  accessModes:
-  - ReadWriteMany
-  volumeMode: Filesystem
-  storageClassName: inf-node
-  resources:
-    requests:
-      storage: 1Gi
-  selector:
-    matchLabels:
-      type: icp4a-pv
-  volumeName: data-pv
-```
-
-Deploy the PV and PVC:
-```
-oc create -f my-data-pv.yaml
-```
-
-In section `ums_configuration` configure the parameters `use_custom_jdbc_drivers` and `existing_claim_name`:
-
-```
-use_custom_jdbc_drivers: true
-existing_claim_name: data-pvc
-```
-
-Copy the Oracle JDBC driver to the jdbc/oracle directory on the mounted file system.
-
-```
- /data
-
-   └── jdbc
-
-      └── oracle
-
-         └── ojdbc8.jar
-
-```
-**Note:** The name of the JDBC driver is referenced in the property `datasource_configuration.dc_ums_oauth_driverfiles` in the Custom Resource.
-
-For information about UMS configuration parameters and their default values, see
-[UMS Database Configuration Parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_ums_params_database.html)
-
-## Continue with UMS configuration
-You configured Oracle as the UMS datasource.
-
-Continue with the UMS configuration: [README_config.md](README_config.md)
diff --git a/UMS/README_migrate.md b/UMS/README_migrate.md
deleted file mode 100644
index 9f50285a..00000000
--- a/UMS/README_migrate.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# Migrate User Management Service configuration from 19.0.2 and 19.0.3 to 20.0.1
-
-
-## Update the Custom Resource
-
-To update the configuration file from 19.0.2 to 19.0.3, follow the instructions in [Migrate User Management Service configuration from 19.0.2 to 19.0.3](https://github.com/icp4a/cert-kubernetes/blob/19.0.3/UMS/README_migrate.md).
-
-To update the configuration file from 19.0.3 to 20.0.1, you must add the parameters that were introduced in 20.0.1. Refer to [UMS parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_ums_params_ums.html), where the new configuration parameters are flagged as new in 20.0.1.
-
-## Complete the migration
-
-Return to the appropriate migrate page to configure other components and complete the migration.
-
-Migrate pages:
-- [Managed OpenShift migrate page](../platform/roks/migrate.md)
-- [OpenShift migrate page](../platform/ocp/migrate.md)
-- [Kubernetes migrate page](../platform/k8s/migrate.md)
diff --git a/UMS/README_upgrade.md b/UMS/README_upgrade.md
deleted file mode 100644
index 5d8bdc41..00000000
--- a/UMS/README_upgrade.md
+++ /dev/null
@@ -1,109 +0,0 @@
-# Upgrade User Management Service from 19.0.3 to 20.0.1
-
-This document describes the 20.0.1 updates to the configuration parameters that are related to the User Management Service.
-
-## Update shared configuration parameters
-
-In `spec`, make sure you have defined the following parameters:
-
-| Custom Resource parameter | Comment |
-| ------------------------------------------------------------------------------ | ------------------|
-| shared_configuration.sc_deployment_type | |
-| appVersion | 20.0.1 |
-| shared_configuration.sc_deployment_platform | OCP or NonOCP |
-
-For information about shared configuration parameters, refer to [Shared Configuration parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_shared_config_params.html)
-
-
-## Update datasource configuration parameters
-
-The following UMS configuration parameters are new in 20.0.1.
-
-| Custom Resource parameter | Comment |
-| ------------------------------------------------------------------------------ | ------------------|
-| datasource_configuration.dc_ums_datasource.dc_ums_oauth_schema | Optional parameter, can be specified if a schema was created. |
-| datasource_configuration.dc_ums_datasource.dc_ums_teamserver_type | |
-| datasource_configuration.dc_ums_datasource.dc_ums_teamserver_host | |
-| datasource_configuration.dc_ums_datasource.dc_ums_teamserver_port | |
-| datasource_configuration.dc_ums_datasource.dc_ums_teamserver_name | |
-| datasource_configuration.dc_ums_datasource.dc_ums_teamserver_ssl | |
-| datasource_configuration.dc_ums_datasource.dc_ums_teamserver_ssl_secret_name | |
-| datasource_configuration.dc_ums_datasource.dc_ums_teamserver_driverfiles | |
-| datasource_configuration.dc_ums_datasource.dc_ums_teamserver_alternate_hosts | |
-| datasource_configuration.dc_ums_datasource.dc_ums_teamserver_alternate_ports | |
-
-Except for the `datasource_configuration.dc_ums_datasource.dc_ums_oauth_schema`, the new parameters are used to configure the datasource for
-UMS Teams, a capability that is new in IBM Cloud Pak for Automation 20.0.1. For more information, see [User Management Service Teams](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/topics/con_ums_teams_option.html).
-
-For information about the database configuration parameters, refer to [UMS datasource parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_ums_params_database.html)
-
-## Update UMS docker images
-
-Update the UMS docker image tags to point to the 20.0.1 images and add the `shared_configuration.umsregistration_initjob` parameter that is new in 20.0.1.
-
-
-| Custom Resource parameter | Comment |
-| ------------------------------------------------------------------------------ | ------------------|
-| ums_configuration.images.ums.tag | update to 20.0.1 |
-| shared_configuration.images.keytool_init_container.tag | update to 20.0.1 |
-| shared_configuration.images.keytool_job_container.tag | update to 20.0.1 |
-
-
-## LDAP configuration
-
-There are no changes to the `ldap_configuration` section.
-For information about LDAP configuration parameters and sample values, refer to [Configuring the LDAP and user registry](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_k8s_ldap.html).
-
-
-## UMS configuration parameters
-
-### Changed parameters
-
-In 20.0.1, changes in behavior apply to the following parameters in `ums_configuration`:
-
-| Custom Resource parameter | Comment |
-| --------------------------------------------- | -------------------- |
-| ums_configuration.db_secret_name | Removed in 20.0.1; move the database credentials to the secret `ums_configuration.admin_secret_name` |
-| ums_configuration.hostname | In 20.0.1, if no hostname is specified, the hostname is generated from the `shared_configuration.sc_deployment_hostname_suffix` parameter |
-| ums_configuration.external_tls_secret_name | In 20.0.1, to avoid an invalid configuration in the Custom Resource, you must create the secret if the parameter is set. If the parameter is set but the secret is not created, UMS is not deployed and an error is written to the operator log.|
-
-This `ums-secret.yaml` configuration file provides an example of how to configure the `ibm-dba-ums-secret`:
-
-```yaml
-apiVersion: v1
-kind: Secret
-metadata:
-  name: ibm-dba-ums-secret
-type: Opaque
-stringData:
-  adminUser: "admin"
-  adminPassword: "admin"
-  oauthDBUser: "db2inst1"
-  oauthDBPassword: "!Passw0rd"
-  tsDBUser: "db2inst1"
-  tsDBPassword: "!Passw0rd"
-```
-
-After you have created the secret, update your Custom Resource to configure `ums_configuration.admin_secret_name` to point to the secret `ibm-dba-ums-secret`.
-
-### New parameters
-The following optional parameters are new in 20.0.1; they support long-lived access tokens. For more information, see [Using long-lived access tokens](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.offerings/topics/con_ums_sso_app_token.html).
-
-| Custom Resource parameter |
-| ----------------------------------------------------- |
-| ums_configuration.oauth.token_manager_group |
-| ums_configuration.oauth.access_token_lifetime |
-| ums_configuration.oauth.app_token_lifetime |
-| ums_configuration.oauth.app_password_lifetime |
-| ums_configuration.oauth.app_token_or_password_limit |
-| ums_configuration.oauth.client_secret_encoding |
-
-For information about UMS configuration parameters and sample values, see [UMS parameters](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.ref/k8s_topics/ref_ums_params_ums.html).
-
-## Complete the upgrade
-Return to the appropriate update page to configure other components and complete the deployment using the operator.
-
-Update pages:
-  - [Managed OpenShift update page](../platform/roks/update.md)
-  - [OpenShift update page](../platform/ocp/update.md)
-  - [Certified Kubernetes update page](../platform/k8s/update.md)
diff --git a/UMS/images/authentication-flows.jpg b/UMS/images/authentication-flows.jpg
deleted file mode 100644
index 5cbccade..00000000
Binary files a/UMS/images/authentication-flows.jpg and /dev/null differ
diff --git a/UMS/images/ums-in-k8s-v2.png b/UMS/images/ums-in-k8s-v2.png
deleted file mode 100644
index 44693b72..00000000
Binary files a/UMS/images/ums-in-k8s-v2.png and /dev/null differ
diff --git a/UMS/images/ums-in-k8s.jpg b/UMS/images/ums-in-k8s.jpg
deleted file mode 100644
index 1aebe276..00000000
Binary files a/UMS/images/ums-in-k8s.jpg and /dev/null differ
diff --git a/demo/README.md b/demo/README.md
deleted file mode 100644
index cec03e9c..00000000
--- a/demo/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# IBM Cloud Pak for Automation 20.0.1 for demonstration purposes
-
-This repository includes folders and resources to help you install the Cloud Pak for Automation software for demonstration purposes on Red Hat OpenShift Cloud Platform (OCP) 3.11.
-
-Cloud Pak for Automation capabilities can be installed on OCP by running a deployment script and selecting a deployment pattern. A deployment pattern includes a single Cloud Pak capability, as well as Db2 and OpenLDAP when they are needed.
-
-> **Note:** In 20.0.1, patterns cannot be used to install the Cloud Pak for enterprise purposes.
-
-In addition to the patterns, you can also install Business Automation Insights and Automation Digital Worker for demonstration purposes. These capabilities use a pattern deployment to demonstrate their value.
-
-- [Single capability deployment](README.md#single-capability-deployment)
-- [Combined capabilities deployment](README.md#combined-capabilities-deployment)
-
-## Single capability deployment
-
-To install a pattern with the Cloud Pak operator, an OCP administrator must run a script to set up a cluster and work with a non-administrator user to help them run the deployment script.
-
-Each pattern has a single Cloud Pak capability and a list of optional components that can be installed with the pattern. The deployment script prompts the user to enter values to get access to the container images and to select what is installed in the deployment.
-
-To install a pattern, click the link [Install a deployment pattern on Red Hat OpenShift](install_pattern_ocp.md).
-
-## Combined capabilities deployment
-
-Both Business Automation Insights and Automation Digital Worker need other Cloud Pak capabilities. To use more than one capability in an OCP cluster, you must install a single deployment pattern by using the Cloud Pak operator, install Business Automation Insights or Automation Digital Worker, and configure the components to work with each other.
-
-To install multiple capabilities, click one of the following links:
-
-- [Install Business Automation Insights with a pattern on Red Hat OpenShift](install_insights_ocp.md)
-- [Install Automation Digital Worker with a pattern on Red Hat OpenShift](install_workers_ocp.md)
diff --git a/demo/install_insights_ocp.md b/demo/install_insights_ocp.md
deleted file mode 100644
index 50907032..00000000
--- a/demo/install_insights_ocp.md
+++ /dev/null
@@ -1,411 +0,0 @@
-# Business Automation Insights with demo patterns on Red Hat OpenShift 3.11
-
-- [Installing Business Automation Insights with two demo patterns](install_insights_ocp.md#installing-business-automation-insights-with-two-demo-patterns)
-- [Uninstalling Business Automation Insights and the demo patterns](install_insights_ocp.md#uninstalling-business-automation-insights-and-the-demo-patterns)
-- [Troubleshooting](install_insights_ocp.md#troubleshooting)
-
-# Installing Business Automation Insights with two demo patterns
-
-Business Automation Insights is installed on a single node with a script. The Operational Decision Manager pattern and the FileNet Content Manager pattern are installed by the Cloud Pak operator with a cluster setup script and a deployment script.
-
-- [Prerequisites](install_insights_ocp.md#prerequisites)
-- [Task 1: Prepare your environment](install_insights_ocp.md#task-1-prepare-your-environment)
-- [Task 2: Install Business Automation Insights for a server](install_insights_ocp.md#task-2-install-business-automation-insights-for-a-server)
-- [Task 3: Install the Operational Decision Manager demo pattern (optional)](install_insights_ocp.md#task-3-install-the-operational-decision-manager-demo-pattern-optional)
-- [Task 4: Verify the Decision dashboard in Business Automation Insights](install_insights_ocp.md#task-4-verify-the-decision-dashboard-in-business-automation-insights)
-- [Task 5: Install the FileNet Content Manager demo pattern (optional)](install_insights_ocp.md#task-5-install-the-filenet-content-manager-demo-pattern-optional)
-- [Task 6: Verify the Content dashboard in Business Automation Insights](install_insights_ocp.md#task-6-verify-the-content-dashboard-in-business-automation-insights)
-
-
-## Prerequisites
-Make sure you have access to the following configuration:
-- A Red Hat OpenShift cluster v3.11
-- A single macOS or Linux machine to host Business Automation Insights
-
-## Task 1: Prepare your environment
-1. Install Docker and Docker Compose on your machine.
-
-   - On macOS:
-
-     Follow the instructions here: https://docs.docker.com/docker-for-mac/
-
-   - On Linux:
-
-     a. Install Docker
-     ```
-     yum install docker
-     ```
-     b. Install Docker Compose
-     ```
-     curl -L "https://github.com/docker/compose/releases/download/1.25.3/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
-     ```
-     c. Start the Docker daemon
-     ```
-     systemctl start docker
-     ```
-     d. Check that Docker has started correctly
-     ```
-     docker version
-     ```
-     e. Change execution permissions
-     ```
-     chmod +x /usr/local/bin/docker-compose
-     ```
-     f. Check that Docker Compose is installed correctly
-     ```
-     docker-compose version
-     ```
-
-2. If the `hostname` command is not installed on your machine, install it:
-   ```
-   yum install bind-utils
-   ```
-3. Install the `oc` client
-
-   a. Select and download the desired openshift-client from https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/
-
-   b.
Extract the `oc` client files
-
-      Example: 
-      ```
-      wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-client-linux-4.3.1.tar.gz
-      tar -xvf ./openshift-client-linux-4.3.1.tar.gz
-      ```
-
-      On Linux, you can add the `oc` client to your path as follows:
-      ```
-      mv oc /usr/local/bin/
-      ```
-
-## Task 2: Install Business Automation Insights for a server
-1. Download the installation files
-
-   a. Search for the Business Automation Insights for a server part number (CC5PYEN) on the Xtreme Leverage site https://w3-03.ibm.com/software/xl/download/ticket.wss 
-
-   b. Extract the files
-   ```
-   tar -xzvf bai-for-server-$VERSION.tgz
-   ```
-2. Install Business Automation Insights for a server
-
-   a. Go to the `bai-for-server-$VERSION` directory.
-   ```
-   cd bai-for-server-$VERSION
-   ```
-
-   b. Start IBM Business Automation Insights.
-   ```
-   ./bin/bai-start --acceptLicense --init
-   ```
-   The first time you start IBM Business Automation Insights, you must read and accept the license, and pass the `--init` option, which initializes the product configuration and generates the various necessary certificates. If you later restart the product, do not pass the `--init` option.
-
-   c. Answer the script questions.
-
-   The username and password information of each component (Kafka, Elasticsearch, and so on) is available in the `/.env` hidden file, as well as any other required information.
-
-   The output of the script is a Kibana URL.
-
-   d. To verify the installation of Business Automation Insights, launch the Kibana URL.
-
-   Enter the Kibana user and password that you specified in the `bai-start` script.
-
-
-
-## Task 3: Install the Operational Decision Manager demo pattern (optional)
-
-1. Log in to your OpenShift cluster
-
-   a. Open the cluster console.
-
-   b. In the top right of the console, click `copy login command`.
-
-   c. In a terminal window, paste this command as `oc login ...`
-
-2. Create the namespace where you plan to install Operational Decision Manager.
-   ```
-   oc new-project <project-name>
-   ```
-3. Create a Kubernetes secret for the Business Automation Insights emitter with Operational Decision Manager
-
-   a. Create the configuration file `plugin-configuration.properties`, for example:
-   ```
-   com.ibm.rules.bai.plugin.kafka.sasl.mechanism=PLAIN
-   com.ibm.rules.bai.plugin.kafka.security.protocol=SASL_SSL
-   com.ibm.rules.bai.plugin.kafka.ssl.enabled.protocols=TLSv1.2
-   com.ibm.rules.bai.plugin.kafka.ssl.truststore.location=/config/baiemitterconfig/truststore.jks
-   com.ibm.rules.bai.plugin.kafka.ssl.truststore.password=TRUSTSTOREPASSWORD
-   com.ibm.rules.bai.plugin.kafka.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="KAFKAUSER" password="KAFKAPASSWORD";
-   com.ibm.rules.bai.plugin.topic=bai-ingress
-   com.ibm.rules.bai.plugin.kafka.bootstrap.servers=KAFKAHOST:29092
-   ```
-   - Make sure the file is named exactly `plugin-configuration.properties`
-   - Edit the file
-     - TRUSTSTOREPASSWORD - Copy the value from the file `/certs/kafka/store-password.txt`
-     - KAFKAUSER - Credentials that you specified as the Kafka user in the `bai-start` script
-     - KAFKAPASSWORD - Credentials that you specified as the Kafka password in the `bai-start` script.
-       You have to decode it and put it in plain text in `plugin-configuration.properties`
-     - KAFKAHOST - The machine host name that you entered when installing Business Automation Insights (found by typing `hostname -f` on Linux, for example)
-
-   b. Create a new directory
-   ```
-   mkdir odmsecret
-   ```
-   c.
Get the `truststore.jks` file found in `/certs/kafka/` and put it together with the `plugin-configuration.properties` file inside the `odmsecret` folder.
-   ```
-   scp root@<BAI-host>:/certs/kafka/truststore.jks .
-   scp root@<BAI-host>:/certs/kafka/store-password.txt .
-   ```
-
-   d. Go to the `odmsecret` folder and create the Kubernetes secret
-   ```
-   oc create secret generic baiodmsecrets --from-file=./plugin-configuration.properties --from-file=./truststore.jks
-   ```
-4. Get the Operational Decision Manager pattern from GitHub
-
-   Download or clone the following GitHub repository on your local machine and go to the `cert-kubernetes` directory.
-   ```
-   $ git clone https://github.ibm.com/dba/cert-kubernetes
-   $ cd cert-kubernetes
-   ```
-5. Edit the `cert-kubernetes/descriptors/patterns/ibm_cp4a_cr_demo_decisions.yaml` CR file to add the Kubernetes secret
-
-   Uncomment the lines `customization` and `baiEmitterSecretRef: baiodmsecrets` (near the end of the .yaml file, at the same level as `image`)
-
-6. Run the installation script
-   ```
-   $ cd scripts
-   $ ./cp4a-demo-admin.sh
-   $ ./cp4a-pattern-deployment.sh 
-   ```
-   - Answer the script questions
-   - Select `Operational Decision Manager`
-   - Enter `iamapikey:yourkey` for the entitled registry
-
-
-
-## Task 4: Verify the Decision dashboard in Business Automation Insights
-
-Wait for the installation to complete, and then verify that the Operational Decision Manager emitters are present.
-Because the ODM demo pattern comes with a built-in sample, you can easily check that ODM sends events to Business Automation Insights by using the Decision Server console.
-
-1. Open the Decision Server console (in the OpenShift console, look at the generated routes and click `odm-ds-console`).
-Sign in with `odmAdmin`/`odmAdmin`.
-
-   a. Click Explore > LoanValidationDS > loan_validation_production ruleset
-
-   b. Click Add Property > Select `bai.emitter.enable` > Enter `true` > Click Add
-
-   c. Click Retrieve HTDS Description File
-
-   d. Select REST > Select JSON format > Click Test
-
-   A new window opens that allows you to execute a ruleset.
-   In the execution request, you might want to remove the line "DecisionID" so that multiple events display in Kibana.
-
-   e. Click Execute Request
-
-   You get a server response. An event should have been sent to Kafka.
-
-2. Launch the Kibana URL to check the results
-
-   a. Open the Kibana Decision Dashboard. Credentials are the ones you specified when you installed Business Automation Insights for a server.
-
-   b. Click Dashboard > Select Decisions Dashboard
-
-
-
-
-## Task 5: Install the FileNet Content Manager demo pattern (optional)
-
-1. Install the ECM pattern in a separate namespace
-
-   a. Log in to your cluster using `oc`.
-
-   - Open your cluster console
-
-   - In the top right of the console, click `copy login command`.
-
-   - In a terminal window, paste this command as `oc login ...`
-
-   b. Create the namespace where you plan to install ECM.
-   ```
-   oc new-project <project-name>
-   ```
-
-   c. Get the ECM pattern from GitHub
-
-   Download or clone the following GitHub repository on your local machine and go to the `cert-kubernetes` directory.
-   ```
-   $ git clone https://github.ibm.com/dba/cert-kubernetes
-   $ cd cert-kubernetes
-   ```
-
-   d. Run the installation script
-
-   ```
-   $ cd scripts
-   $ ./cp4a-demo-admin.sh
-   $ ./cp4a-pattern-deployment.sh 
-   ```
-   - Answer the script questions
-   - Select `FileNet Content Manager`
-   - Enter `iamapikey:yourkey` for the entitled registry
-
-2. Configure ECM to send events to Business Automation Insights
-
-   a.
Retrieve the Content event emitter module
-
-   - Locate the `cpe-deploy` pod by running the command
-     ```
-     oc get pods
-     ```
-   - Download the `bai-content-emitter` archive
-     ```
-     oc cp <cpe-pod-name>:lib/ContentBAI/eventhandler .
-     ```
-     For example:
-     ```
-     oc cp content-cpe-deploy-5464884bf6-k9xq:lib/ContentBAI/eventhandler .
-     ```
-
-
-
-   b. Configure the Content event emitter
-
-   - Log in to the FileNet Administration Console for Content Platform Engine
-     - Find the `cpe` route in OpenShift Console > Applications > Routes > content-cpe-route
-     - Paste the hostname into a browser and add `/acce` to the URL
-       The `acce` console opens
-     - In OpenShift Console > Resources > Secrets > ibm-fncm-secret, look for the username and password
-   - Create a new Event Action
-     - Locate `Event Actions` in the left menu, and then right-click and select `New Event Action`
-     - Enter a name for this event action (for example, `myBAIEventAction`), and then click `Next`
-     - At the `Specify the Type of Event Action` section:
-       - Check `Status: Enabled`
-       - Select `Type: Class`
-       - At `Java class handler`, enter `com.ibm.bai.content.event.emitter.eventhandler.ContentEmitterHandler`
-       - Check `Configure code module`
-       - Click `Next`
-     - At the `Specify the Code Module` section:
-       - Enter the `Code module title`. Example: `EmitterCodeModule`
-       - At `Content elements`, click `Browse` to select the `bai-content-emitter.jar` you retrieved in the previous step.
-       - Click `Next`, and then click `Finish`
-
-   - Create an Event Subscription
-     - Locate the `Subscriptions` folder in the left menu, and then right-click and select `New Subscription`.
-     - Enter a display name (for example, `BAISubscription`), and then click `Next`
-     - At the `Select Classes` section, set `Class type` and `Class` to `Document`. Click Next.
-     - At the `Specify the Subscription Behavior` section, keep the default settings. Click Next.
-     - At the `Select the Triggers` section, select `Creation Event and Update Event`. Click Next.
-     - At the `Event action` section, for `Select an event action`, select the event action you created in an earlier step (for example, `myBAIEventAction`). Click Next.
-     - At the `Specify Additional Options` section:
-       - For `Initial state`, select `Enable the Subscription`
-       - For `Subclass option`, select `Include subclasses` if you want to emit them.
-       - Do not select `Run synchronously`
-     - Click `Next`, and then click `Finish`
-
-
-   c. Customize the configuration file
-
-   - Locate the `cpe-cfgstore` persistent volume on your cluster.
For example:
-     ```
-     ssh root@mycluster.ocp
-     cd /export/NFS
-     ```
-   - Locate the persistent volume by searching the directory, for example:
-     ```
-     cd ecm-project-cpe-cfgstore123454
-     ```
-   - Create a new directory named `BAIForContent`
-   - Set permissions on the directory:
-     ```
-     chown 50001:50000 BAIForContent
-     chmod -R g=u BAIForContent
-     chgrp -R 0 BAIForContent
-     ```
-   - Copy the `truststore.jks` file found in /certs/kafka/ into the `BAIForContent` directory
-   - Create a new file from the following template and name it `configuration`, with no extension:
-     ```
-     contentemitter.input.content.server=CPE_HOSTNAME
-     contentemitter.output.kafka.topic=bai-ingress
-     contentemitter.output.kafka.bootstrap.servers=KAFKA_HOST:29092
-     contentemitter.output.kafka.security.protocol=SASL_SSL
-     contentemitter.output.kafka.ssl.truststore.location=/opt/ibm/wlp/usr/servers/defaultServer/configDropins/overrides/BAIForContent/truststore.jks
-     contentemitter.output.kafka.ssl.truststore.password=KAFKA_BROKERS_TRUSTSTORE_PASSWORD
-     contentemitter.output.kafka.ssl.enabled.protocols=TLSv1.2
-     contentemitter.output.kafka.ssl.truststore.type=JKS
-     contentemitter.output.kafka.ssl.endpoint.identification.algorithm=
-     contentemitter.output.kafka.sasl.mechanism=PLAIN
-     contentemitter.output.kafka.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="JAAS_CLIENT_USERNAME" password="JAAS_CLIENT_USER_PASSWORD";
-     ```
-
-   - Change the values according to your installation
-     - CPE_HOSTNAME: Use the IP address of the master node
-     - KAFKA_BROKERS_TRUSTSTORE_PASSWORD: Copy the value from the file `/certs/kafka/store-password.txt`
-     - KAFKA_HOST: The machine host name that you entered when installing Business Automation Insights (found by typing `hostname -f` on Linux, for example)
-     - JAAS_CLIENT_USERNAME: Credentials that you specified as the Kafka user in the `bai-start` script
-     - JAAS_CLIENT_USER_PASSWORD: Credentials that you specified as the Kafka password in the `bai-start` script. You have to decode it and put it in plain text
-
-   d. Restart the CPE pod. For example:
-   ```
-   oc delete pod <cpe-pod-name>
-   ```
-
-## Task 6: Verify the Content dashboard in Business Automation Insights
-
-To verify that an event has been submitted from FileNet to Business Automation Insights, you need to trigger the event by adding a new document.
-
-1. Open the FileNet console
-
-2. Log in to the FileNet navigator console
-
-   a. Find the `cpe` route in OpenShift Console > Applications > Routes > content-navigator-route
-
-   b. Paste the hostname into a browser and add `/navigator` to the URL
-
-   The navigator console opens.
-
-   c. In the OpenShift Console > Resources > Secrets > ibm-fncm-secret, look for the username and password
-
-3. Add a new document
-
-   a. In the top right menu, click `Add Document`
-
-   b. Enter a name and upload a test document
-
-   c. Click `Add`
-
-   A creation event must have been sent.
-
-4. Launch the Kibana URL to check the results
-
-   a. Open the Kibana Content Dashboard. Credentials are the ones you specified when you installed Business Automation Insights for a server.
-
-   b.
Click Dashboard > Select Content Dashboard
-
-
-
-
-# Uninstalling Business Automation Insights and the demo patterns
-
-To uninstall a demo deployment, delete the namespace by running the following command:
-```
-$ oc delete project <project-name>
-```
-
-To uninstall the cluster role, cluster role binding, and the CRD, run the following commands:
-```
-$ oc delete clusterrolebinding <project-name>-cp4a-operator
-$ oc delete clusterrole ibm-cp4a-operator
-$ oc delete crd icp4aclusters.icp4a.ibm.com
-```
-
-To uninstall Business Automation Insights for a server, refer to the Knowledge Center: http://ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/bai_sn_topics/tsk_bai_sn_uninstall.html
-
-
-# Troubleshooting
-
-Refer to the Knowledge Center: http://ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.bai/topics/con_bai_sn_troubleshooting_top.html
diff --git a/demo/install_pattern_ocp.md b/demo/install_pattern_ocp.md
deleted file mode 100644
index 3256cf2c..00000000
--- a/demo/install_pattern_ocp.md
+++ /dev/null
@@ -1,254 +0,0 @@
-# Installing a deployment pattern on Red Hat OpenShift 3.11
-
-This repository includes folders and resources to help you install the Cloud Pak for Automation software for demonstration purposes on Red Hat OpenShift Cloud Platform (OCP) 3.11.
-
-> **Restriction**: You cannot install the patterns on an IBM Managed RedHat OpenShift 3.11 cluster, also called RedHat OpenShift Kubernetes Service (ROKS).
-
-To install a pattern with the Cloud Pak operator, an OCP administrator user must run a script to set up a cluster and work with a non-administrator user to help them run a deployment script. Each pattern has a single Cloud Pak capability, a list of optional components that can be installed, as well as Db2 and OpenLDAP if they are needed.
-
-> **Note**: The scripts can only be used on Linux-based operating systems (Red Hat (RHEL) and CentOS) and on macOS.
-
-You can install one of the following capabilities, or patterns, for demonstration purposes:
-  - **Automation Applications**, which includes:
-    - Business Automation Studio (BAS)
-    - App Designer
-    - App Engine
-    - User Management Service (UMS)
-    - Business Automation Navigator (BAN)
-    - Application Discovery Plugin
-  - **Automation Content Analyzer**, which includes:
-    - Automation Content Analyzer (ACA)
-    - User Management Service (UMS) as an optional component
-  - **Automation Workstream Services**, which includes:
-    - AWS server
-    - Process Federation Server (PFS)
-    - App Engine
-    - Business Automation Navigator (BAN)
-    - Content Platform Engine (CPE)
-    - Content Search Services (CSS)
-    - Resource Registry (RR)
-    - User Management Service (UMS) as an optional component
-  - **FileNet Content Manager**, which includes:
-    - Content Platform Engine (CPE)
-    - Content Search Services (CSS)
-    - Business Automation Navigator (BAN)
-    - Content Services GraphQL
-    - Content Management Interoperability Services (CMIS) as an optional component
-  - **Operational Decision Manager**
-    - Operational Decision Manager (ODM)
-
-The "demo" deployment type provisions all of the required services, like Db2, OpenLDAP, and Kafka, with the default values, so there is no need to prepare these in advance.
-
-Use the following sections to install or uninstall a pattern:
-- [Install a deployment pattern](install_pattern_ocp.md#install-a-deployment-pattern)
-- [Uninstall a deployment pattern](install_pattern_ocp.md#uninstall-a-deployment-pattern)
-
-# Install a deployment pattern
-
-- [Step 1: Plan and prepare (by an OCP cluster administrator)](install_pattern_ocp.md#step-1-plan-and-prepare-by-an-ocp-cluster-administrator)
-- [Step 2: Get access to the container images (by an installer)](install_pattern_ocp.md#step-2-get-access-to-the-container-images-by-an-installer)
-- [Step 3: Run the deployment script (by an installer)](install_pattern_ocp.md#step-3-run-the-deployment-script-by-an-installer)
-- [Step 4: Verify that the automation containers are running](install_pattern_ocp.md#step-4-verify-that-the-automation-containers-are-running)
-- [Step 5: Access the services](install_pattern_ocp.md#step-5-access-the-services)
-- [Step 6: List the default LDAP users and passwords](install_pattern_ocp.md#step-6-list-the-default-ldap-users-and-passwords)
-- [Step 7: Post-installation tasks](install_pattern_ocp.md#step-7-post-installation-tasks)
-- [Troubleshoot](install_pattern_ocp.md#troubleshoot)
-
-## Step 1: Plan and prepare (by an OCP cluster administrator)
-
-The role of the cluster administrator is to gather the minimum system requirements to host and run the selected pattern of the Cloud Pak. A conversation needs to happen between the administrator and the non-administrator user (installer) to determine which pattern they want to install.
-
-The administrator must make sure that the target OpenShift cluster has the following tools and attributes.
-  - Kubernetes 1.11+.
-  - Kubernetes CLI. For more information, see https://kubernetes.io/docs/tasks/tools/install-kubectl/.
-  - The OpenShift Container Platform CLI. The CLI has commands for managing your applications, and lower-level tools to interact with each component of your system. Refer to the OpenShift 3.11 documentation: https://docs.openshift.com/container-platform/3.11/cli_reference/get_started_cli.html.
-  - Dynamic storage created and ready.
-    > **Tip**: For more information, see Kubernetes NFS Client Provisioner: https://github.com/kubernetes-incubator/external-storage/tree/master/nfs-client. There are instructions to configure an NFS server and client for the OCP nodes if you need to set up an NFS server.
-  - At least one non-administrator user that can be used to run the deployment script. For example `cp4a-user`.
-
-The OCP cluster needs the following minimum requirements for each pattern:
-
-| Pattern name | Master/Infra/Worker nodes | CPU per node type | Memory per node | Storage |
-| :--- | :--- | :--- | :--- | :--- |
-| Automation Applications | 1/1/3 | 4/4/4 | 8Gi | 53 GB |
-| Automation Content Analyzer | 1/1/2 | 4/4/8 | 16Gi | 100 GB |
-| Automation Workstream Services | 1/1/3 | 6/6/6 | 16Gi | 66 GB |
-| FileNet Content Manager | 1/1/3 | 4/4/4 | 16Gi | 65 GB |
-| Operational Decision Manager | 1/1/3 | 4/4/4 | 4Gi | 5 GB |
-
-  > **Note**: The Master and Infrastructure nodes can be located on the same host as long as it has enough resources. Masters with a co-located **etcd** need a minimum of 4 cores. OCP 3.11 does not support Docker alternative runtimes that implement the Kubernetes CRI (Container Runtime Interface), like CRI-O.
-
-
-The cluster setup script creates an OpenShift project (namespace), applies the custom resource definitions (CRD), adds the specified user to the ibm-cp4a-operator role, binds the role to the service account, and applies a security context constraint (SCC) for the Cloud Pak.
-
-The script also prompts the administrator to take note of the cluster host name and a dynamic storage class on the cluster. These names must be provided to the user who runs the deployment script.
-
-Use the following steps to complete the preparation:
-
-1. Download or clone the GitHub repository on your local machine and go to the `cert-kubernetes` directory.
-   ```bash
-   $ git clone git@github.com:icp4a/cert-kubernetes.git
-   $ cd cert-kubernetes
-   ```
-2. Log in to the target cluster as the `<cluster-admin>` user.
-   ```bash
-   $ oc login https://<cluster-ip>:<port> -u <cluster-admin> -p <password>
-   ```
-3. Run the cluster setup script from where you downloaded the GitHub repository, and follow the prompts in the command window.
-   ```bash
-   $ cd scripts
-   $ ./cp4a-clusteradmin-setup.sh
-   ```
-
-   1. Enter the name for a new project or an existing project (namespace). For example `cp4a-demo`.
-   2. Enter an existing non-administrator user name in your cluster to run the deployment script. For example `cp4a-user`.
-
-   When the script is finished, all of the available storage class names are displayed, as well as the infrastructure node name. Take a note of the class name that you want to use for the installation and the host name, as they are both needed for the deployment script.
-
-## Step 2: Get access to the container images (by an installer)
-
-To get access to the Cloud Pak container images, you must have an IBM Entitlement Registry key to pull the images from the IBM docker registry, or download the Cloud Pak package (.tgz file) from Passport Advantage (PPA) and push the images to a local docker registry. The deployment script asks for the entitlement key or user credentials for the local registry.
-
-As the non-administrator user, you also need the container images for Db2 and OpenLDAP.
-1. Download or clone the GitHub repository on your local machine and go to the `cert-kubernetes` directory.
-   ```bash
-   $ git clone git@github.com:icp4a/cert-kubernetes.git
-   $ cd cert-kubernetes
-   ```
-   The scripts and Kubernetes descriptors are needed to install Cloud Pak for Automation.
-2. To pull and push the Db2 and OpenLDAP images to your docker registry, run a script from a machine that is able to connect to the internet and the target image repository.
-   ```bash
-   $ ./loadPrereqImages.sh
-   ```
-
-### Option 1: Create an entitlement key for the IBM Cloud Entitled Registry
-
-1. Log in to [MyIBM Container Software Library](https://myibm.ibm.com/products-services/containerlibrary) with the IBMid and password that are associated with the entitled software.
-
-2. In the **Container software library** tile, click **View library** and then click **Copy key** to copy the entitlement key to the clipboard. Take a note of the key so that the installer can enter it with the deployment script.
-
-### Option 2: Download the packages from PPA and load the images
-
-[IBM Passport Advantage (PPA)](https://www-01.ibm.com/software/passportadvantage/pao_customer.html) provides archives (.tgz) for the software. To view the list of Passport Advantage eAssembly installation images, refer to the [download document](https://www.ibm.com/support/pages/ibm-cloud-pak-automation-v2001-download-document).
-
-1. Download one or more PPA packages to a server that is connected to your Docker registry.
-2.
-
-## Step 2: Get access to the container images (by an installer)
-
-To get access to the Cloud Pak container images, you must either have an IBM Entitlement Registry key to pull the images from the IBM Docker registry, or download the Cloud Pak package (.tgz file) from Passport Advantage (PPA) and push the images to a local Docker registry. The deployment script asks for the entitlement key or for user credentials for the local registry.
-
-As the non-administrator user, you also need the container images for Db2 and OpenLDAP.
-1. Download or clone the GitHub repository on your local machine and go to the `cert-kubernetes` directory.
-   ```bash
-   $ git clone git@github.com:icp4a/cert-kubernetes.git
-   $ cd cert-kubernetes
-   ```
-   The scripts and Kubernetes descriptors are needed to install Cloud Pak for Automation.
-2. To pull and push the Db2 and OpenLDAP images to your Docker registry, run the following script from a machine that can connect to both the internet and the target image repository.
-   ```bash
-   $ ./loadPrereqImages.sh
-   ```
-
-### Option 1: Create an entitlement key for the IBM Cloud Entitled Registry
-
-1. Log in to [MyIBM Container Software Library](https://myibm.ibm.com/products-services/containerlibrary) with the IBMid and password that are associated with the entitled software.
-
-2. In the **Container software library** tile, click **View library** and then click **Copy key** to copy the entitlement key to the clipboard. Take note of the key so that the installer can enter it in the deployment script.
-
-### Option 2: Download the packages from PPA and load the images
-
-[IBM Passport Advantage (PPA)](https://www-01.ibm.com/software/passportadvantage/pao_customer.html) provides archives (.tgz) for the software. To view the list of Passport Advantage eAssembly installation images, refer to the [download document](https://www.ibm.com/support/pages/ibm-cloud-pak-automation-v2001-download-document).
-
-1. Download one or more PPA packages to a server that is connected to your Docker registry.
-2. Check that you can run a docker command.
-   ```bash
-   $ docker ps
-   ```
-3. Log in to the Docker registry with a token.
-   ```bash
-   $ docker login $(oc registry info) -u <username> -p $(oc whoami -t)
-   ```
-   > **Note**: You can connect to a node in the cluster to resolve the `docker-registry.default.svc` parameter.
-
-4. Run a `kubectl` command to make sure that you have access to Kubernetes.
-   ```bash
-   $ kubectl cluster-info
-   ```
-5. Run the [`scripts/loadimages.sh`](../scripts/loadimages.sh) script to load the images into your Docker registry. Specify the two mandatory parameters in the command line.
-
-   ```
-   -p  PPA archive files location or archive filename
-   -r  Target Docker registry and namespace
-   -l  Optional: Target a local registry
-   ```
-
-   The following example shows the input values in the command line.
-   ```
-   $ ./loadimages.sh -p <PPA-archive>.tgz -r docker-registry.default.svc:5000/<project-name>
-   ```
-
-   > **Note**: The `<project-name>` value is the name of the project created by the cluster setup script. If you want to use an external Docker registry, take note of the OCP Docker registry service name or the URL to the Docker registry, so that you can enter it in the deployment script. If you connect remotely to the OCP cluster from a Linux host or VM, you must have Docker and the OpenShift command-line interface (CLI) installed. If you have access to the master node on the OCP 3.11 cluster, the CLI and Docker are already installed.
-
-6. Check that the images are pushed correctly to the registry.
-   ```bash
-   $ oc get is
-   ```
-
-## Step 3: Run the deployment script (by an installer)
-
-Depending on the pattern that you want to install, the deployment script prepares the environment before installing the automation containers. The script applies a customized custom resource (CR) file, which is deployed by the Cloud Pak operator. The deployment script prompts the user to enter values to get access to the container images and to select what is installed with the deployment.
-
-1. Log in to the OCP 3.11 cluster with the non-administrator user that the cluster administrator specified in Step 1. For example:
-   ```bash
-   $ oc login -u cp4a-user -p cp4a-user
-   ```
-2. Run the deployment script from the local directory where you downloaded the GitHub repository, and follow the prompts in the command window.
-   ```bash
-   $ cd scripts
-   $ ./cp4a-deployment.sh
-   ```
-
-> **Note:** The deployment script makes use of a custom resource (CR) template file for each pattern. The template names include "demo" and are found in the [descriptors/patterns](../descriptors/patterns) folder. The CR files are configured by the deployment script. However, you can copy these templates, configure them by hand, and apply the file from the kubectl command line if you want to run the steps manually, as shown in the sketch below.
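-
-For example, a manual run under those assumptions might look like the following sketch (`my_cr.yaml` and the `cp4a-demo` project are illustrative names):
-
-```bash
-# Copy a demo pattern template, edit it by hand, and apply it without the deployment script
-cp descriptors/patterns/ibm_cp4a_cr_demo_application.yaml my_cr.yaml
-vi my_cr.yaml                              # set images, hostname suffix, and storage class
-kubectl apply -f my_cr.yaml -n cp4a-demo   # the operator picks up the CR and reconciles it
-```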
-
-## Step 4: Verify that the automation containers are running
-
-The operator reconciliation loop can take some time.
-
-1. You can open the operator log to view the progress.
-   ```bash
-   $ oc logs <operator-pod-name> -c operator -n <project-name>
-   ```
-
-2. Monitor the status of your pods with:
-   ```bash
-   $ oc get pods -w
-   ```
-
-3. When all of the pods are *Running*, you can access the status of your services with the following command.
-   ```bash
-   $ oc status
-   ```
-
-## Step 5: Access the services
-
-When all of the containers are running, complete the following steps.
-
-1. Go to the `cert-kubernetes` directory on your local machine.
-   ```bash
-   $ cd cert-kubernetes
-   ```
-2. Log in to the OCP 3.11 cluster with the non-administrator user that the administrator created in Step 1. For example:
-   ```bash
-   $ oc login -u cp4a-user -p cp4a-user
-   ```
-3. Run the post-deployment script, which prints the routes created by the pattern and the user credentials that you need to log in to the web applications.
-   ```bash
-   $ cd scripts
-   $ ./cp4a-post-deployment.sh
-   ```
-
-## Step 6: List the default LDAP users and passwords
-
-After you find the service URLs and admin user credentials, you can also get a list of LDAP users.
-
-1. Get the `<cr-name>` by running the following command. The `<cr-name>` is the name of the custom resource for the pattern that you installed.
-   ```bash
-   $ oc get icp4acluster
-   ```
-2. Get the usernames and passwords for the LDAP users.
-   ```bash
-   $ oc get cm <cr-name>-openldap-customldif -o yaml
-   ```
-
-## Step 7: Post-installation tasks
-
-If the pattern that you installed includes Business Automation Navigator (BAN) and the User Management Service (UMS), then you need to configure the Single Sign-On (SSO) logout for the Admin desktop. For more information, see [Configuring SSO logout between BAN and UMS](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_configbanumsssok8s.html).
-
-## Troubleshoot
-For more information, see [Troubleshooting a deployment for demonstration purposes](install_troubleshooting_ocp.md).
-
-# Uninstall a deployment pattern
-
-To uninstall the deployment, you can delete the namespace by running the following command:
-```bash
-$ oc delete project <project-name>
-```
-To uninstall the cluster role, cluster role binding, and the CRD, run the following commands:
-```bash
-$ oc delete clusterrolebinding <project-name>-cp4a-operator
-$ oc delete clusterrole ibm-cp4a-operator
-$ oc delete crd icp4aclusters.icp4a.ibm.com
-```
-
diff --git a/demo/install_troubleshooting_ocp.md b/demo/install_troubleshooting_ocp.md
deleted file mode 100644
index 210b78ba..00000000
--- a/demo/install_troubleshooting_ocp.md
+++ /dev/null
@@ -1,169 +0,0 @@
-# Troubleshooting a deployment for demonstration purposes
-
-The troubleshooting information is divided into the following sections:
-
- - [Cluster admin setup script issues](install_troubleshooting_ocp.md#cluster-admin-setup-script-issues)
- - [Db2 issues](install_troubleshooting_ocp.md#db2-issues)
- - [Route issues](install_troubleshooting_ocp.md#route-issues)
-
-## Cluster admin setup script issues
-
-### Issue: During the execution of the cp4a-clusteradmin-setup.sh script the CRD fails to deploy
-
-If the following message is seen in the output, the user ('XYZ' in the example below) does not have cluster-admin permission:
-```
-Start to create CRD, service account and role ..Error from server (Forbidden): error when retrieving current configuration of:
-"/root/git/cert-kubernetes/descriptors/ibm_cp4a_crd.yaml": customresourcedefinitions.apiextensions.k8s.io "icp4aclusters.icp4a.ibm.com" is forbidden: User "XYZ" cannot get customresourcedefinitions.apiextensions.k8s.io at the cluster scope: no RBAC policy matched
-```
-
-1. Log out of the current OCP session (non-admin).
-
-2. Log in to OCP with the OCP cluster admin user.
-   ```bash
-   $ oc login -u dbaadmin
-   ```
-   Where `dbaadmin` is the OCP cluster admin user.
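-
-Before rerunning the setup script, you can confirm that the user now has the required cluster-scope permission. A minimal check, assuming the `oc auth can-i` subcommand of the 3.11 client:
-
-```bash
-# Prints "yes" when the logged-in user can manage CRDs at cluster scope
-oc auth can-i create customresourcedefinitions.apiextensions.k8s.io
-```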
-
-## Db2 issues
-
-Db2 is installed as part of the prerequisites of the patterns. The following issues can be resolved by matching the source of the problem with the proposed solution to make Db2 operational again.
-
-### Issue: Intermittent issue where Db2 process is not listening on port 50000
-
-If the `not listening on port 50000` message is found in the logs:
-
-1. Get the current running Db2 pod:
-   ```bash
-   $ oc get pod
-   ```
-2. Go to the pod:
-   ```bash
-   $ oc exec -it <db2-pod-name> bash
-   ```
-3. Switch to the db2inst1 user:
-   ```bash
-   $ su - db2inst1
-   ```
-4. Reapply the configuration:
-   ```bash
-   $ db2 update dbm cfg using SVCENAME 50000
-   ```
-5. Restart Db2:
-   ```bash
-   $ db2stop
-   $ db2start
-   ```
-
-### Issue: Db2 pod failed to start where db2u-release-db2u-0 pod shows 0/1 Ready
-
-This issue has the following symptoms in the Db2 pods:
-```
-[5357278.440940] db2u_root_entrypoint.sh[20]: + sudo /opt/ibm/db2/V11.5.0.0/adm/db2licm -a /db2u/license/db2u-lic
-[5357278.531782] db2u_root_entrypoint.sh[20]: LIC1416N The license could not be added automatically. Return code: "-100".
-[5357278.535893] db2u_root_entrypoint.sh[20]: + [[ 156 -ne 0 ]]
-[5357278.536085] db2u_root_entrypoint.sh[20]: + echo '(*) Unable to apply db2 license.'
-[5357278.536177] db2u_root_entrypoint.sh[20]: (*) Unable to apply db2 license.
-```
-
-To mitigate the issue, you have a number of options:
- - Option 1: Kill Db2
- - Option 2: Clean up Db2 and redeploy
- - Option 3: Delete the project
- - Option 4: Reboot the cluster
-
-**Option 1: Kill Db2**
-1. Run the following command to get the worker node that db2u is running on:
-   ```bash
-   $ oc get pods -o wide
-   ```
-2. Run an ssh command as root on the worker node hosting db2u:
-   ```bash
-   $ ssh root@<worker-node>
-   ```
-3. Run the following command to kill the orphaned db2u semaphores:
-   ```bash
-   $ ipcrm -S 0x61a8
-   ```
-4. Clean up the affected project/namespace by running the following commands:
-   ```bash
-   $ oc get icp4acluster                         # get the custom resource name
-   $ oc delete icp4acluster <cr-name>
-   $ oc delete deployment <operator-deployment>
-   ```
-5. Run the deployment script to start again.
-
-**Option 2: Clean up Db2 and redeploy**
-1. Get the custom resource name for icp4acluster:
-   ```bash
-   $ oc get icp4acluster
-   ```
-2. Delete the CR:
-   ```bash
-   $ oc delete icp4acluster <cr-name>
-   ```
-   or
-   ```bash
-   $ oc delete -f <cr>.yaml
-   ```
-   The `<cr>.yaml` file is generated in the `./tmp` directory. You also need to delete the operator deployment by running the following command:
-   ```bash
-   $ oc delete deployment <operator-deployment>
-   ```
-3. Make sure there is nothing left by running the following commands:
-   ```bash
-   $ oc get sts
-   $ oc get jobs
-   $ oc get deployment
-   $ oc get pvc | grep db2
-   ```
-4. Run the deployment script to start again.
-
-**Option 3: Delete the project/namespace**
-1. If options 1 or 2 don't work, delete the project and redeploy by running the following command:
-   ```bash
-   $ oc delete project $project_name
-   ```
-
-**Option 4: Reboot the entire cluster**
-1. If none of the other options work, get the names of the nodes and reboot them:
-   ```bash
-   $ oc get no --no-headers | awk '{print $1}'
-   ```
-2. Reboot all of the nodes listed (reboot the worker nodes first, then the infrastructure node, and then the master node).
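-
-If you suspect the orphaned-semaphore condition from Option 1, you can inspect the semaphores on the worker node before removing anything. A sketch (the key `0x61a8` is the one used in Option 1 above):
-
-```bash
-# On the worker node that hosts the db2u pod
-ipcs -s               # list all semaphore sets
-ipcs -s | grep 61a8   # look for the orphaned db2u semaphore key
-ipcrm -S 0x61a8       # remove it only if it is present
-```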
-
-### Issue: db2-release-db2u-restore-morph-job-xxxxx shows "Running", but fails to reach "Completed"
-
-Run the following command to check and confirm this issue:
-```bash
-$ oc get pod
-```
-
-The command outputs a table showing the READY and STATUS columns:
-```bash
-NAME                                        READY   STATUS
-db2-release-db2u-restore-morph-job-xxxxx    1/1     Running
-```
-
-If the STATUS does not change to `Completed` after a few minutes:
-1. Delete the Db2 pod by running the `oc delete` command:
-   ```bash
-   $ oc delete pod db2-release-db2u-restore-morph-job-xxxxx
-   ```
-2. Confirm that the Db2 job is terminated and a new pod is up and running:
-   ```bash
-   $ oc get pod -w
-   ```
-   When the job reads `Completed`, the pattern can continue to deploy.
-
-## Route issues
-
-### Issue: Generated routes do not work
-
-In some environments, route URLs contain the string `apps.`. However, the cp4a-clusteradmin-setup.sh script returns the hostname of the infrastructure node without this string. If you entered that hostname in the cp4a-post-deployment.sh script in an environment that uses `apps.`, the routes do not work.
-
-**Workaround:**
-When you run the cp4a-deployment.sh script, add `apps.` to the infrastructure hostname.
-
-For example, if the cp4a-clusteradmin-setup.sh script outputs the infrastructure hostname as `ocp-master.tec.uk.ibm.com`, enter `ocp-master.apps.tec.uk.ibm.com` when you run the cp4a-post-deployment.sh script.
-
-Tip: You can find the existing route URLs by running `oc get route --all-namespaces` and extracting the common URL pattern of the routes.
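-
-A sketch that derives the common route suffix from the existing routes, assuming GNU text utilities on the host where you run `oc`:
-
-```bash
-# Print every route host, strip the first label, and show the most common suffix
-oc get route --all-namespaces -o jsonpath='{.items[*].spec.host}' \
-  | tr ' ' '\n' | sed 's/^[^.]*\.//' | sort | uniq -c | sort -rn | head -1
-```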
diff --git a/demo/install_workers_ocp.md b/demo/install_workers_ocp.md
deleted file mode 100644
index 3d3da4b2..00000000
--- a/demo/install_workers_ocp.md
+++ /dev/null
@@ -1,431 +0,0 @@
-# Automation Digital Worker with a pattern on Red Hat OpenShift 3.11
-
-- [Installing Automation Digital Worker combined with a pattern](install_workers_ocp.md#installing-automation-digital-worker-combined-with-a-pattern)
-- [Uninstalling Automation Digital Worker](install_workers_ocp.md#uninstalling-automation-digital-worker)
-- [Troubleshooting](install_workers_ocp.md#troubleshooting)
-
-
-# Installing Automation Digital Worker combined with a pattern
-
-- [Prerequisites](install_workers_ocp.md#prerequisites)
-- [Task 1: Prepare your environment](install_workers_ocp.md#task-1-prepare-your-environment)
-- [Task 2: Install Automation Applications](install_workers_ocp.md#task-2-install-automation-applications)
-- [Task 3: Install Automation Digital Worker](install_workers_ocp.md#task-3-install-automation-digital-worker)
-- [Task 4: Verify the installation](install_workers_ocp.md#task-4-verify-the-installation)
-- [Task 5: Install Automation Content Analyzer (optional)](install_workers_ocp.md#task-5-install-automation-content-analyzer-optional)
-- [Task 6: Install Operational Decision Manager (optional)](install_workers_ocp.md#task-6-install-operational-decision-manager-optional)
-
-## Prerequisites
-Make sure you have access to the following configuration:
-- A Red Hat OpenShift cluster v3.11
-
-## Task 1: Prepare your environment
-
-Install the `oc` client on your local machine or on the machine where you plan to install Automation Digital Worker.
-
-1. Select and download the desired openshift-client from https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/
-
-2. Extract the `oc` client files.
-
-   Example:
-   ```
-   wget https://mirror.openshift.com/pub/openshift-v4/clients/ocp/latest/openshift-client-linux-4.3.1.tar.gz
-   tar -xvf ./openshift-client-linux-4.3.1.tar.gz
-   ```
-
-   On Linux, you can add the `oc` client to your path as follows:
-   ```
-   mv oc /usr/local/bin/
-   ```
-
-## Task 2: Install Automation Applications
-
-
-1. Create a project to install Automation Digital Worker.
-   ```
-   oc login ...
-   oc new-project <project-name>
-   ```
-
-2. Get the Applications pattern from GitHub.
-
-   Download or clone the following GitHub repository on your local machine and go to the `cert-kubernetes` directory.
-   ```
-   git clone https://github.com/icp4a/cert-kubernetes
-   cd cert-kubernetes
-   ```
-
-3. Run the install script.
-   ```
-   $ cd scripts
-   $ ./cp4a-clusteradmin-setup.sh
-   $ ./cp4a-deployment.sh
-   ```
-   - Pick option 5, "Automation Applications".
-   - Answer the script questions.
-   - Enter `iamapikey:yourkey` for the entitled registry.
-
-   At the end of the installation, you get an output like this one:
-
-   ```
-   The custom resource file used is: "/root/cert-kubernetes/scripts/.bak/.ibm_cp4a_cr_demo_application.yaml"...
-   ```
-
-   It is this CR file that you will modify in later steps. Copy it from the path shown in the output. For example:
-
-   `cp /root/cert-kubernetes/scripts/.bak/.ibm_cp4a_cr_demo_application.yaml ./mycr.yaml`
-
-4. Get the OCP infrastructure name.
-   ```
-   export INFRA_NAME=<infrastructure-node-name>
-   export NAMESPACE=adwproject
-   ```
-   `<infrastructure-node-name>` is given by the `./cp4a-clusteradmin-setup.sh` script.
-
-5. Get the UMS credentials; you will need to enter them in later steps.
-
-   The user name is `umsadmin` and the password is the value of `umsadminpassword`, which is randomly generated.
-   To get the password, look in OpenShift Console > Resources > Secrets > ibm-dba-ums-secret.
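-
-   Instead of the console, the password can also be read with the CLI. A sketch, assuming the secret lives in the project you created in step 1 (use `base64 -D` on older macOS versions):
-   ```
-   oc get secret ibm-dba-ums-secret -n <project-name> -o jsonpath='{.data.umsadminpassword}' | base64 --decode
-   ```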
-
-## Task 3: Install Automation Digital Worker
-
-1. Create the SSL certificate and the adw-tls-secret by copy-pasting the following code.
-   ```
-   echo "$(date +%T) - ### Generating SSL certificate for ADW ###"
-   openssl genrsa -out server.key 2048
-   openssl rsa -in server.key -out server.key
-   openssl req -sha256 -new -key server.key -out server.csr -subj "/CN=adwmanagement.$NAMESPACE.$INFRA_NAME"
-   openssl x509 -req -sha256 -days 365 -in server.csr -signkey server.key -out server.crt
-   echo "$(date +%T) - ### Creating secret to allow BAS communication ###"
-   oc create secret tls adw-tls-secret --key server.key --cert=server.crt
-   ```
-
-2. Create the Automation Digital Worker secret.
-
-   a. Copy the ADW secret template code below.
-
-   b. Fill it in with the following values.
-
-      - npmUser => leave as is
-      - npmPassword => leave as is
-      - skillEncryptionSeed => leave as is
-      - oidcClientId => leave as is
-      - oidcClientSecret => leave as is
-      - oidcUserName => leave as is
-      - oidcPassword => the umsadmin password encoded in base64. It can be found in the secret `ibm-dba-ums-secret`. Example for a macOS user: `echo -n <password> | base64`
-      - registryPassword => found in application-rr-admin-secret (the writePassword value, encoded in base64)
-      - registryUser => leave as is
-      - server.crt => the certificate generated in step 1, encoded in base64
-      - server.key => the key generated in step 1, encoded in base64
-
-   c. Save the template as `adw-secret.yaml` and apply it:
-      ```
-      oc apply -f adw-secret.yaml
-      ```
-
-   ADW secret template code:
-
-   ```
-   apiVersion: v1
-   kind: Secret
-   metadata:
-     name: adw-secret
-   type: Opaque
-   data:
-     npmUser: YmFpdw==
-     npmPassword: YmFpdw==
-     skillEncryptionSeed: YmFpdy1za2lsbHM=
-     oidcClientId: bXljbGllbnRpZCAtbgo=
-     oidcClientSecret: Y2xpZW50cGFzc3dvcmQ=
-     oidcUserName: dW1zYWRtaW4=
-     oidcPassword: WHpUeTN6ZzVtQXFZWE5oT2pyMU8=
-     registryPassword: eXBHWlB2TWxNVGZudWRS
-     registryUser: d3JpdGVy
-     server.crt: cxLwpUeUFhUWVsT3Nad3NLcU5KWldtQ0FLWVZmTmZnWkVQeE96dnIrbDExU2dzWEU5Q0tTK2JjOFJYejFBemllRFJ1Cm9HSU96T0Uxc2ZTRG93WnZZL0J2TVVsT3V0bXd1YUJXNFpDd25ib29URlVtSE0wQ0F3RUFBVEFOQmdrcWhraUcKOXcwQkFRc0ZBQU9DQVFFQXBweEQwbEtQU2xpWk5ZTCthWGM4VFhoUDN1TFFsMFoxenNzK0ZNc2Jodmc2Z2F6YQpVbW9sR081S2tyV3VYZHZDYW0zbkdBR2xTakRxT21pZnNZTVJxejh6dE93TWQ0TXN0YjdJQzVQUlBGWXRIanJ0ClJFQU81NnVzZVcrNGFQWVNZZEcxUmNZa0o3MFlVRnY1UllXOEcrTy9Rd0NrcTRHazdVcVptMVA4VUU2OUtTaXYKY05Oa1cyYmpRdWIrNTJpTDlWaVJpcWpQcW5EM1ZRY1FxQzZLOURjN3o5dXc3VU8yVmNyWmJJcE45UHhzWTEreQpwSTVQdlRic0pTMXhNOVNkcVlHUDk3YUtnK24vVkhwV1oyOU5ydEIzT1g0UktkcjRjUWRWUEZobUpSS20yTkZKCkx3YmRKRzB3MTJrTk9ESXYrZGJUVlNRQzIzcy84U0pZSWsycFZnPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
-     server.key: BMOGtiVmFmOWJrMlFyMmpTcS9ZZGFsejNrbjg2eExBMUlnTUg4ejByYndqY3ZpUWMyVzA4CmZtVlJlbWxqQ3k0S2NQM1hpN01LenNSdWwzSXg1aEl4dTZHWWhCbzhmOWtSY21DZ2lDbDVsY1ZKYUE2am1nUVgKd0gwa2hUN3lkQ2RqcVBkTVFYODA4d2gvb0gxZFV3eXpyWVVLazRWQ0tJcFJzWDU5Q2NsSEZ6d3VnSEJIV3cwYQorVElEN3E0eVJYZXRVbGZESkVES2tHTVVEOW1QN21nR1BLcFFtTVlkbTB6NlVwa002c0dKTXJvUHR1ZmhYOWtSCnZMNUNFM2t5bXRkZ2sxdEdDRWo1UXBycVJzMy96UERyOTloZUx2ZzJPM0QxNlFVSkg2NGFndFNXVWxtM2JpSWkKUUR3eG5OS2RBb0dCQU5xaGlDRFJqYWNaUFFKMlpvK1UzSWpJZ3RWZFJSVDBCby9wUThJVjkwVDVibXZGeE00dwpJOVlLSUpEd0tNNVZMZ3lYSXB2U1pqNHlhS211bTVRbmlxTlpvU3ZqcjdpV2dvcmt1NktTUzdxRVZzaDFQOGVHCmJoYjBIVXQ1QUJKY2VCc25xNUZqeGpRbUJVSWNrQWpVWWhKQlJqdUpWUlI4MDl3Mk5UZmhUcWhQQW9HQkFNaGsKenY5bG1KQU5oTkFuTDE1dzRGYTZubmw2WXZFVWtkT3IwdTZkckp2Vi9ldkxKczlXWXc2ZEdTcjNaTHdRWkJmaAo3bnFrZWg0M2xxR3o2NHpFT0VvZkRPSGIvSlNvai93SWdaWWYzUUsvUW9CODNUZzF6WGdSMjFTYkJvdFV3Zzh0CmFUcngwdFYwNm5oNmNWc2N2ajhUdS82VExWTU5JU2xUN1RJMVNNWWpBb0dCQUpnTzBzdm9rem5OenVZQWpWSjkKZVdqVTlGSUxYYm0yQXo1aVFaTWlqZWoyQm51RWdGM2JrNEVSYUJjR1FZdElLUS91cWM1d1psWUozMHRzdXA0dgpaamc0WldWT0pYQWZsa21kem5iQ2cxTUZLZ3FmcWExTzdSQ2YxaVFnMHhEeUtVTFJzMzBhUk1jT1RvZnRyNnZFCmN6VTVHdXpibGtYNmo2dFFOSEZhRGNmM0FvR0FDcmt5Um9KMlJvY2lxMHpkZ1ExRFJBRGFpQitmZWMwaS9KTysKUnR5VEp3ZXRmZGV5TFBndmR0RzdUZ3hOREs5SDIrdFFLcW53aXZ0b2lTQ3FveTdBNEY2Ry92TVpzZzdQSGFxSwozTEM1ei9MU0tUUUZWb095aWhGU2psVjVaUzFVOFNENTk1aUhNcnI5N1JLSVRGcmVaMXV6L0t4OWlXc3pjaFcyCnBMQXJROGNDZ1lBL01KcUQxM0NXVDJ1S0crMU9QdStJbW1jdEJBbkt5Q0k5UlRXMjJKcmRBalZ1bjdnc0MrVzEKT1JFaGYxVVJieXBXVGFFREZveFcra0tJLytIYWdzMGRaTmw3RitMZy9uMFlCa29oVUw5WVczSUFvYzc0UEVlTQpvb2paMVpTSmkrMWNscS9SMFBpT21LMlNwQm1FZTJiVGV2dFBUVEUySG1GRU1KYkwvWEZwUGc9PQotLS0tLUVORCBSU0EgUFJJVkFURSBLRVktLS0tLQo=
-   ```
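-
-   The non-obvious values in the template can be produced as in the following sketch; the secret and key names are the ones referenced in the list above, and `base64 -w0` is the GNU form (omit `-w0` on macOS):
-   ```
-   # registryPassword: already base64-encoded in the Resource Registry admin secret
-   oc get secret application-rr-admin-secret -o jsonpath='{.data.writePassword}'; echo
-   # server.crt / server.key: the files generated in step 1, base64-encoded on one line
-   base64 -w0 server.crt; echo
-   base64 -w0 server.key; echo
-   ```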
-
-3. Modify the `cert-kubernetes/descriptors/patterns/ibm_cp4a_cr_demo_application.yaml` CR file that was generated when you ran the Automation Applications installation script in Task 2 (that is, the copy you made in Task 2, for example `mycr.yaml`).
-
-   a. Add an `adw_configuration` block to the CR file (see the CR file below), at the same level as the other configuration blocks (such as `ums_configuration` or `bastudio_configuration`).
-
-   b. In the `adw_configuration` block, set the URLs for:
-      - designer (example: adw.mycluster.mydomain.com)
-      - management
-      - runtime
-      - UMS (take the host of the `application-ums-route` route and append `/oidc/endpoint/ums`, as in the `oidc.endpoint` value of the CR file below; see the lookup sketch after this list)
-      - Resource Registry (take the host of the Resource Registry route of your cluster and append `/v3beta`, as in the `registry.endpoint` value of the CR file below)
-
-   c. In the `bastudio_configuration` block, add the TLS trust list.
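-
-   To look up the route hosts referenced in step b, a sketch (the `application-ums-route` name follows the `application` deployment of this example):
-   ```
-   # UMS host; append /oidc/endpoint/ums for the oidc.endpoint value
-   oc get route application-ums-route -o jsonpath='{.spec.host}'; echo
-   # Resource Registry host; append /v3beta for the registry.endpoint value
-   oc get route | grep rr
-   ```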
-
-CR file:
-
-```
-apiVersion: icp4a.ibm.com/v1
-kind: ICP4ACluster
-metadata:
-  name: application
-  labels:
-    app.kubernetes.io/instance: ibm-dba
-    app.kubernetes.io/managed-by: ibm-dba
-    app.kubernetes.io/name: ibm-dba
-    release: 20.0.1
-spec:
-  adw_configuration:
-    adwSecret: adw-secret
-    designer:
-      externalUrl: 'https://adw.application.mycluster.mydomain.com'
-      image:
-        repository: cp.icr.io/cp/cp4a/adw/adw-designer
-        tag: 20.0.1
-      service:
-        type: Route
-    global:
-      imagePullSecret: admin.registrykey
-      kubernetes:
-        serviceAccountName: ''
-    init:
-      image:
-        repository: cp.icr.io/cp/cp4a/adw/adw-init
-        tag: 20.0.1
-    management:
-      externalUrl: 'https://adwmanagement.application.mycluster.mydomain.com'
-      image:
-        repository: cp.icr.io/cp/cp4a/adw/adw-management
-        tag: 20.0.1
-      service:
-        type: Route
-    oidc:
-      endpoint: https://ums.application.mycluster.mydomain.com/oidc/endpoint/ums
-    registry:
-      endpoint: 'https://rr.application.mycluster.mydomain.com/v3beta'
-    runtime:
-      externalUrl: 'https://adwruntime.application.mycluster.mydomain.com'
-      image:
-        repository: cp.icr.io/cp/cp4a/adw/adw-runtime
-        tag: 20.0.1
-      service:
-        type: Route
-    setup:
-      image:
-        repository: cp.icr.io/cp/cp4a/adw/adw-setup
-        tag: 20.0.1
-  bastudio_configuration:
-    images:
-      bastudio:
-        repository: cp.icr.io/cp/cp4a/bas/bastudio
-        tag: 20.0.1
-    jms_server:
-      image:
-        repository: cp.icr.io/cp/cp4a/bas/jms
-        tag: 20.0.1
-    playback_server:
-      images:
-        db_job:
-          repository: cp.icr.io/cp/cp4a/bas/solution-server-helmjob-db
-          tag: 20.0.1
-        solution_server:
-          repository: cp.icr.io/cp/cp4a/bas/solution-server
-          tag: 20.0.1
-    tls:
-      tls_trust_list:
-        - adw-tls-secret
-  navigator_configuration:
-    image:
-      repository: cp.icr.io/cp/cp4a/ban/navigator-sso
-      tag: 20.0.1
-  resource_registry_configuration:
-    images:
-      resource_registry:
-        repository: cp.icr.io/cp/cp4a/aae/dba-etcd
-        tag: 20.0.1
-  shared_configuration:
-    image_pull_secrets:
-      - admin.registrykey
-    images:
-      busybox:
-        repository: docker.io/library/busybox
-        tag: latest
-      db2:
-        repository: docker.io/ibmcom/db2
-        tag: 11.5.1.0-CN1
-      db2_auxiliary:
-        repository: docker.io/ibmcom/db2u.auxiliary.auth
-        tag: 11.5.1.0-CN1
-      db2_etcd:
-        repository: quay.io/coreos/etcd
-        tag: v3.3.10
-      db2_init:
-        repository: docker.io/ibmcom/db2u.instdb
-        tag: 11.5.1.0-CN1
-      db2u_tools:
-        repository: docker.io/ibmcom/db2u.tools
-        tag: 11.5.1.0-CN1
-      dbcompatibility_init_container:
-        repository: cp.icr.io/cp/cp4a/aae/dba-dbcompatibility-initcontainer
-        tag: 20.0.1
-      keytool_init_container:
-        repository: cp.icr.io/cp/cp4a/ums/dba-keytool-initcontainer
-        tag: 20.0.1
-      keytool_job_container:
-        repository: cp.icr.io/cp/cp4a/ums/dba-keytool-jobcontainer
-        tag: 20.0.1
-      openldap:
-        repository: osixia/openldap
-        tag: 1.3.0
-      umsregistration_initjob:
-        repository: cp.icr.io/cp/cp4a/aae/dba-umsregistration-initjob
-        tag: 20.0.1
-    root_ca_secret: '{{ meta.name }}-root-ca'
-    sc_deployment_hostname_suffix: '{{ meta.name }}.mycluster.mydomain.com'
-    sc_deployment_patterns: application
-    sc_deployment_platform: OCP
-    sc_deployment_type: demo
-    storage_configuration:
-      sc_dynamic_storage_classname: managed-nfs-storage
-  ums_configuration:
-    images:
-      ums:
-        repository: cp.icr.io/cp/cp4a/ums/ums
-        tag: 20.0.1
-```
-
-4. Re-apply the CR file that you modified.
-```
-oc apply -f mycr.yaml
-```
-
-## Task 4: Verify the installation
-
-1. Check that all the pods are in a `Running` or `Completed` status.
-
-2. Check that Automation Digital Worker is available in Business Automation Studio.
-
-   a. Look at the routes in the OpenShift Console, find Business Automation Studio, and then add `/BAStudio` to the Business Automation Studio URL.
-
-   b. Open a browser on the Business Automation Studio URL.
-
-   c. Open Automation Digital Worker from Business Automation Studio.
-
-3. Check that you can go back to Business Automation Studio by clicking the breadcrumbs in Automation Digital Worker.
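-
-A quick way to spot pods that are not yet `Running` or `Completed`, assuming you are in the ADW project:
-
-```
-# Any output here is a pod that still needs attention
-oc get pods --no-headers | grep -vE 'Running|Completed'
-```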
-
-## Task 5: Install Automation Content Analyzer (optional)
-
-
-1. Create a project to install Content Analyzer.
-   ```
-   oc new-project <project-name>
-   ```
-
-2. Get the Content Analyzer pattern from GitHub.
-
-   We assume you already cloned the git repository:
-   ```
-   git clone https://github.com/icp4a/cert-kubernetes
-   ```
-
-3. Run the install script.
-   ```
-   $ cd scripts
-   $ ./cp4a-clusteradmin-setup.sh
-   $ ./cp4a-deployment.sh
-   ```
-   - Answer the script questions.
-   - Enter `iamapikey:yourkey` for the entitled registry.
-
-4. Verify that the installation is complete.
-
-   The operator reconciliation loop can take some time.
-   - Open the operator log to view the progress:
-   ```
-   $ oc logs <operator-pod-name> -c operator -n <project-name>
-   ```
-   - Monitor the status of your pods with:
-   ```
-   $ oc get pods -w
-   ```
-   - When all of the pods are `Running`, you can access the status of your services with the following command:
-   ```
-   $ oc status
-   ```
-
-5. Configure Digital Worker to use Content Analyzer.
-
-   For details, refer to https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.adw/topics/con_task_create.html
-
-## Task 6: Install Operational Decision Manager (optional)
-
-
-1. Create a project to install ODM.
-   ```
-   oc new-project <project-name>
-   ```
-
-2. Get the ODM pattern from GitHub.
-
-   We assume you already cloned the git repository:
-   ```
-   git clone https://github.com/icp4a/cert-kubernetes
-   ```
-
-3. Run the install script.
-   ```
-   $ cd scripts
-   $ ./cp4a-clusteradmin-setup.sh
-   $ ./cp4a-deployment.sh
-   ```
-   - Answer the script questions.
-   - Enter `iamapikey:yourkey` for the entitled registry.
-
-4. Verify that the installation is complete.
-
-   The operator reconciliation loop can take some time.
-   - Open the operator log to view the progress:
-   ```
-   $ oc logs <operator-pod-name> -c operator -n <project-name>
-   ```
-   - Monitor the status of your pods with:
-   ```
-   $ oc get pods -w
-   ```
-   - When all of the pods are `Running`, you can access the status of your services with the following command:
-   ```
-   $ oc status
-   ```
-
-5. Configure Digital Worker to use ODM.
-
-   For details, refer to https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.adw/topics/con_task_create.html
-
-# Uninstalling Automation Digital Worker
-
-To uninstall Automation Digital Worker, delete the namespace by running the following command:
-```
-oc delete project <project-name>
-```
-
-# Troubleshooting
-
-If Automation Digital Worker is not available in Business Automation Studio, restart the setup job with the following command:
-
-```
-oc get job dba-adw-2001-setup -o json | jq 'del(.spec.selector)' | jq 'del(.spec.template.metadata.labels)' | kubectl replace --force -f -
-```
diff --git a/descriptors/cluster_role.yaml b/descriptors/cluster_role.yaml
index a794fdfa..e61de652 100644
--- a/descriptors/cluster_role.yaml
+++ b/descriptors/cluster_role.yaml
@@ -16,35 +16,47 @@ metadata:
   app.kubernetes.io/instance: ibm-dba
   app.kubernetes.io/managed-by: ibm-dba
   app.kubernetes.io/name: ibm-dba
-  release: 20.0.1
+  release: 20.0.2
 rules:
 - apiGroups:
   - security.openshift.io
-  attributeRestrictions: null
   resources:
   - securitycontextconstraints
   verbs:
+  - create
   - get
   - list
+  - patch
   - update
-  - create
   - watch
-  - patch
+  - use
 - apiGroups:
   - ""
-  attributeRestrictions: null
   resources:
   - securitycontextconstraints
+  - namespaces
   verbs:
+  - create
   - get
   - list
+  - patch
   - update
-  - create
   - watch
-  - patch
 - apiGroups:
   - icp4a.ibm.com
   resources:
   - '*'
   verbs:
+  - '*'
+- apiGroups:
+  - extensions
+  resources:
+  - podsecuritypolicies
+  verbs:
+  - '*'
+- apiGroups:
+  - policy
+  resources:
+  - podsecuritypolicies
+  verbs:
   - '*'
\ No newline at end of file
diff --git a/descriptors/cluster_role_binding.yaml b/descriptors/cluster_role_binding.yaml
index 5366e4e4..ff3f31b3 100644
--- a/descriptors/cluster_role_binding.yaml
+++ b/descriptors/cluster_role_binding.yaml
@@ -16,7 +16,7 @@ metadata:
   app.kubernetes.io/instance: ibm-dba
   app.kubernetes.io/managed-by: ibm-dba
   app.kubernetes.io/name: ibm-dba
-  release: 20.0.1
+  release: 20.0.2
 roleRef:
   name: ibm-cp4a-operator
 subjects:
diff --git a/descriptors/common-services/crds/app_registry.yaml b/descriptors/common-services/crds/app_registry.yaml
new file mode 100644
index 00000000..e9ccfcb5
--- /dev/null
+++ b/descriptors/common-services/crds/app_registry.yaml
@@ -0,0 +1,12 @@
+apiVersion: operators.coreos.com/v1
+kind: OperatorSource
+metadata:
+  name: opencloud-operators
+  namespace: openshift-marketplace
+spec:
+  authorizationToken: {}
+  displayName: IBMCS Operators
+  endpoint: https://quay.io/cnr
+  publisher: IBM
+  registryNamespace: opencloudio
+  type: appregistry
\ No newline at end of file
diff --git a/descriptors/common-services/crds/operator.ibm.com_operandconfigs_crd.yaml b/descriptors/common-services/crds/operator.ibm.com_operandconfigs_crd.yaml
new file mode 100755
index 00000000..5df80c87
--- /dev/null
+++ b/descriptors/common-services/crds/operator.ibm.com_operandconfigs_crd.yaml
@@ -0,0 +1,103 @@
+###############################################################################
+#
+# Licensed Materials - Property of IBM
+#
+# (C) Copyright IBM Corp. 2020. All Rights Reserved.
+#
+# US Government Users Restricted Rights - Use, duplication or
+# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+# +############################################################################### +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: operandconfigs.operator.ibm.com +spec: + additionalPrinterColumns: + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + - JSONPath: .status.phase + description: Current Phase + name: Phase + type: string + - JSONPath: .metadata.creationTimestamp + name: Created At + type: string + group: operator.ibm.com + names: + kind: OperandConfig + listKind: OperandConfigList + plural: operandconfigs + shortNames: + - opcon + singular: operandconfig + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: OperandConfig is the Schema for the operandconfigs API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OperandConfigSpec defines the desired state of OperandConfig + properties: + services: + description: Services is a list of configuration of service + items: + description: ConfigService defines the configuration of the service + properties: + name: + description: Name is the subscription name + type: string + spec: + additionalProperties: + type: object + description: Spec is the configuration map of custom resource + type: object + state: + description: State is a flag to enable or disable service + type: string + required: + - name + - spec + type: object + type: array + type: object + status: + description: OperandConfigStatus defines the observed state of OperandConfig + properties: + phase: + description: Phase describes the overall phase of operands in the OperandConfig + type: string + serviceStatus: + additionalProperties: + description: CrStatus defines the status of the custom resource + properties: + customResourceStatus: + additionalProperties: + description: ServicePhase defines the service status + type: string + type: object + type: object + description: ServiceStatus defines all the status of a operator + type: object + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true diff --git a/descriptors/common-services/crds/operator.ibm.com_operandregistries_crd.yaml b/descriptors/common-services/crds/operator.ibm.com_operandregistries_crd.yaml new file mode 100755 index 00000000..705d8fea --- /dev/null +++ b/descriptors/common-services/crds/operator.ibm.com_operandregistries_crd.yaml @@ -0,0 +1,155 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
+# +############################################################################### +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: operandregistries.operator.ibm.com +spec: + additionalPrinterColumns: + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + - JSONPath: .status.phase + description: Current Phase + name: Phase + type: string + - JSONPath: .metadata.creationTimestamp + name: Created At + type: string + group: operator.ibm.com + names: + kind: OperandRegistry + listKind: OperandRegistryList + plural: operandregistries + shortNames: + - opreg + singular: operandregistry + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: OperandRegistry is the Schema for the operandregistries API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: OperandRegistrySpec defines the desired state of OperandRegistry + properties: + operators: + description: Operators is a list of operator OLM definition + items: + description: Operator defines the desired state of Operators + properties: + channel: + description: Name of the channel to track + type: string + description: + description: Description of a common service + type: string + installPlanApproval: + description: Approval mode for emitted InstallPlans + type: string + name: + description: A unique name for the operator whose operand may + be deployed + type: string + namespace: + description: The namespace in which operator's operand should + be deployed + type: string + packageName: + description: Name of the package that defines the application + type: string + scope: + description: 'A scope indicator, either public or private Valid + values are: - "private" (default): deployment only request from + the containing names; - "public": deployment can be requested + from other namespaces;' + enum: + - public + - private + type: string + sourceName: + description: Name of a CatalogSource that defines where and how + to find the channel + type: string + sourceNamespace: + description: The Kubernetes namespace where the CatalogSource + used is located + type: string + targetNamespaces: + description: The target namespace of the OperatorGroup + items: + type: string + type: array + required: + - channel + - name + - packageName + - sourceName + - sourceNamespace + type: object + type: array + type: object + status: + description: OperandRegistryStatus defines the observed state of OperandRegistry + properties: + operatorsStatus: + additionalProperties: + description: OperatorStatus defines operators status and the number + of reconcile request + properties: + phase: + description: Phase is the state of operator + type: string + reconcileRequests: + description: RecondileRequests store the namespace/name of all + the requests + items: + description: ReconcileRequest records 
the information of the + operandRequest + properties: + name: + description: Name defines the name of request + type: string + namespace: + description: Namespace defines the namespace of request + type: string + required: + - name + - namespace + type: object + type: array + type: object + description: OperatorsStatus defines operators status and the number + of reconcile request + type: object + phase: + description: Phase describes the overall phase of operators in the OperandRegistry + type: string + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true diff --git a/descriptors/common-services/crds/operator.ibm.com_operandrequests_crd.yaml b/descriptors/common-services/crds/operator.ibm.com_operandrequests_crd.yaml new file mode 100755 index 00000000..9efe3591 --- /dev/null +++ b/descriptors/common-services/crds/operator.ibm.com_operandrequests_crd.yaml @@ -0,0 +1,183 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: operandrequests.operator.ibm.com +spec: + additionalPrinterColumns: + - JSONPath: .metadata.creationTimestamp + name: Age + type: date + - JSONPath: .status.phase + description: Current Phase + name: Phase + type: string + - JSONPath: .metadata.creationTimestamp + name: Created At + type: string + group: operator.ibm.com + names: + kind: OperandRequest + listKind: OperandRequestList + plural: operandrequests + shortNames: + - opreq + singular: operandrequest + scope: Namespaced + subresources: + status: {} + validation: + openAPIV3Schema: + description: OperandRequest is the Schema for the operandrequests API + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds' + type: string + metadata: + type: object + spec: + description: The OperandRequestSpec identifies one or more specific operands + (from a specific Registry) that should actually be installed + properties: + requests: + description: Requests defines a list of operands installation + items: + description: Request identifies a operand detail + properties: + description: + description: Description is an optional description for the request + type: string + operands: + description: Operands deines a list of the OperandRegistry entry + for the operand to be deployed + items: + description: Operand defines the name and binding information + for one operator + properties: + bindings: + additionalProperties: + description: SecretConfigmap is a pair of Secret and/or + Configmap + properties: + configmap: + description: The configmap field identifies a configmap + object, if any, that should be shared with the adopter/requestor + type: string + secret: + description: The secret field names an existing secret, + if any, that has been created and holds information + that is to be shared with the adopter. + type: string + type: object + description: The bindings section is used to specify names + of secret and/or configmap. + type: object + name: + description: Name of the operand to be deployed + type: string + required: + - name + type: object + type: array + registry: + description: Specifies the name in which the OperandRegistry reside. + type: string + registryNamespace: + description: Specifies the namespace in which the OperandRegistry + reside. The default is the current namespace in which the request + is defined. + type: string + required: + - operands + - registry + type: object + type: array + required: + - requests + type: object + status: + description: OperandRequestStatus defines the observed state of OperandRequest + properties: + conditions: + description: Conditions represents the current state of the Request + Service + items: + description: Condition represents the current state of the Request + Service A condition might not show up if it is not happening + properties: + lastTransitionTime: + description: Last time the condition transitioned from one status + to another + type: string + lastUpdateTime: + description: The last time this condition was updated + type: string + message: + description: A human readable message indicating details about + the transition + type: string + reason: + description: The reason for the condition's last transition + type: string + status: + description: Status of the condition, one of True, False, Unknown + type: string + type: + description: Type of condition. 
+ type: string + required: + - status + - type + type: object + type: array + members: + description: Members represnets the current operand status of the set + items: + description: MemberStatus show if the Operator is ready + properties: + name: + description: The member name are the same as the subscription + name + type: string + phase: + description: The operand phase include None, Creating, Running, + Failed + properties: + operandPhase: + description: OperandPhase show the deploy phase of the operator + instance + type: string + operatorPhase: + description: OperatorPhase show the deploy phase of the operator + type: string + type: object + required: + - name + type: object + type: array + phase: + description: Phase is the cluster running phase + type: string + type: object + type: object + version: v1alpha1 + versions: + - name: v1alpha1 + served: true + storage: true diff --git a/descriptors/common-services/crds/operator.yaml b/descriptors/common-services/crds/operator.yaml new file mode 100755 index 00000000..c99a8d48 --- /dev/null +++ b/descriptors/common-services/crds/operator.yaml @@ -0,0 +1,43 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: operand-deployment-lifecycle-manager +spec: + replicas: 1 + selector: + matchLabels: + name: operand-deployment-lifecycle-manager + template: + metadata: + labels: + name: operand-deployment-lifecycle-manager + app.kubernetes.io/instance: operand-deployment-lifecycle-manager + annotations: + productName: "IBM Cloud Platform Common Services" + productID: "068a62892a1e4db39641342e592daa25" + productVersion: "3.3.0" + productMetric: "FREE" + spec: + serviceAccountName: operand-deployment-lifecycle-manager + containers: + - name: operand-deployment-lifecycle-manager + # Replace this with the built image name + image: quay.io/opencloudio/odlm:1.1.0 + command: + - odlm + args: + - "-v=2" + imagePullPolicy: Always + env: + - name: WATCH_NAMESPACE + value: "" + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: OPERATOR_NAME + value: "operand-deployment-lifecycle-manager" \ No newline at end of file diff --git a/descriptors/common-services/crds/operator_group.yaml b/descriptors/common-services/crds/operator_group.yaml new file mode 100644 index 00000000..269cc9d9 --- /dev/null +++ b/descriptors/common-services/crds/operator_group.yaml @@ -0,0 +1,8 @@ +apiVersion: operators.coreos.com/v1alpha2 +kind: OperatorGroup +metadata: + name: operatorgroup + namespace: common-service +spec: + targetNamespaces: + - common-service \ No newline at end of file diff --git a/descriptors/common-services/crds/operator_operandconfig_cr.yaml b/descriptors/common-services/crds/operator_operandconfig_cr.yaml new file mode 100755 index 00000000..cede74cd --- /dev/null +++ b/descriptors/common-services/crds/operator_operandconfig_cr.yaml @@ -0,0 +1,71 @@ +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandConfig +metadata: + name: common-service +spec: + services: + - name: ibm-metering-operator + spec: + metering: {} + meteringUI: {} + - name: ibm-licensing-operator + spec: + IBMLicensing: {} + - name: ibm-mongodb-operator + spec: + mongoDB: {} + - name: ibm-cert-manager-operator + spec: + certManager: {} + issuer: {} + certificate: {} + clusterIssuer: {} + - name: ibm-iam-operator + spec: + authentication: {} + oidcclientwatcher: {} + pap: {} + policycontroller: {} + policydecision: {} + secretwatcher: {} + securityonboarding: {} + - name: 
ibm-healthcheck-operator + spec: + healthService: {} + - name: ibm-commonui-operator + spec: + commonWebUI: {} + legacyHeader: {} + - name: ibm-management-ingress-operator + spec: + managementIngress: {} + - name: ibm-ingress-nginx-operator + spec: + nginxIngress: {} + - name: ibm-auditlogging-operator + spec: + auditLogging: {} + - name: ibm-catalog-ui-operator + spec: + catalogUI: {} + - name: ibm-platform-api-operator + spec: + platformApi: {} + - name: ibm-helm-api-operator + spec: + helmApi: {} + - name: ibm-helm-repo-operator + spec: + helmRepo: {} + - name: ibm-monitoring-exporters-operator + spec: + exporter: {} + - name: ibm-monitoring-prometheusext-operator + spec: + prometheusExt: {} + - name: ibm-monitoring-grafana-operator + spec: + grafana: {} + - name: ibm-elastic-stack-operator + spec: + elasticStack: {} \ No newline at end of file diff --git a/descriptors/common-services/crds/operator_operandregistry_cr.yaml b/descriptors/common-services/crds/operator_operandregistry_cr.yaml new file mode 100755 index 00000000..5a3c4c08 --- /dev/null +++ b/descriptors/common-services/crds/operator_operandregistry_cr.yaml @@ -0,0 +1,144 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandRegistry +metadata: + name: common-service +spec: + operators: + - name: ibm-metering-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-metering-operator-app + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: The service used to meter workloads in a kubernetes cluster + - name: ibm-licensing-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-licensing-operator-app + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: The service used to management the license in a kubernetes cluster + - name: ibm-mongodb-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-mongodb-operator-app + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: The service used to create mongodb in a kubernetes cluster + - name: ibm-cert-manager-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-cert-manager-operator + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: Operator for managing deployment of cert-manager service. + - name: ibm-iam-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-iam-operator + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: Operator for managing deployment of iam service. + - name: ibm-healthcheck-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-healthcheck-operator-app + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: Operator for managing deployment of health check service. 
+ - name: ibm-commonui-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-commonui-operator-app + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: The service that services the login page, common header, LDAP, and Team resources pages + - name: ibm-management-ingress-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-management-ingress-operator-app + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: Operator for managing deployment of management ingress service. + - name: ibm-ingress-nginx-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-ingress-nginx-operator-app + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: Operator for managing deployment of ingress nginx service. + - name: ibm-auditlogging-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-auditlogging-operator-app + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: Operator for managing deployment of auditlogging service. + - name: ibm-catalog-ui-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-catalog-ui-operator-app + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: Operator for managing deployment of catalog UI service. + - name: ibm-platform-api-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-platform-api-operator-app + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: Operator for managing deployment of Platform API service. + - name: ibm-helm-api-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-helm-api-operator-app + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: Operator for managing deployment of Helm API service. + - name: ibm-helm-repo-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-helm-repo-operator-app + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: Operator for managing deployment of Helm repository service. + - name: ibm-monitoring-exporters-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-monitoring-exporters-operator-app + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: Operator to provision node-exporter, kube-state-metrics and collectd exporter with tls enabled. + - name: ibm-monitoring-prometheusext-operator + namespace: ibm-common-services + channel: stable-v1 + packageName: ibm-monitoring-prometheusext-operator-app + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + description: Operator to deploy Prometheus and Alertmanager instances with RBAC enabled. It will also enable Multicloud monitoring. + - channel: stable-v1 + description: Operator to deploy Grafana instances with RBAC enabled. + name: ibm-monitoring-grafana-operator + namespace: ibm-common-services + packageName: ibm-monitoring-grafana-operator-app + scope: private + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace + - channel: stable-v1 + description: Operator that installs and manages Elastic Stack logging service instances. 
+ name: ibm-elastic-stack-operator + namespace: ibm-common-services + packageName: ibm-elastic-stack-operator-app + scope: private + sourceName: opencloud-operators + sourceNamespace: openshift-marketplace diff --git a/descriptors/common-services/crds/operator_operandrequest_cr.yaml b/descriptors/common-services/crds/operator_operandrequest_cr.yaml new file mode 100755 index 00000000..19109963 --- /dev/null +++ b/descriptors/common-services/crds/operator_operandrequest_cr.yaml @@ -0,0 +1,28 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandRequest +metadata: + name: common-service +spec: + requests: + - registry: common-service + registryNamespace: ibm-common-services + operands: + - name: ibm-cert-manager-operator + - name: ibm-mongodb-operator + - name: ibm-iam-operator + - name: ibm-management-ingress-operator + - name: ibm-licensing-operator + - name: ibm-metering-operator + - name: ibm-commonui-operator + - name: ibm-monitoring-grafana-operator + - name: ibm-monitoring-prometheusext-operator diff --git a/descriptors/common-services/crds/operator_source.yaml b/descriptors/common-services/crds/operator_source.yaml new file mode 100644 index 00000000..abcf7884 --- /dev/null +++ b/descriptors/common-services/crds/operator_source.yaml @@ -0,0 +1,22 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +apiVersion: operators.coreos.com/v1 +kind: OperatorSource +metadata: + name: opencloud-operators + namespace: openshift-marketplace +spec: + authorizationToken: {} + displayName: IBMCS Operators + endpoint: https://quay.io/cnr + publisher: IBM + registryNamespace: opencloudio + type: appregistry \ No newline at end of file diff --git a/descriptors/common-services/crds/operator_subscription.yaml b/descriptors/common-services/crds/operator_subscription.yaml new file mode 100644 index 00000000..20c594f7 --- /dev/null +++ b/descriptors/common-services/crds/operator_subscription.yaml @@ -0,0 +1,21 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
+# +############################################################################### +apiVersion: operators.coreos.com/v1alpha1 +kind: Subscription +metadata: + name: ibm-common-service-operator + namespace: common-service +spec: + channel: stable-v1 # dev channel is for development purpose only + installPlanApproval: Automatic + name: ibm-common-service-operator + source: opencloud-operators + sourceNamespace: openshift-marketplace diff --git a/descriptors/common-services/roles/role.yaml b/descriptors/common-services/roles/role.yaml new file mode 100644 index 00000000..d65c0de7 --- /dev/null +++ b/descriptors/common-services/roles/role.yaml @@ -0,0 +1,82 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + creationTimestamp: null + name: operand-deployment-lifecycle-manager +rules: +- apiGroups: + - "" + resources: + - pods + - services + - services/finalizers + - endpoints + - persistentvolumeclaims + - events + - configmaps + - secrets + - namespaces + verbs: + - '*' +- apiGroups: + - apps + resources: + - deployments + - daemonsets + - replicasets + - statefulsets + verbs: + - '*' +- apiGroups: + - apps + resourceNames: + - operand-deployment-lifecycle-manager + resources: + - deployments/finalizers + verbs: + - update +- apiGroups: + - "" + resources: + - pods + verbs: + - get + - list +- apiGroups: + - apps + resources: + - replicasets + - deployments + verbs: + - get +- apiGroups: + - operator.ibm.com + - operators.coreos.com + resources: + - '*' + - operandregistries + - operandconfigs + - operandrequests + - operandbindinfos + verbs: + - '*' +- apiGroups: + - '*' + resources: + - '*' + verbs: + - '*' +- nonResourceURLs: + - '*' + verbs: + - '*' diff --git a/descriptors/common-services/roles/role_binding.yaml b/descriptors/common-services/roles/role_binding.yaml new file mode 100755 index 00000000..3a074c35 --- /dev/null +++ b/descriptors/common-services/roles/role_binding.yaml @@ -0,0 +1,22 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: operand-deployment-lifecycle-manager +subjects: +- kind: Group + name: system:serviceaccounts + apiGroup: rbac.authorization.k8s.io +roleRef: + kind: ClusterRole + name: operand-deployment-lifecycle-manager + apiGroup: rbac.authorization.k8s.io diff --git a/descriptors/common-services/roles/service_account.yaml b/descriptors/common-services/roles/service_account.yaml new file mode 100755 index 00000000..6bce2de0 --- /dev/null +++ b/descriptors/common-services/roles/service_account.yaml @@ -0,0 +1,14 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. 
+# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +apiVersion: v1 +kind: ServiceAccount +metadata: + name: operand-deployment-lifecycle-manager diff --git a/descriptors/common-services/scripts/common-services.sh b/descriptors/common-services/scripts/common-services.sh new file mode 100755 index 00000000..57f7c20d --- /dev/null +++ b/descriptors/common-services/scripts/common-services.sh @@ -0,0 +1,208 @@ +#!/bin/bash +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +ARG=$1 +ASYNC=$2 +NAMESPACE=cs-operands-installer +registry_namespace=default +registry_svc=docker-registry +LOCAL_PORT=5000 +CUR_DIR=$(pwd) +if [ -n "$(echo $CUR_DIR | grep scripts)" ]; then + PARENT_DIR=$(dirname "$PWD") +else + PARENT_DIR=$CUR_DIR +fi + +COMMON_SERVICES_INSTALL_DIRECTORY_OCP311=${PARENT_DIR}/descriptors/common-services/scripts/ + + +function create_image_bot() { + oc get namespace ibm-common-services &>/dev/null || oc create namespace ibm-common-services + oc -n ibm-common-services get serviceaccount image-bot &>/dev/null || oc -n ibm-common-services create serviceaccount image-bot + oc -n ibm-common-services policy add-role-to-user registry-editor system:serviceaccount:ibm-common-services:image-bot +} + +function set_registry_portforward() { + echo "Start registry port forward process" + local registry_port=$(oc get svc $registry_svc -n $registry_namespace -o jsonpath='{.spec.ports[0].port}') + local port_fwd_obj=$(oc get pods -n $registry_namespace | awk '/^docker-registry-/ {print $1}' | head -n1) + oc port-forward "$port_fwd_obj" -n "$registry_namespace" "$LOCAL_PORT:$registry_port" > .registry-pf.log 2>&1 & + wait_for_url_timed "http://localhost:$LOCAL_PORT" + sleep 5 +} + +function unset_registry_portforward() { + local pids=$(ps -ef | awk '/oc port-forward docker-registry/ {print $2}') + kill -9 $pids &>/dev/null +} + +function docker_login() { + docker login -u image-bot -p "$(oc -n ibm-common-services serviceaccounts get-token image-bot)" localhost:$LOCAL_PORT + if [[ $? -ne 0 ]]; then + echo "Docker login failed, please check the image registry in your cluster and try again" + exit 1 + fi + +function docker_logout() { + docker logout localhost:$LOCAL_PORT +} + +function upload() { + for file in $(ls ../offline) + do + if test -f "../offline/$file/image.tar"; then + echo "Load images for $file ..." + docker load --input ../offline/$file/image.tar + echo "Tag and push images for $file ..."
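+ # Retag each image listed in image.manifest into the cluster's internal registry (reached through the port-forward on localhost:$LOCAL_PORT), push it, then remove both local tags to reclaim disk space.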
+ for image in $(cat ../offline/$file/image.manifest); do + imageName=$(echo $image | awk -F "/" '{print $NF}') + docker tag $image "localhost:$LOCAL_PORT/ibm-common-services/$imageName" && docker push "localhost:$LOCAL_PORT/ibm-common-services/$imageName" + docker rmi "localhost:$LOCAL_PORT/ibm-common-services/$imageName" $image + done + else + echo "There is no image.tar file in the $file folder" + fi + done +} + +function wait_for_url_timed { + STARTTIME=$(date +%s) + url=$1 + max_wait=${2:-60*1000} + wait=0.2 + expire=$(($(time_now) + $max_wait)) + set +e + while [[ $(time_now) -lt $expire ]]; do + out=$(curl --max-time 2 -fs $url 2>/dev/null) + if [ $? -eq 0 ]; then + set -e + echo ${out} + ENDTIME=$(date +%s) + echo "Success accessing '$url' after $(($ENDTIME - $STARTTIME)) seconds" + return 0 + fi + sleep $wait + done + echo "ERROR: gave up waiting for $url" + set -e + return 1 +} + +function time_now() { + echo $(date +%s000) +} + +function offline_config() { + sed -i_orig "s|quay.io/opencloudio|docker-registry.default.svc:5000/ibm-common-services|g" configmap.yaml install.yaml uninstall.yaml +} + +function install() { + oc apply -f ${COMMON_SERVICES_INSTALL_DIRECTORY_OCP311}namespace.yaml + oc apply -f ${COMMON_SERVICES_INSTALL_DIRECTORY_OCP311}rbac.yaml + oc apply -f ${COMMON_SERVICES_INSTALL_DIRECTORY_OCP311}configmap.yaml + oc apply -f ${COMMON_SERVICES_INSTALL_DIRECTORY_OCP311}install.yaml + if [[ $ASYNC != "--async" ]]; then + waiting_complete "deploy" + fi + exit 0 +} + +function uninstall() { + # Launch the uninstall job first; waiting_complete "uninstall" watches for its pod + oc apply -f ${COMMON_SERVICES_INSTALL_DIRECTORY_OCP311}uninstall.yaml + if [[ $ASYNC != "--async" ]]; then + waiting_complete "uninstall" + fi + oc delete job cs-operands-install cs-operands-uninstall -n $NAMESPACE + oc delete -f ${COMMON_SERVICES_INSTALL_DIRECTORY_OCP311}rbac.yaml + oc delete -f ${COMMON_SERVICES_INSTALL_DIRECTORY_OCP311}configmap.yaml + oc delete -f ${COMMON_SERVICES_INSTALL_DIRECTORY_OCP311}namespace.yaml + oc delete namespace ibm-common-services + exit 0 +} + +# Wait for the common services job to complete +function waiting_complete() { + index=0 + retries=30 + while true; do + if [[ $index -eq $retries ]]; then + echo "Timed out waiting for the common services job" + exit 1 + fi + + if [[ $1 == "uninstall" ]]; then + latest_deploy=$(oc -n $NAMESPACE get pods -l 'operation=uninstall,control-plane=cs-operands' --sort-by=.metadata.creationTimestamp -o=name | sed "s/^.\{4\}//" | head -n1 2>/dev/null) + elif [[ $1 == "deploy" ]]; then + latest_deploy=$(oc -n $NAMESPACE get pods -l 'operation=deploy,control-plane=cs-operands' --sort-by=.metadata.creationTimestamp -o=name | sed "s/^.\{4\}//" | head -n1 2>/dev/null) + else + latest_deploy=$(oc -n $NAMESPACE get pods -l 'operation in (deploy,uninstall),control-plane=cs-operands' --sort-by=.metadata.creationTimestamp -o=name | sed "s/^.\{4\}//" | head -n1 2>/dev/null) + fi + + if [[ ! -z "$latest_deploy" ]]; then + DEPLOYING_STATUS=$(oc -n $NAMESPACE get pods $latest_deploy --no-headers | awk '{print $3}') + if [[ "$DEPLOYING_STATUS" == "Running" ]]; then + oc -n $NAMESPACE logs $latest_deploy -f + continue + elif [[ "$DEPLOYING_STATUS" == "Completed" ]]; then + echo "Common services job completed" + break + elif [[ "$DEPLOYING_STATUS" == "Error" ]]; then + echo "Common services job failed. Check deploy log with command: oc -n $NAMESPACE logs $latest_deploy" + exit 1 + else + index=$(( index + 1 )) + [[ $(( $index % 5 )) -eq 0 ]] && echo "Waiting for the common services job to start ..."
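+ # Any other pod status (for example Pending or ContainerCreating) means the job has not started yet; back off and poll again.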
+ sleep 10 + continue + fi + else + index=$(( index + 1 )) + [[ $(( $index % 5 )) -eq 0 ]] && echo "Waiting for the common services job to be created ..." + sleep 30 + continue + fi + done +} + +case $ARG in + upload) + create_image_bot + set_registry_portforward + docker_login + upload + docker_logout + unset_registry_portforward + ;; + offline-install) + create_image_bot + set_registry_portforward + docker_login + upload + docker_logout + unset_registry_portforward + offline_config + install + ;; + offline-uninstall) + offline_config + uninstall + ;; + install) + install + ;; + uninstall) + uninstall + ;; + *) + echo "Please enter a valid command: upload, install, uninstall, offline-install, offline-uninstall" + ;; +esac diff --git a/descriptors/common-services/scripts/configmap.yaml b/descriptors/common-services/scripts/configmap.yaml new file mode 100644 index 00000000..d6cfc588 --- /dev/null +++ b/descriptors/common-services/scripts/configmap.yaml @@ -0,0 +1,22 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +apiVersion: v1 +kind: ConfigMap +metadata: + name: operands-playbook-config + namespace: cs-operands-installer +data: + config.yaml: | + # Common services namespace; updating it is currently not supported + namespace: ibm-common-services + + # Image repository; update this only when using a custom registry + image_repo: quay.io/opencloudio diff --git a/descriptors/common-services/scripts/install.yaml b/descriptors/common-services/scripts/install.yaml new file mode 100644 index 00000000..9b13d4c0 --- /dev/null +++ b/descriptors/common-services/scripts/install.yaml @@ -0,0 +1,42 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +apiVersion: batch/v1 +kind: Job +metadata: + name: cs-operands-install + namespace: cs-operands-installer + labels: + control-plane: cs-operands +spec: + parallelism: 1 + completions: 1 + activeDeadlineSeconds: 7200 + backoffLimit: 0 + template: + metadata: + labels: + control-plane: cs-operands + operation: deploy + spec: + restartPolicy: Never + containers: + - image: quay.io/opencloudio/operands-playbook:1.1.0 + name: "install" + command: ["ansible-playbook", "-e", "@config.yaml", "/installer/playbook/install.yaml"] + imagePullPolicy: Always + volumeMounts: + - name: config-volume + mountPath: /installer/config.yaml + subPath: config.yaml + volumes: + - name: config-volume + configMap: + name: operands-playbook-config diff --git a/descriptors/common-services/scripts/namespace.yaml b/descriptors/common-services/scripts/namespace.yaml new file mode 100644 index 00000000..a70a2c4b --- /dev/null +++ b/descriptors/common-services/scripts/namespace.yaml @@ -0,0 +1,17 @@ +--- +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved.
+# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +apiVersion: v1 +kind: Namespace +metadata: + labels: + control-plane: cs-operands + name: cs-operands-installer diff --git a/descriptors/common-services/scripts/rbac.yaml b/descriptors/common-services/scripts/rbac.yaml new file mode 100644 index 00000000..76b10cc5 --- /dev/null +++ b/descriptors/common-services/scripts/rbac.yaml @@ -0,0 +1,22 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: cs-operands-playbook +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: cluster-admin +subjects: + - kind: ServiceAccount + name: default + namespace: cs-operands-installer diff --git a/descriptors/common-services/scripts/uninstall.yaml b/descriptors/common-services/scripts/uninstall.yaml new file mode 100644 index 00000000..090df4cd --- /dev/null +++ b/descriptors/common-services/scripts/uninstall.yaml @@ -0,0 +1,34 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
+# +############################################################################### +apiVersion: batch/v1 +kind: Job +metadata: + name: cs-operands-uninstall + namespace: cs-operands-installer + labels: + control-plane: cs-operands +spec: + parallelism: 1 + completions: 1 + activeDeadlineSeconds: 7200 + backoffLimit: 0 + template: + metadata: + labels: + control-plane: cs-operands + operation: uninstall + spec: + restartPolicy: Never + containers: + - image: quay.io/opencloudio/operands-playbook:1.1.0 + name: "uninstall" + command: ["ansible-playbook", "-e", "@config.yaml", "/installer/playbook/uninstall.yaml"] + imagePullPolicy: Always diff --git a/descriptors/cp4a-bronze-storage-class.yaml b/descriptors/cp4a-bronze-storage-class.yaml new file mode 100644 index 00000000..3b21c706 --- /dev/null +++ b/descriptors/cp4a-bronze-storage-class.yaml @@ -0,0 +1,16 @@ +apiVersion: storage.k8s.io/v1beta1 +kind: StorageClass +metadata: + name: cp4a-file-retain-bronze-gid + labels: + kubernetes.io/cluster-service: "true" +provisioner: ibm.io/ibmc-file +parameters: + type: "Endurance" + iopsPerGB: "2" + sizeRange: "[20-12000]Gi" + billingType: "hourly" + classVersion: "2" + gidAllocate: "true" +reclaimPolicy: Retain +volumeBindingMode: Immediate \ No newline at end of file diff --git a/descriptors/cp4a-gold-storage-class.yaml b/descriptors/cp4a-gold-storage-class.yaml new file mode 100644 index 00000000..08217bee --- /dev/null +++ b/descriptors/cp4a-gold-storage-class.yaml @@ -0,0 +1,16 @@ +apiVersion: storage.k8s.io/v1beta1 +kind: StorageClass +metadata: + name: cp4a-file-retain-gold-gid + labels: + kubernetes.io/cluster-service: "true" +provisioner: ibm.io/ibmc-file +parameters: + type: "Endurance" + iopsPerGB: "10" + sizeRange: "[20-4000]Gi" + billingType: "hourly" + classVersion: "2" + gidAllocate: "true" +reclaimPolicy: Retain +volumeBindingMode: Immediate \ No newline at end of file diff --git a/descriptors/cp4a-silver-storage-class.yaml b/descriptors/cp4a-silver-storage-class.yaml new file mode 100644 index 00000000..01eb76bb --- /dev/null +++ b/descriptors/cp4a-silver-storage-class.yaml @@ -0,0 +1,16 @@ +apiVersion: storage.k8s.io/v1beta1 +kind: StorageClass +metadata: + name: cp4a-file-retain-silver-gid + labels: + kubernetes.io/cluster-service: "true" +provisioner: ibm.io/ibmc-file +parameters: + type: "Endurance" + iopsPerGB: "4" + sizeRange: "[20-12000]Gi" + billingType: "hourly" + classVersion: "2" + gidAllocate: "true" +reclaimPolicy: Retain +volumeBindingMode: Immediate \ No newline at end of file diff --git a/descriptors/ibm_cp4a_cr_template.yaml b/descriptors/ibm_cp4a_cr_template.yaml deleted file mode 100644 index 58f12821..00000000 --- a/descriptors/ibm_cp4a_cr_template.yaml +++ /dev/null @@ -1,2089 +0,0 @@ -############################################################################### -# -# Licensed Materials - Property of IBM -# -# (C) Copyright IBM Corp. 2020. All Rights Reserved. -# -# US Government Users Restricted Rights - Use, duplication or -# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
-# -############################################################################### -apiVersion: icp4a.ibm.com/v1 -kind: ICP4ACluster -metadata: - name: demo-template - labels: - app.kubernetes.io/instance: ibm-dba - app.kubernetes.io/managed-by: ibm-dba - app.kubernetes.io/name: ibm-dba - release: 20.0.1 -spec: - # appVersion: 20.0.1 - ## shared configuration among all tribes - #shared_configuration: - # sc_deployment_type: "production" ### Possible values are "production", "non-production" - # image_pull_secrets: - # - image-pull-secret - # images: - # keytool_job_container: - # repository: cp.icr.io/cp/cp4a/ums/dba-keytool-jobcontainer - # tag: 20.0.1 - # dbcompatibility_init_container: - # repository: cp.icr.io/cp/cp4a/aae/dba-dbcompatibility-initcontainer - # tag: 20.0.1 - # keytool_init_container: - # repository: cp.icr.io/cp/cp4a/ums/dba-keytool-initcontainer - # tag: 20.0.1 - # umsregistration_initjob: - # repository: cp.icr.io/cp/cp4a/aae/dba-umsregistration-initjob - # tag: 20.0.1 - # pull_policy: IfNotPresent - # root_ca_secret: icp4a-root-ca - # sc_deployment_platform: OCP - # sc_deployment_hostname_suffix: "{{ meta.name }}" - # trusted_certificate_list: [] - # encryption_key_secret: icp4a-shared-encryption-key - #ldap_configuration: - # the candidate value is "IBM Security Directory Server" or "Microsoft Active Directory" - # lc_selected_ldap_type: "IBM Security Directory Server" - # lc_ldap_server: "" - # lc_ldap_port: "389" - # lc_bind_secret: ldap-bind-secret # secret is expected to have ldapUsername and ldapPassword keys - # lc_ldap_base_dn: "dc=hqpsidcdom,dc=com" - # lc_ldap_ssl_enabled: false - # lc_ldap_ssl_secret_name: "" - # lc_ldap_user_name_attribute: "*:cn" - # lc_ldap_user_display_name_attr: "cn" - # lc_ldap_group_base_dn: "dc=hqpsidcdom,dc=com" - # lc_ldap_group_name_attribute: "*:cn" - # lc_ldap_group_display_name_attr: "cn" - # lc_ldap_group_membership_search_filter: "(|(&(objectclass=groupofnames)(member={0}))(&(objectclass=groupofuniquenames)(uniquemember={0})))" - # lc_ldap_group_member_id_map: "groupofnames:member" - # lc_ldap_max_search_results: 4500 - # ca_ldap_configuration: - # lc_user_filter: "(&(cn={{ '{{' }}username{{ '}}'}})(objectclass=person))" - # lc_ldap_self_signed_crt: "" #true or false when lc_ldap_ssl_enabled: true - # ad: - # lc_ad_gc_host: "" - # lc_ad_gc_port: "" - # lc_user_filter: "(&(cn=%v)(objectclass=person))" - # lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))" - # tds: - # lc_user_filter: "(&(cn=%v)(objectclass=person))" - # lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))" - #ext_ldap_configuration: - # # the candidate value is "IBM Security Directory Server" or "Microsoft Active Directory" - # lc_selected_ldap_type: "IBM Security Directory Server" - # lc_ldap_server: "" - # lc_ldap_port: "389" - # lc_bind_secret: ext-ldap-bind-secret # secret is expected to have ldapUsername and ldapPassword keys - # lc_ldap_base_dn: "O=LOCAL" - # lc_ldap_ssl_enabled: false - # lc_ldap_ssl_secret_name: "" - # lc_ldap_user_name_attribute: "*:cn" - # lc_ldap_user_display_name_attr: "cn" - # lc_ldap_group_base_dn: "O=LOCAL" - # lc_ldap_group_name_attribute: "*:cn" - # lc_ldap_group_display_name_attr: "cn" - # lc_ldap_group_membership_search_filter: "(|(&(objectclass=groupofnames)(member={0}))(&(objectclass=groupofuniquenames)(uniquemember={0})))" - # lc_ldap_group_member_id_map: "groupofnames:member" - # ad: - #
lc_ad_gc_host: "" - # lc_ad_gc_port: "" - # lc_user_filter: "(&(cn=%v)(objectclass=person))" - # lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))" - # tds: - # lc_user_filter: "(&(cn=%v)(objectclass=person))" - # lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))" - #datasource_configuration: - # the candidate value is "db2" or "db2HADR" or "oracle" or "sqlserver" - # dc_gcd_datasource: - # dc_database_type: "db2" - # dc_common_gcd_datasource_name: "FNGCDDS" - # dc_common_gcd_xa_datasource_name: "FNGCDDSXA" - # database_servername: "" - # database_name: "GCDDB" - # database_port: "50000" - # dc_oracle_gcd_jdbc_url: "jdbc:oracle:thin:@//:1521/orcl" - # dc_hadr_standby_servername: "" - # dc_hadr_standby_port: "50000" - # dc_hadr_validation_timeout: 15 - # dc_hadr_retry_interval_for_client_reroute: 15 - # dc_hadr_max_retries_for_client_reroute: 3 - # dc_os_datasources: - # - dc_database_type: "db2" - # dc_common_os_datasource_name: "FNOS1DS" - # dc_common_os_xa_datasource_name: "FNOS1DSXA" - # database_servername: "" - # database_name: "OS1DB" - # database_port: "50000" - # dc_oracle_os_jdbc_url: "jdbc:oracle:thin:@//:1521/orcl" - # dc_hadr_standby_servername: "" - # dc_hadr_standby_port: "50000" - # dc_hadr_validation_timeout: 3 - # dc_hadr_retry_interval_for_client_reroute: 3 - # dc_hadr_max_retries_for_client_reroute: 3 - # - dc_database_type: "db2" - # dc_common_os_datasource_name: "FNOS2DS" - # dc_common_os_xa_datasource_name: "FNOS2DSXA" - # database_servername: "" - # database_name: "OS2DB" - # database_port: "50000" - # dc_oracle_os_jdbc_url: "jdbc:oracle:thin:@//:1521/orcl" - # dc_hadr_standby_servername: "" - # dc_hadr_standby_port: "50000" - # dc_hadr_validation_timeout: 3 - # dc_hadr_retry_interval_for_client_reroute: 3 - # dc_hadr_max_retries_for_client_reroute: 3 - # dc_icn_datasource: - # dc_database_type: "db2" - # dc_oracle_icn_jdbc_url: "jdbc:oracle:thin:@//:1521/orcl" - # dc_common_icn_datasource_name: "ECMClientDS" - # database_servername: "" - # database_port: "50000" - # database_name: "ICNDB" - # dc_hadr_standby_servername: "" - # dc_hadr_standby_port: "50000" - # dc_hadr_validation_timeout: 3 - # dc_hadr_retry_interval_for_client_reroute: 3 - # dc_hadr_max_retries_for_client_reroute: 3 - # dc_odm_datasource: - # dc_database_type: "db2" - # database_servername: "db2forodm" - # dc_common_database_port: "50000" - # dc_common_database_name: "db2db" - # dc_common_database_instance_secret: "" - # dc_common_database_instance_user: "db2user" # Will remove it, and use K8S dc_common_database_instance_secret Secret to replace it - # dc_common_database_instance_password: "{base64}UGFzc3cwcmQ0SypT" # Will remove it, and use K8S dc_common_database_instance_secret Secret to replace it - #dc_ums_datasource: # credentials are read from ums_configuration.db_secret_name - # # oauth database config - # dc_ums_oauth_type: db2 # derby (for test), db2, oracle - # dc_ums_oauth_host: - # dc_ums_oauth_port: 50000 - # dc_ums_oauth_name: UMSDB - # dc_ums_oauth_schema: OAuthDBSchema - # dc_ums_oauth_ssl: false - # dc_ums_oauth_ssl_secret_name: - # dc_ums_oauth_driverfiles: - # dc_ums_oauth_alternate_hosts: - # dc_ums_oauth_alternate_ports: - # teamserver database config - # dc_ums_teamserver_type: db2 # derby (for test), db2 - # dc_ums_teamserver_host: - # dc_ums_teamserver_port: 50000 - # dc_ums_teamserver_name: UMSDB - # dc_ums_teamserver_ssl: false - # 
dc_ums_teamserver_ssl_secret_name: - # dc_ums_teamserver_driverfiles: - # dc_ums_teamserver_alternate_hosts: - # dc_ums_teamserver_alternate_ports: -# dc_ca_datasource: -# dc_database_type: "db2" # This value can be db2 or db2HADR -# database_servername: "" -# database_name: "" -# tenant_databases: -# - tenant1 -# database_port: "" -# #for DB2 HA, set the variables below -# dc_hadr_standby_servername: "" -# dc_hadr_standby_port: 50000 -# dc_hadr_retry_interval_for_client_reroute: 15 -# dc_hadr_max_retries_for_client_reroute: 3 - # # Monitor setting - # monitoring_configuration: - # mon_metrics_writer_option: 4 - # mon_enable_plugin_pch: false - # mon_enable_plugin_mbean: false - # collectd_plugin_write_graphite_host: localhost - # collectd_plugin_write_graphite_port: 2003 - # collectd_interval: 10 - # collectd_disable_host_monitoring: false - # collectd_plugin_write_prometheus_port: 9103 - - # # Logging setting - # logging_configuration: - # mon_log_parse: false - # mon_log_service_endpoint: localhost:5044 - # private_logging_enabled: false - # logging_type: default - # mon_log_path: /path_to_extra_log - - ######################################################################## - ######## IBM FileNet Content Manager configuration ######## - ######################################################################## - #ecm_configuration: - # fncm_secret_name: ibm-fncm-secret - # fncm_auth_ca_secret_name: "{{ meta.name }}-fncm-auth-ca-secret" - # cpe: - # arch: - # amd64: "3 - Most preferred" - # replica_count: 1 - # image: - # repository: cp.icr.io/cp/cp4a/fncm/cpe - # tag: ga-554-p8cpe-if001 - # pull_policy: IfNotPresent - # ## Logging for workloads - # log: - # format: json - # ## resource - # resources: - # requests: - # cpu: 500m - # memory: 512Mi - # limits: - # cpu: 1 - # memory: 3072Mi - # ## Horizontal Pod Autoscaler - # auto_scaling: - # enabled: true - # max_replicas: 3 - # min_replicas: 1 - # target_cpu_utilization_percentage: 80 - # ## Route public hostname - # hostname: "" - # ## cpe Production setting - # cpe_production_setting: - # time_zone: Etc/UTC - # jvm_initial_heap_percentage: 18 - # jvm_max_heap_percentage: 33 - # # By default, the containers are configured to support OpenID/OAuth for SSO with User Management Services (UMS). - # # If SSO is not enabled for the deployment (i.e., if UMS is not being deployed), then set the following JVM value: - # # JVM_CUSTOMIZE_OPTIONS="-DFileNet.WSI.AutoDetectLTPAToken=true" - # # This enables the container to recognize WebSphere Liberty LTPA token where LDAP is used for authentication/authorization. - # jvm_customize_options: "-DFileNet.WSI.AutoDetectLTPAToken=true" - # gcd_jndi_name: FNGCDDS - # gcd_jndixa_name: FNGCDDSXA - # license_model: FNCM.PVUNonProd - # license: accept - # monitor_enabled: true - # logging_enabled: true - # collectd_enable_plugin_write_graphite: true - # ## Specify the names of existing persistent volume claims to be used by your application. 
- # datavolume: - # existing_pvc_for_cpe_cfgstore: "cpe-cfgstore" - # existing_pvc_for_cpe_logstore: "cpe-logstore" - # existing_pvc_for_cpe_filestore: "cpe-filestore" - # existing_pvc_for_cpe_icmrulestore: "cpe-icmrulesstore" - # existing_pvc_for_cpe_textextstore: "cpe-textextstore" - # existing_pvc_for_cpe_bootstrapstore: "cpe-bootstrapstore" - # existing_pvc_for_cpe_fnlogstore: "cpe-fnlogstore" - # probe: - # readiness: - # initial_delay_seconds: 120 - # period_seconds: 5 - # timeout_seconds: 10 - # failure_threshold: 6 - # liveness: - # initial_delay_seconds: 600 - # period_seconds: 5 - # timeout_seconds: 5 - # failure_threshold: 6 - # image_pull_secrets: - # name: "admin.registrykey" - # css: - # arch: - # amd64: "3 - Most preferred" - # replica_count: 1 - # image: - # repository: cp.icr.io/cp/cp4a/fncm/css - # tag: ga-554-p8css-if001 - # pull_policy: IfNotPresent - # ## Logging for workloads - # log: - # format: json - - # ## resource and autoscaling setting - # resources: - # requests: - # cpu: 500m - # memory: 512Mi - # limits: - # cpu: 1 - # memory: 4096Mi - # ## CSS Production setting - # css_production_setting: - # jvm_max_heap_percentage: 50 - # license: accept - # monitor_enabled: true - # logging_enabled: true - # collectd_enable_plugin_write_graphite: true - # ## Specify the names of existing persistent volume claims to be used by your application. - # datavolume: - # existing_pvc_for_css_cfgstore: "css-cfgstore" - # existing_pvc_for_css_logstore: "css-logstore" - # existing_pvc_for_css_tmpstore: "css-tempstore" - # existing_pvc_for_index: "css-indexstore" - # existing_pvc_for_css_customstore: "css-customstore" - # probe: - # readiness: - # initial_delay_seconds: 60 - # period_seconds: 5 - # timeout_seconds: 10 - # failure_threshold: 6 - # liveness: - # initial_delay_seconds: 180 - # period_seconds: 5 - # timeout_seconds: 5 - # failure_threshold: 6 - # image_pull_secrets: - # name: "admin.registrykey" - # cmis: - # arch: - # amd64: "3 - Most preferred" - # replica_count: 1 - # image: - # repository: cp.icr.io/cp/cp4a/fncm/cmis - # tag: ga-304-cmis-if010 - # pull_policy: IfNotPresent - # ## Logging for workloads - # log: - # format: json - - # ## resource - # resources: - # # We usually recommend not to specify default resources and to leave this as a conscious - # # choice for the user. This also increases chances charts run on environments with little - # # resources, such as Minikube. If you do want to specify resources, uncomment the following - # # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # requests: - # cpu: 500m - # memory: 256Mi - # limits: - # cpu: 1 - # memory: 1536Mi - - # ## Horizontal Pod Autoscaler - # auto_scaling: - # enabled: true - # max_replicas: 3 - # min_replicas: 1 - # target_cpu_utilization_percentage: 80 - # ## Route public hostname - # hostname: "" - # ## CMIS Production setting - # cmis_production_setting: - # cpe_url: - # time_zone: Etc/UTC - # jvm_initial_heap_percentage: 40 - # jvm_max_heap_percentage: 66 - # jvm_customize_options: "" - # checkout_copycontent: true - # default_maxitems: 25 - # cvl_cache: true - # secure_metadata_cache: false - # filter_hidden_properties: true - # querytime_limit: 180 - # resumable_queries_forrest: true - # escape_unsafe_string_characters: false - # max_soap_size: 180 - # print_pull_stacktrace: false - # folder_first_search: false - # ignore_root_documents: false - # supporting_type_mutability: false - # license: accept - # monitor_enabled: true - # logging_enabled: true - # collectd_enable_plugin_write_graphite: true - # ## global persistence settings - # datavolume: - # ## Specify the names of existing persistent volume claims to be used by your application. - # existing_pvc_for_cmis_cfgstore: "cmis-cfgstore" - # existing_pvc_for_cmis_logstore: "cmis-logstore" - # probe: - # readiness: - # initial_delay_seconds: 90 - # period_seconds: 5 - # timeout_seconds: 10 - # failure_threshold: 6 - # liveness: - # initial_delay_seconds: 180 - # period_seconds: 5 - # timeout_seconds: 5 - # failure_threshold: 6 - # image_pull_secrets: - # name: "admin.registrykey" - # graphql: - # arch: - # amd64: "3 - Most preferred" - # replica_count: 1 - # image: - # repository: cp.icr.io/cp/cp4a/fncm/graphql - # tag: ga-554-p8cgql-if001 - # pull_policy: IfNotPresent - # ## resource - # resources: - # requests: - # cpu: 500m - # memory: 512Mi - # limits: - # cpu: 1 - # memory: 1536Mi - # ## Horizontal Pod Autoscaler - # auto_scaling: - # enabled: true - # max_replicas: 1 - # min_replicas: 1 - # target_cpu_utilization_percentage: 80 - # ## Route public hostname - # hostname: "" - # ## GraphQL Production setting - # graphql_production_setting: - # time_zone: Etc/UTC - # jvm_initial_heap_percentage: 40 - # jvm_max_heap_percentage: 66 - # jvm_customize_options: "" - # license_model: FNCM.PVUNonProd - # license: accept - # enable_graph_iql: false - # cpe_uri: http://:9080/wsi/FNCEWS40MTOM - # ## Monitor setting and Logging setting - # monitor_enabled: true - # logging_enabled: true - # collectd_enable_plugin_write_graphite: true - # ## Specify the names of existing persistent volume claims to be used by your application. 
- # datavolume: - # existing_pvc_for_graphql_cfgstore: "graphql-cfgstore" - # existing_pvc_for_graphql_logstore: "graphql-logstore" - # probe: - # readiness: - # initial_delay_seconds: 120 - # period_seconds: 5 - # timeout_seconds: 10 - # failure_threshold: 6 - # liveness: - # initial_delay_seconds: 600 - # period_seconds: 5 - # timeout_seconds: 5 - # failure_threshold: 6 - # image_pull_secrets: - # name: "admin.registrykey" - # es: - # arch: - # amd64: "3 - Most preferred" - # replica_count: 1 - # image: - # repository: cp.icr.io/cp/cp4a/fncm/extshare - # tag: ga-307-es-if002 - # pull_policy: IfNotPresent - # ## resource - # resources: - # requests: - # cpu: 500m - # memory: 512Mi - # limits: - # cpu: 1 - # memory: 1536Mi - # ## Horizontal Pod Autoscaler - # auto_scaling: - # enabled: true - # max_replicas: 3 - # min_replicas: 1 - # target_cpu_utilization_percentage: 80 - # ## Route public hostname - # hostname: "" - # ## External Share Production setting - # es_production_setting: - # time_zone: Etc/UTC - # jvm_initial_heap_percentage: 40 - # jvm_max_heap_percentage: 66 - # jvm_customize_options: "" - # license_model: FNCM.PVUNonProd - # license: accept - # allowed_origins: "" - # ## Monitor setting and Logging setting - # monitor_enabled: true - # logging_enabled: true - # collectd_enable_plugin_write_graphite: true - # ## Specify the names of existing persistent volume claims to be used by your application. - # datavolume: - # existing_pvc_for_es_cfgstore: "es-cfgstore" - # existing_pvc_for_es_logstore: "es-logstore" - # probe: - # readiness: - # initial_delay_seconds: 180 - # period_seconds: 5 - # timeout_seconds: 10 - # failure_threshold: 6 - # liveness: - # initial_delay_seconds: 600 - # period_seconds: 5 - # timeout_seconds: 5 - # failure_threshold: 6 - # image_pull_secrets: - # name: "admin.registrykey" - # tm: - # arch: - # amd64: "3 - Most preferred" - # replica_count: 1 - # image: - # repository: cp.icr.io/cp/cp4a/fncm/taskmgr - # tag: ga-307-tm-if002 - # pull_policy: IfNotPresent - # ## Logging for workloads - # log: - # format: JSON - # ## resource - # resources: - # requests: - # cpu: 500m - # memory: 512Mi - # limits: - # cpu: 1 - # memory: 1536Mi - # ## Horizontal Pod Autoscaler - # auto_scaling: - # enabled: true - # max_replicas: 3 - # min_replicas: 1 - # target_cpu_utilization_percentage: 80 - # ## Task Manager Production setting - # tm_production_setting: - # time_zone: Etc/UTC - # jvm_initial_heap_percentage: 40 - # jvm_max_heap_percentage: 66 - # jvm_customize_options: "-Dcom.ibm.ecm.task.StartUpListener.defaultLogLevel=FINE" - # license: accept - # tm_dbtype: db2 - # tm_jndi_ds: ECMClientDS - # tm_schema: ICNDB - # tm_ts: ICNDB - # tm_admin: CEADMIN - - # ## Monitor setting and Logging setting - # monitor_enabled: true - # logging_enabled: true - # collectd_enable_plugin_write_graphite: true - # ## Specify the names of existing persistent volume claims to be used by your application.
- # datavolume: - # existing_pvc_for_tm_cfgstore: "tm-cfgstore" - # existing_pvc_for_tm_logstore: "tm-logstore" - # existing_pvc_for_tm_pluginstore: "tm-pluginstore" - # probe: - # readiness: - # initial_delay_seconds: 120 - # period_seconds: 5 - # timeout_seconds: 10 - # failure_threshold: 6 - # liveness: - # initial_delay_seconds: 600 - # period_seconds: 5 - # timeout_seconds: 5 - # failure_threshold: 6 - # image_pull_secrets: - # name: "admin.registrykey" - - ######################################################################## - ######## IBM Business Automation Navigator configuration ######## - ######################################################################## - #navigator_configuration: - # ban_secret_name: ibm-ban-secret - # arch: - # amd64: "3 - Most preferred" - # replica_count: 1 - # image: - # repository: cp.icr.io/cp/cp4a/ban/navigator-sso - ### For non-SSO Navigator image use below image - # repository: cp.icr.io/cp/cp4a/ban/navigator - # tag: ga-307-icn-if002 - # pull_policy: IfNotPresent - # arbitrary_uid_enabled: true - # ## Logging for workloads - # log: - # format: json - # ## resource and autoscaling setting - # resources: - # # We usually recommend not to specify default resources and to leave this as a conscious - # # choice for the user. This also increases chances charts run on environments with little - # # resources, such as Minikube. If you do want to specify resources, uncomment the following - # # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # requests: - # cpu: 500m - # memory: 512Mi - # limits: - # cpu: 1 - # memory: 1536Mi - - # ## Horizontal Pod Autoscaler - # auto_scaling: - # enabled: true - # max_replicas: 3 - # min_replicas: 1 - # target_cpu_utilization_percentage: 80 - # ## Route public hostname - # hostname: "" - # ## ICN Production setting - # icn_production_setting: - # timezone: Etc/UTC - # jvm_initial_heap_percentage: 40 - # jvm_max_heap_percentage: 66 - # # By default, the containers are configured to support OpenID/OAuth for SSO with User Management Services (UMS). - # # If SSO is not enabled for the deployment (i.e., if UMS is not being deployed), then set the following JVM value: - # # JVM_CUSTOMIZE_OPTIONS="-DFileNet.WSI.AutoDetectLTPAToken=true" - # # This enables the container to recognize WebSphere Liberty LTPA token where LDAP is used for authentication/authorization. - # jvm_customize_options: "-DFileNet.WSI.AutoDetectLTPAToken=true" - # icn_db_type: db2 - # icn_jndids_name: ECMClientDS - # icn_schema: ICNDB - # icn_table_space: ICNDB - # icn_admin: CEADMIN - # license: accept - # enable_appcues: false - # allow_remote_plugins_via_http: false - # monitor_enabled: true - # logging_enabled: true - # collectd_enable_plugin_write_graphite: true - # ## Specify the names of existing persistent volume claims to be used by your application. - # ## Specify an empty string if you don't have existing persistent volume claim. 
- # datavolume: - # existing_pvc_for_icn_cfgstore: "icn-cfgstore" - # existing_pvc_for_icn_logstore: "icn-logstore" - # existing_pvc_for_icn_pluginstore: "icn-pluginstore" - # existing_pvc_for_icnvw_cachestore: "icn-vw-cachestore" - # existing_pvc_for_icnvw_logstore: "icn-vw-logstore" - # existing_pvc_for_icn_aspera: "icn-asperastore" - # probe: - # readiness: - # initial_delay_seconds: 120 - # period_seconds: 5 - # timeout_seconds: 10 - # failure_threshold: 6 - # liveness: - # initial_delay_seconds: 600 - # period_seconds: 5 - # timeout_seconds: 5 - # failure_threshold: 6 - # image_pull_secrets: - # name: "admin.registrykey" - - # ######################################################################## - # ######## IBM FNCM and BAN initialization configuration ######## - # ######################################################################## - #initialize_configuration: - # ic_domain_creation: - # domain_name: "P8DOMAIN" - # encryption_key: "128" - # ic_ldap_creation: - # ic_ldap_admin_user_name: - # - "CEAdmin" - # ic_ldap_admins_groups_name: - # - "P8Administrators" - # ic_ldap_name: "ldap_name" - # ic_obj_store_creation: - # object_stores: - # - oc_cpe_obj_store_display_name: "OS01" - # oc_cpe_obj_store_symb_name: "OS01" - # oc_cpe_obj_store_conn: - # name: "objectstore1_connection" - # site_name: "InitialSite" - # dc_os_datasource_name: "FNOS1DS" - # dc_os_xa_datasource_name: "FNOS1DSXA" - # oc_cpe_obj_store_admin_user_groups: - # - "CEAdmin" - # # Array of users - # oc_cpe_obj_store_basic_user_groups: - # oc_cpe_obj_store_addons: true - # oc_cpe_obj_store_addons_list: - # - "{CE460ADD-0000-0000-0000-000000000004}" - # - "{CE460ADD-0000-0000-0000-000000000001}" - # - "{CE460ADD-0000-0000-0000-000000000003}" - # - "{CE460ADD-0000-0000-0000-000000000005}" - # - "{CE511ADD-0000-0000-0000-000000000006}" - # - "{CE460ADD-0000-0000-0000-000000000008}" - # - "{CE460ADD-0000-0000-0000-000000000007}" - # - "{CE460ADD-0000-0000-0000-000000000009}" - # - "{CE460ADD-0000-0000-0000-00000000000A}" - # - "{CE460ADD-0000-0000-0000-00000000000B}" - # - "{CE460ADD-0000-0000-0000-00000000000D}" - # - "{CE511ADD-0000-0000-0000-00000000000F}" - # oc_cpe_obj_store_asa_name: "demo_storage" - # oc_cpe_obj_store_asa_file_systems_storage_device_name: "demo_file_system_storage" - # oc_cpe_obj_store_asa_root_dir_path: "/opt/ibm/asa/os01_storagearea1" - # oc_cpe_obj_store_enable_workflow: true - # oc_cpe_obj_store_workflow_region_name: "design_region_name" - # oc_cpe_obj_store_workflow_region_number: 1 - # oc_cpe_obj_store_workflow_data_tbl_space: "VWDATA_TS" - # oc_cpe_obj_store_workflow_index_tbl_space: "" - # oc_cpe_obj_store_workflow_blob_tbl_space: "" - # oc_cpe_obj_store_workflow_admin_group: "P8Administrators" - # oc_cpe_obj_store_workflow_config_group: "P8Administrators" - # oc_cpe_obj_store_workflow_date_time_mask: "mm/dd/yy hh:tt am" - # oc_cpe_obj_store_workflow_locale: "en" - # oc_cpe_obj_store_workflow_pe_conn_point_name: "pe_conn_os1" - # ic_css_creation: - # - css_site_name: "Initial Site" - # css_text_search_server_name: "{{ meta.name }}-css-1" - # affinity_group_name: "aff_group" - # css_text_search_server_status: 0 - # css_text_search_server_mode: 0 - # css_text_search_server_ssl_enable: "true" - # css_text_search_server_credential: "RNUNEWc=" - # css_text_search_server_host: "{{ meta.name }}-css-svc-1" - # css_text_search_server_port: 8199 - # ic_css_index_area: - # - object_store_name: "OS01" - # index_area_name: "os1_index_area" - # affinity_group_name: "aff_group" - # root_dir: 
"/opt/ibm/indexareas" - # max_indexes: 20 - # max_objects_per_index: 10000 - # ic_enable_cbr: - # - object_store_name: "OS01" - # class_name: "Document" - # indexing_languages: "en" - # ic_icn_init_info: - # icn_repos: - # - add_repo_id: "demo_repo1" - # add_repo_ce_wsi_url: "http://{{ meta.name }}-cpe-svc:9080/wsi/FNCEWS40MTOM/" - # add_repo_os_sym_name: "OS01" - # add_repo_os_dis_name: "OS01" - # add_repo_workflow_enable: false - # add_repo_work_conn_pnt: "pe_conn_os1:1" - # add_repo_protocol: "FileNetP8WSI" - # # - add_repo_id: "test_repo2" - # # add_repo_ce_wsi_url: "http://{{ meta.name }}-cpe-svc:9080/wsi/FNCEWS40MTOM/" - # # add_repo_os_sym_name: "OS02" - # # add_repo_os_dis_name: "OS02" - # # add_repo_workflow_enable: true - # # add_repo_work_conn_pnt: "pe_conn_os02:1" - # # add_repo_protocol: "FileNetP8WSI" - # icn_desktop: - # - add_desktop_id: "demo" - # add_desktop_name: "icn_desktop" - # add_desktop_description: "This is ICN desktop" - # add_desktop_is_default: false - # add_desktop_repo_id: "demo_repo1" - # add_desktop_repo_workflow_enable: false - # # - add_desktop_id: "demotest" - # # add_desktop_name: "icn_desktop_demo" - # # add_desktop_description: "Just Another desktop" - # # add_desktop_is_default: false - # # add_desktop_repo_id: "test_repo2" - # # add_desktop_repo_workflow_enable: false - - ######################################################################## - ######## IBM FNCM and BAN verification configuration ######## - ######################################################################## - #verify_configuration: - # vc_cpe_verification: - # vc_cpe_folder: - # - folder_cpe_obj_store_name: "OS01" - # folder_cpe_folder_path: "/TESTFOLDER" - # vc_cpe_document: - # - doc_cpe_obj_store_name: "OS01" - # doc_cpe_folder_name: "/TESTFOLDER" - # doc_cpe_doc_title: "test_title" - # DOC_CPE_class_name: "Document" - # doc_cpe_doc_content: "This is a simple document test" - # doc_cpe_doc_content_name: "doc_content_name" - # vc_cpe_cbr: - # - cbr_cpe_obj_store_name: "OS01" - # cbr_cpe_class_name: "Document" - # cbr_cpe_search_string: "is a simple" - # vc_cpe_workflow: - # - workflow_cpe_enabled: false - # workflow_cpe_connection_point: "pe_conn_os1" - # vc_icn_verification: - # - vc_icn_repository: "demo_repo1" - # vc_icn_desktop_id: "demo" - - ######################################################################## - ######## IBM Operational Decision Manager Configuration ######## - ######################################################################## - - # odm_configuration: - # # Allow to activate more trace for ODM in the Operator pod. - # debug: false - # # Allow to specify which version of ODM you want to deploy. - # # Supported version > 19.0.2 - # # If omitted the latest version will be used. - # version: 20.0.1 - # image: - # # Specify the repository used to retrieve the Docker images if you do not want to use the default one. - # repository: "" - # # Specify the tag for the Docker images. - # # It's a Mandatory tag when you enable odm_configuraton. - # tag: 8.10.3.0_ICP2001 - # # Specify the pull policy for the Docker images. See Kuberntes documentation for more inforations. - # # Possible values : IfNotPresent, Always, Never - # pullPolicy: IfNotPresent - # # Optionally specify an array of imagePullSecrets. - # # Secrets must be manually created in the namespace. - # # Ex: pullSecrets: "mypullsecret" - # pullSecrets: - # - # service: - # # Specify whether to enable Transport Layer Security. If true, ODM web apps are accessed through HTTPS. 
If false, they are accessed through HTTP. - # enableTLS: true - # # Specify the service type. - # type: NodePort - # - # ## Decision Server Runtime parameters - # decisionServerRuntime: - # # Specify whether to enable Decision Server Runtime. - # enabled: true - # # Specify the number of Decision Server Runtime pods. - # replicaCount: 1 - # # Specify the name of the configMap the wanted logging options. If left empty, default logging options are used. - # loggingRef: - # # Specify the name of the configMap the wanted JVM options. If left empty, default JVM options are used. - # jvmOptionsRef: - # # Specify the name of the configmap that contains the XU configuration property file. - # xuConfigRef: - # resources: - # requests: - # # Specify the requested CPU. - # cpu: 500m - # # Specify the requested memory. - # memory: 512Mi - # limits: - # # Specify the CPU limit. - # cpu: 2 - # # Specify the memory limit. - # memory: 4096Mi - # ## Decision Server Console parameters - # decisionServerConsole: - # # Specify the name of the configMap the wanted logging options. If left empty, default logging options are used. - # loggingRef: - # # Specify the name of the configMap the wanted JVM options. If left empty, default JVM options are used. - # jvmOptionsRef: - # resources: - # requests: - # # Specify the requested CPU. - # cpu: 500m - # # Specify the requested memory. - # memory: 512Mi - # limits: - # # Specify the CPU limit. - # cpu: 2 - # # Specify the memory limit. - # memory: 1024Mi - # ## Decision Center parameters - # decisionCenter: - # # Specify whether to enable Decision Center. - # enabled: true - # # Specify the persistence locale for Decision Center. - # # Possible values "ar_EG" (Arabic - Egypt), "zh_CN" (Chinese - China), "zh_TW" (Chinese - Taiwan) - # # "nl_NL" (Netherlands), "en_GB" (English - United Kingdom), "en_US" (English - United States), - # # "fr_FR" (French - France), "de_DE" (German - Germany), "iw_IL" (Hebrew - Israel), "it_IT" (Italian - Italy), - # # "ja_JP" (Japanese - Japan) , "ko_KR" (Korean - Korea), "pl_PL" (Polish - Poland), - # # "pt_BR" (Portuguese - Brazil), "ru_RU" (Russian - Russia), "es_ES" (Spanish - Spain) - # persistenceLocale: en_US - # # Specify the number of Decision Center pods. - # replicaCount: 1 - # # Persistent Volume Claim to access the custom libraries - # customlibPvc: - # # Specify the name of the configMap the wanted logging options. If left empty, default logging options are used. - # loggingRef: - # # Specify the name of the configMap the wanted JVM options. If left empty, default JVM options are used. - # jvmOptionsRef: - # resources: - # requests: - # # Specify the requested CPU. - # cpu: 500m - # # Specify the requested memory. - # memory: 1500Mi - # limits: - # # Specify the CPU limit. - # cpu: 2 - # # Specify the memory limit. - # memory: 4096Mi - # ## Decision Runner parameters - # decisionRunner: - # # Specify whether to enable Decision Runner. - # enabled: true - # # Specify the number of Decision Runner pods. - # replicaCount: 1 - # # Specify the name of the configMap the wanted logging options. If left empty, default logging options are used. - # loggingRef: - # # Specify the name of the configMap the wanted JVM options. If left empty, default JVM options are used. - # jvmOptionsRef: - # resources: - # requests: - # # Specify the requested CPU. - # cpu: 500m - # # Specify the requested memory. - # memory: 512Mi - # limits: - # # Specify the CPU limit. - # cpu: 2 - # # Specify the memory limit. 
- # memory: 4096Mi - # - # ## Database - Option 1: Internal (PostgreSQL) - # ## Fill in the parameters to use an internal PostgresSQL database. - # internalDatabase: - # # Specify the name of the internal database. - # databaseName: odmdb - # # Specify the name of the secret that contains the credentials to connect to the internal database. - # secretCredentials: "" - # persistence: - # # Specify whether to enable persistence for the internal database in a persistent volume. - # enabled: true - # # When this parameter is false, the binding process selects an existing volume. Ensure that an unbound volume exists before you install the chart. - # useDynamicProvisioning: false - # # Specify the storage class name for persistent volume. If this parameter is left empty, the default storage class is used. - # storageClassName: "" - # resources: - # requests: - # # Specify the storage size for persistent volume. - # storage: 5Gi - # securityContext: - # # User to init internal database container - # runAsUser: 0 - # resources: - # requests: - # # Specify the requested CPU. - # cpu: 500m - # # Specify the requested memory. - # memory: 512Mi - # limits: - # # Specify the CPU limit. - # cpu: 2 - # # Specify the memory limit. - # memory: 4096Mi - # - # ## Database - Option 2: External (DB2 or PostgreSQL) - # ## Fill in the parameters to use an external DB2 or PostgreSQL database. - # externalDatabase: - # # Specify the type of the external database. If this parameter is left empty, PostgreSQL is used by default. - # # Possible values : "db2", "postgresql" - # type: "" - # # Specify the name of the server running the external database. If it is not specified, the PostgreSQL internal database is used. - # serverName: "" - # # Specify the name of the external database. - # databaseName: "" - # # Specify the name of the secret that contains the credentials to connect to the external database. - # secretCredentials: "" - # # Specify the port used to connect to the external database. - # port: "" - # - # ## Database - Option 3: External (Custom) - # ## Fill in the parameters to use an external database configured by a secret. - # externalCustomDatabase: - # # Specify the name of the secret that contains the datasource configuration to use. - # datasourceRef: - # # Persistent Volume Claim to access the JDBC Database Driver - # driverPvc: - # - # readinessProbe: - # # Specify the number of seconds after the container has started before readiness probe is initiated. - # initialDelaySeconds: 5 - # # Specify how often (in seconds) to perform the probe. - # periodSeconds: 5 - # # Specify how many times Kubernetes will try before giving up when a pod starts and the probe fails. Giving up means marking the pod as Unready. - # failureThreshold: 45 - # # Specify the number of seconds after which the readiness probe times out. - # timeoutSeconds: 5 - # - # livenessProbe: - # # Specify the number of seconds after the container has started before liveness probe is initiated. - # initialDelaySeconds: 300 - # # Specify how often (in seconds) to perform the probe. - # periodSeconds: 10 - # # Specify how many times Kubernetes will try before giving up when a pod starts and the probe fails. Giving up means restarting the pod. - # failureThreshold: 10 - # # Specify the number of seconds after which the liveness probe times out. - # timeoutSeconds: 5 - # - # customization: - # # Specify the name of the secret that contains the TLS certificate you want to use. If the parameter is left empty, a default certificate is used. 
- # securitySecretRef: - # # Specify the name of the secret that contains the configuration files required to use the IBM Business Automation Insights emitter. - # baiEmitterSecretRef: - # # Specify the label attached to some nodes. Pods are scheduled to the nodes with this label. If the parameter is empty, pods are scheduled randomly. - # authSecretRef: - # - # oidc: - # # Specify whether to enable OpenId Authentication. - # enabled: false - # # Specify the OIDC Server Host - # host: - # # Specify the OIDC Server Port - # port: - # # Specify the name of the secret that contains the admin username and password to connect to the OpenId Server. - # adminRef: - # # Specify the OIDC Redirect Uris - # redirectUris: - # # Specify the OIDC Client Secret reference - # clientRef: - # # Specify the OIDC Provider name - # provider: - # # Specify the OIDC Allowed Domains - # allowedDomains: - # - # networkPolicy: - # # Enable creation of NetworkPolicy resources. - # enabled: true - # # For Kubernetes v1.4, v1.5 and v1.6, use 'extensions/v1beta1' - # # For Kubernetes v1.7, use 'networking.k8s.io/v1' - # apiVersion: networking.k8s.io/v1 - # - #ums_configuration: - # existing_claim_name: - # replica_count: 2 - # service_type: Route - # hostname: - # port: 443 - # images: - # ums: - # repository: cp.icr.io/cp/cp4a/ums/ums - # tag: 20.0.1 - # admin_secret_name: ibm-dba-ums-secret - # # optional for secure communication with UMS - # external_tls_secret_name: ibm-dba-ums-external-tls-secret - # # optional for secure communication with UMS - # external_tls_ca_secret_name: ibm-dba-ums-external-tls-ca-secret - # # optional for secure communication with UMS - # external_tls_teams_secret_name: ibm-dba-ums-external-tls-teams-secret - # # optional for secure communication with UMS - # external_tls_scim_secret_name: ibm-dba-ums-external-tls-scim-secret - # # optional for secure communication with UMS - # external_tls_sso_secret_name: ibm-dba-ums-external-tls-sso-secret - # oauth: - # # optional: full DN of an LDAP group that is authorized to manage OIDC clients, in addition to primary admin from admin secret - # client_manager_group: - # # optional: full DN of an LDAP group that is authorized to manage app_tokens, in addition to primary admin from admin secret - # token_manager_group: - # # optional: lifetime of OAuth access_tokens. default is 7200s - # access_token_lifetime: - # # optional: lifetime of app-tokens. default is 366d - # app_token_lifetime: - # # optional: lifetime of app-passwords. default is 366d - # app_password_lifetime: - # # optional: maximum number of app-tokens or app-passwords per client. default is 100 - # app_token_or_password_limit: - # # optional: encoding / encryption when storing client secrets in the OAuth database. Default is xor for compatibility.
Recommended value is PBKDF2WithHmacSHA512 - # client_secret_encoding: - # resources: - # limits: - # cpu: 500m - # memory: 512Mi - # requests: - # cpu: 200m - # memory: 256Mi - # ## Horizontal Pod Autoscaler - # autoscaling: - # enabled: true - # min_replicas: 2 - # max_replicas: 5 - # target_average_utilization: 98 - # use_custom_jdbc_drivers: false - # use_custom_binaries: false - # custom_secret_name: - # custom_xml: - # logs: - # console_format: json - # console_log_level: INFO - # console_source: message,trace,accessLog,ffdc,audit - # trace_format: ENHANCED - # trace_specification: "*=info" - ##################################################################### - ## IBM App Engine production configuration ## - ##################################################################### - #application_engine_configuration: - # ## The application_engine_configuration is a list; you can deploy multiple instances of App Engine and assign a different configuration to each instance. - # ## For each instance, application_engine_configuration.name and application_engine_configuration.name.hostname must be assigned to different values. - # - name: instance1 - # hostname: - # port: 443 - # admin_secret_name: - # admin_user: - # external_tls_secret: - # external_connection_timeout: - # replica_size: 1 - # user_custom_jdbc_drivers: false - # service_type: Route - # autoscaling: - # enabled: false - # max_replicas: 5 - # min_replicas: 2 - # target_cpu_utilization_percentage: 80 - # database: - # host: - # name: - # port: - # ## If you set up DB2 HADR and want to use it, configure both alternative_host and alternative_port; otherwise, leave them blank. - # alternative_host: - # alternative_port: - # ## Only DB2 is supported - # type: db2 - # enable_ssl: false - # db_cert_secret_name: - # current_schema: DBASB - # initial_pool_size: 1 - # max_pool_size: 10 - # uv_thread_pool_size: 4 - # max_lru_cache_size: 1000 - # max_lru_cache_age: 600000 - # dbcompatibility_max_retries: 30 - # dbcompatibility_retry_interval: 10 - # custom_jdbc_pvc: - # log_level: - # node: info - # browser: 2 - # content_security_policy: - # enable: false - # whitelist: - # env: - # max_size_lru_cache_rr: 1000 - # server_env_type: development - # purge_stale_apps_interval: 86400000 - # apps_threshold: 100 - # stale_threshold: 172800000 - # images: - # pull_policy: IfNotPresent - # db_job: - # repository: cp.icr.io/cp/cp4a/aae/solution-server-helmjob-db - # tag: 20.0.1 - # solution_server: - # repository: cp.icr.io/cp/cp4a/aae/solution-server - # tag: 20.0.1 - # max_age: - # auth_cookie: "900000" - # csrf_cookie: "3600000" - # static_asset: "2592000" - # hsts_header: "2592000" - # probe: - # liveness: - # failure_threshold: 5 - # initial_delay_seconds: 60 - # period_seconds: 10 - # success_threshold: 1 - # timeout_seconds: 180 - # readiness: - # failure_threshold: 5 - # initial_delay_seconds: 10 - # period_seconds: 10 - # success_threshold: 1 - # timeout_seconds: 180 - # redis: - # host: - # port: - # ttl: 1800 - # resource_ae: - # limits: - # cpu: 2000m - # memory: 2Gi - # requests: - # cpu: 1000m - # memory: 1Gi - # resource_init: - # limits: - # cpu: 500m - # memory: 256Mi - # requests: - # cpu: 200m - # memory: 128Mi - # session: - # check_period: "3600000" - # duration: "1800000" - # max: "10000" - # resave: "false" - # rolling: "true" - # save_uninitialized: "false" - # use_external_store: "true" - # tls: - # tls_trust_list: [] - - #resource_registry_configuration: - # admin_secret_name: resource-registry-admin-secret - #
hostname: - # port: - # replica_size: 3 - # images: - # pull_policy: IfNotPresent - # resource_registry: - # repository: cp.icr.io/cp/cp4a/aae/dba-etcd - # tag: 20.0.1 - # tls: - # tls_secret: rr-tls-client-secret - # probe: - # liveness: - # initial_delay_seconds: 60 - # period_seconds: 10 - # timeout_seconds: 5 - # success_threshold: 1 - # failure_threshold: 3 - # readiness: - # initial_delay_seconds: 10 - # period_seconds: 10 - # timeout_seconds: 5 - # success_threshold: 1 - # failure_threshold: 3 - # resource: - # limits: - # cpu: "500m" - # memory: "512Mi" - # requests: - # cpu: "200m" - # memory: "256Mi" - # auto_backup: - # enable: false - # minimal_time_interval: 180 - # pvc_name: rr-autobackup-pvc - # dynamic_provision: - # enable: false - # access_mode: ReadWriteMany - # size: 3Gi - # storage_class: nfs - ##################################################################### - ## IBM Business Automation Studio configuration ## - ##################################################################### - #bastudio_configuration: - # admin_secret_name: bastudio-admin-secret - # admin_user: - # hostname: - # port: - # # Whether to disable the User Management Service certificate common name check - # ums_disable_cn_check: false - # # If you don't want to use the customized external TLS certificate, you can leave it empty. - # external_tls_secret: - # # If you don't want to use the customized Certificate Authority (CA) to sign the external TLS certificate, you can leave it empty. - # external_tls_ca_secret: - # tls: - # tls_trust_list: [] - # database: - # host: - # # The database provided should be created by the BAStudio SQL script template. - # name: - # port: - # # To enable database ACR or HADR, configure both alternative_host and alternative_port - # alternative_host: - # alternative_port: - # type: db2 - # ssl_enabled: false - # certificate_secret_name: db2-ssl-certificate - # # If you don't want to use the customized JDBC drivers, keep the default. - # user_custom_jdbc_drivers: false - # # The persistent volume claim for custom JDBC drivers if custom JDBC drivers are enabled - # custom_jdbc_pvc: - # # The custom JDBC driver file names if custom JDBC drivers are enabled - # jdbc_driver_files: "db2jcc4.jar db2jcc_license_cu.jar" - # images: - # pull_policy: IfNotPresent - # bastudio: - # repository: cp.icr.io/cp/cp4a/bas/bastudio - # tag: 20.0.1 - # # Optional - # custom_xml: - # # Optional - # custom_secret_name: - # # Optional - # bastudio_custom_xml: - # content_security_policy: "default-src 'self' 'unsafe-inline' 'unsafe-eval'; frame-ancestors 'self'; font-src 'self' fonts.gstatic.com; frame-src *; img-src 'self' data:;" - # csrf_referrer: - # # The custom whitelist for Cross-Site Request Forgery (CSRF) protection.
For example, it is needed when you want to integrate BAS with other editors such as ADW or ACA - # whitelist: "" - # logs: - # console_format: json - # console_log_level: INFO - # console_source: message,trace,accessLog,ffdc,audit - # trace_format: ENHANCED - # trace_specification: "*=info" - # replica_size: 1 - # autoscaling: - # enabled: false - # minReplicas: 1 - # maxReplicas: 3 - # targetAverageUtilization: 95 - # resources: - # bastudio: - # limits: - # cpu: 4000m - # memory: 3Gi - # requests: - # cpu: 2000m - # memory: 2Gi - # init_process: - # limits: - # cpu: 500m - # memory: 512Mi - # requests: - # cpu: 200m - # memory: 256Mi - # liveness_probe: - # initial_delay_seconds: 300 - # period_seconds: 10 - # timeout_seconds: 5 - # failure_threshold: 3 - # success_threshold: 1 - # readiness_probe: - # initial_delay_seconds: 240 - # period_seconds: 5 - # timeout_seconds: 5 - # failure_threshold: 6 - # success_threshold: 1 - # jms_server: - # image: - # repository: cp.icr.io/cp/cp4a/bas/jms - # tag: 20.0.1 - # pull_policy: IfNotPresent - # resources: - # limits: - # cpu: "1" - # memory: "1Gi" - # requests: - # cpu: "500m" - # memory: "512Mi" - # storage: - # # Whether JMS persistent storage should be enabled - # persistent: false - # # Whether to use dynamic provisioning for JMS persistent storage - # use_dynamic_provisioning: false - # storage_class: "gluster-fs" - # access_modes: "ReadWriteOnce" - # selector: {} - # size: "3Gi" - # #----------------------------------------------------------------------- - # # The App Engine Playback Server can have only one instance - # #----------------------------------------------------------------------- - # playback_server: - # admin_secret_name: playback-server-admin-secret - # admin_user: - # external_connection_timeout: - # images: - # pull_policy: IfNotPresent - # db_job: - # repository: cp.icr.io/cp/cp4a/bas/solution-server-helmjob-db - # tag: 20.0.1 - # solution_server: - # repository: cp.icr.io/cp/cp4a/bas/solution-server - # tag: 20.0.1 - # hostname: - # port: - # # If you don't want to use the customized external TLS certificate, you can leave it empty. - # external_tls_secret: - # # If you don't want to use customized JDBC drivers, keep the default. - # user_custom_jdbc_drivers: false - # replica_size: 1 - # autoscaling: - # enabled: false - # max_replicas: 5 - # min_replicas: 2 - # target_cpu_utilization_percentage: 80 - # database: - # host: - # # The database provided should be created by the App Engine Playback Server SQL script template. 
- # name: - # port: - # # If you want to enable database ACR/HADR, configure both alternative_host and alternative_port - # alternative_host: - # alternative_port: - # type: db2 - # enable_ssl: false - # db_cert_secret_name: db2-ssl-certificate-secret - # current_schema: DBASB - # initial_pool_size: 1 - # max_pool_size: 10 - # uv_thread_pool_size: 4 - # max_lru_cache_size: 1000 - # max_lru_cache_age: 600000 - # dbcompatibility_max_retries: 30 - # dbcompatibility_retry_interval: 10 - # # The persistent volume claim for custom JDBC drivers when custom JDBC drivers are enabled - # custom_jdbc_pvc: - # log_level: - # node: info - # browser: 2 - # content_security_policy: - # enable: false - # whitelist: - # env: - # max_size_lru_cache_rr: 1000 - # server_env_type: development - # purge_stale_apps_interval: 86400000 - # apps_threshold: 100 - # stale_threshold: 172800000 - # max_age: - # auth_cookie: "900000" - # csrf_cookie: "3600000" - # static_asset: "2592000" - # hsts_header: "2592000" - # probe: - # liveness: - # failure_threshold: 5 - # initial_delay_seconds: 60 - # period_seconds: 10 - # success_threshold: 1 - # timeout_seconds: 180 - # readiness: - # failure_threshold: 5 - # initial_delay_seconds: 10 - # period_seconds: 10 - # success_threshold: 1 - # timeout_seconds: 180 - # redis: - # host: localhost - # port: 6379 - # ttl: 1800 - # resource_ae: - # limits: - # cpu: 2000m - # memory: 2Gi - # requests: - # cpu: 1000m - # memory: 1Gi - # resource_init: - # limits: - # cpu: 500m - # memory: 256Mi - # requests: - # cpu: 200m - # memory: 128Mi - # session: - # check_period: "3600000" - # duration: "1800000" - # max: "10000" - # resave: "false" - # rolling: "true" - # save_uninitialized: "false" - # use_external_store: "false" - # tls: - # tls_trust_list: [] - - #iaws_configuration: - # - name: instance1 - # iaws_server: - # service_type: "Route" - # hostname: "" - # port: 443 - # workstream_server_secret: ibm-baw-baw-secret - # external_tls_secret: ibm-baw-ext-tls-secret - # external_tls_ca_secret: ibm-baw-ext-tls-ca-secret - # replicas: 1 - # admin_user: - # tls: - # tls_secret_name: ibm-baw-tls - # tls_trust_list: - # - ums-ingress-tls-secret - # - # # ---------------------------------------------------------------------------------------- - # # images - # # ---------------------------------------------------------------------------------------- - # image: - # repository: cp.icr.io/cp/cp4a/iaws/iaws-server - # tag: 20.0.1 - # pull_policy: IfNotPresent - # pfs_bpd_database_init_job: - # repository: cp.icr.io/cp/cp4a/iaws/pfs-bpd-database-init-prod - # tag: "20.0.1" - # pull_policy: IfNotPresent - # upgrade_job: - # repository: cp.icr.io/cp/cp4a/iaws/iaws-server-dbhandling - # tag: "20.0.1" - # pull_policy: IfNotPresent - # ibm_workplace_job: - # repository: cp.icr.io/cp/cp4a/iaws/iaws-ibm-workplace - # tag: "20.0.1" - # pull_policy: IfNotPresent - # - # # ---------------------------------------------------------------------------------------- - # # PS DB settings. - # # ---------------------------------------------------------------------------------------- - # database: - # ssl: false - # sslsecretname: ibm-dba-baw-db2-cacert - # type: "DB2" - # server_name: "" - # database_name: "" - # port: "50000" - # secret_name: ibm-baw-wfs-server-db-secret - # dbcheck: - # # The maximum waiting time (seconds) to check the database initialization status. - # wait_time: 900 - # # The interval (seconds) between checks. 
- # interval_time: 15 - # hadr: - # standbydb_host: - # standbydb_port: - # retryinterval: - # maxretries: - # - # # ---------------------------------------------------------------------------------------- - # # Content integration configurations - # # ---------------------------------------------------------------------------------------- - # content_integration: - # init_job_image: - # repository: cp.icr.io/cp/cp4a/iaws/iaws-ps-content-integration - # tag: "20.0.1" - # pull_policy: IfNotPresent - # wait_interval: 60000 - # - # # ---------------------------------------------------------------------------------------- - # # AppEngine configuration - # # ---------------------------------------------------------------------------------------- - # appengine: - # hostname: - # admin_secret_name: ae-admin-secret-instance1 - # - # # ---------------------------------------------------------------------------------------- - # # Resource Registry configuration - # # ---------------------------------------------------------------------------------------- - # resource_registry: - # hostname: - # port: 443 - # admin_secret_name: - # - # # ---------------------------------------------------------------------------------------- - # # JMS configuration - # # ---------------------------------------------------------------------------------------- - # jms: - # image: - # repository: cp.icr.io/cp/cp4a/iaws/jms - # tag: "20.0.1" - # pull_policy: IfNotPresent - # tls: - # tls_secret_name: dummy-jms-tls-secret - # resources: - # limits: - # memory: "2Gi" - # cpu: "1000m" - # requests: - # memory: "512Mi" - # cpu: "200m" - # storage: - # persistent: true - # size: "2Gi" - # use_dynamic_provisioning: false - # access_modes: - # - ReadWriteOnce - # storage_class: "jms-storage-class" - # # If you do not need a selector, comment out or remove the selector section below - # selector: - # label: "" - # value: "" - # - # # ---------------------------------------------------------------------------------------- - # # Resource limitation - # # ---------------------------------------------------------------------------------------- - # resources: - # limits: - # cpu: 3 - # memory: 2096Mi - # requests: - # cpu: 2 - # memory: 1048Mi - # - # # ---------------------------------------------------------------------------------------- - # # Resource limitation for init containers - # # ---------------------------------------------------------------------------------------- - # resource_init: - # limits: - # cpu: 500m - # memory: 256Mi - # requests: - # cpu: 200m - # memory: 128Mi - # - # # ---------------------------------------------------------------------------------------- - # # liveness and readiness probes - # # ---------------------------------------------------------------------------------------- - # probe: - # ws: - # liveness_probe: - # initial_delay_seconds: 240 - # readinessProbe: - # initial_delay_seconds: 180 - # - # # ---------------------------------------------------------------------------------------- - # # trace settings. 
- # # ---------------------------------------------------------------------------------------- - # logs: - # console_format: "json" - # console_log_level: "INFO" - # console_source: "message,trace,accessLog,ffdc,audit" - # message_format: "basic" - # trace_format: "ENHANCED" - # trace_specification: "*=info" - # - # # ---------------------------------------------------------------------------------------- - # # custom configuration in Liberty server.xml; put the custom.xml in a secret with the key "sensitiveCustomConfig", for example: - # # kubectl create secret generic wfs-custom-xml-secret --from-file=sensitiveCustomConfig=./custom.xml - # # ---------------------------------------------------------------------------------------- - # custom_xml_secret_name: - # - # # ---------------------------------------------------------------------------------------- - # # custom configuration in 100Custom.xml; put the 100Custom.xml in a secret with the key "sensitiveCustomConfig", for example: - # # kubectl create secret generic wfs-lombardi-custom-xml-secret --from-file=sensitiveCustomConfig=./100Custom.xml - # # ---------------------------------------------------------------------------------------- - # lombardi_custom_xml_secret_name: - ######################################################################## - ######## IBM Process Federation Server configuration ######## - ######################################################################## - #pfs_configuration: - # pfs: - # hostname: "" - # port: 443 - # service_type: Route - # - # image: - # repository: cp.icr.io/cp/cp4a/iaws/pfs-prod - # tag: "20.0.1" - # pull_policy: IfNotPresent - # - # replicas: 1 - # service_account: - # anti_affinity: hard - # - # admin_secret_name: ibm-pfs-admin-secret - # config_dropins_overrides_secret: ibm-pfs-config - # resources_security_secret: "" - # - # external_tls_secret: - # external_tls_ca_secret: - # tls: - # tls_secret_name: - # tls_trust_list: - # - ums-tls-crt-secret - # - # resources: - # requests: - # cpu: 500m - # memory: 512Mi - # limits: - # cpu: 2 - # memory: 4Gi - # liveness_probe: - # initial_delay_seconds: 300 - # readiness_probe: - # initial_delay_seconds: 240 - # saved_searches: - # index_name: ibmpfssavedsearches - # index_number_of_shards: 3 - # index_number_of_replicas: 1 - # index_batch_size: 100 - # update_lock_expiration: 5m - # unique_constraint_expiration: 5m - # - # security: - # sso: - # domain_name: - # cookie_name: "ltpatoken2" - # ltpa: - # filename: "ltpa.keys" - # expiration: "120m" - # monitor_interval: "60s" - # ssl_protocol: SSL - # - # executor: - # max_threads: "80" - # core_threads: "40" - # - # rest: - # user_group_check_interval: "300s" - # system_status_check_interval: "60s" - # bd_fields_check_interval: "300s" - # - # custom_env_variables: - # names: - # # - name: MY_CUSTOM_ENVIRONMENT_VARIABLE - # secret: - # - # output: - # storage: - # use_dynamic_provisioning: false - # size: 5Gi - # storage_class: "pfs-output" - # - # logs: - # console_format: "json" - # console_log_level: "INFO" - # console_source: "message,trace,accessLog,ffdc,audit" - # trace_format: "ENHANCED" - # trace_specification: "*=info" - # storage: - # use_dynamic_provisioning: false - # size: 5Gi - # storage_class: "pfs-logs" - # - # dba_resource_registry: - # image: - # repository: cp.icr.io/cp/cp4a/aae/dba-etcd - # tag: 20.0.1 - # pull_policy: IfNotPresent - # lease_ttl: 120 - # pfs_check_interval: 10 - # pfs_connect_timeout: 10 - # pfs_response_timeout: 30 - # pfs_registration_key: /dba/appresources/IBM_PFS/PFS_SYSTEM - # tls_secret: 
rr-tls-client-secret - # resources: - # limits: - # memory: '512Mi' - # cpu: '500m' - # requests: - # memory: '512Mi' - # cpu: '200m' - # - # # ---------------------------------------------------- - # # PFS Embedded Elasticsearch configuration - # # ---------------------------------------------------- - # elasticsearch: - # es_image: - # repository: cp.icr.io/cp/cp4a/iaws/pfs-elasticsearch-prod - # tag: "20.0.1" - # pull_policy: IfNotPresent - # - # pfs_init_image: - # repository: cp.icr.io/cp/cp4a/iaws/pfs-init-prod - # tag: "20.0.1" - # pull_policy: IfNotPresent - # - # nginx_image: - # repository: cp.icr.io/cp/cp4a/iaws/pfs-nginx-prod - # tag: "20.0.1" - # pull_policy: IfNotPresent - # - # replicas: 1 - # service_type: NodePort - # external_port: - # anti_affinity: hard - # service_account: - # privileged: true - # probe_initial_delay: 90 - # heap_size: "1024m" - # - # resources: - # limits: - # memory: "2Gi" - # cpu: "1000m" - # requests: - # memory: "1Gi" - # cpu: "100m" - # - # storage: - # persistent: true - # use_dynamic_provisioning: false - # size: 10Gi - # storage_class: "pfs-es" - # - # snapshot_storage: - # enabled: false - # use_dynamic_provisioning: false - # size: 30Gi - # storage_class_name: "" - # existing_claim_name: "" - # - # security: - # users_secret: "" -# ca_configuration: -# global: -# arch: "amd64" -# service_type: "Route" # required; supported service types for the application engine are Route or NodePort. -# frontend_external_hostname: "www.frontend.com" # required if service_type is Route; otherwise leave blank -# backend_external_hostname: "www.backend.com" # required if service_type is Route; otherwise leave blank -# ldap_secret: -# db_secret: -# image: -# repository: "cp.icr.io/cp/cp4a/baca" -# tag: "20.0.1" -# pull_policy: "IfNotPresent" -# pull_secrets: "baca-docker-secret" # Specify the secret name for image pull -# authentication_type: 1 # 0-Non-LDAP, 1-LDAP, 2-User Management Service integration -# retries: "180" # The maximum number of retries for the CA deployment verification task until all the pods are in Ready status, with a 20-second delay between attempts. 
-# bas: -# bas_enabled: "false" -# celery: -# process_timeout: 300 -# configs: -# claimname: "sp-config-pvc" -# logs: -# claimname: "sp-log-pvc" -# log_level: "info" -# data: -# claimname: "sp-data-pvc" -# redis: -# resources: -# limits: -# memory: "640Mi" -# cpu: "0.25" -# replica_count: 3 -# quorum: 2 -# rabbitmq: -# resources: -# limits: -# memory: "640Mi" -# cpu: "0.5" -# replica_count: 3 -# mongo: -# configdb_claimname: "sp-data-pvc" -# shard_claimname: "sp-data-pvc" -# mongo_limited_memory: "1600Mi" -# wired_tiger_cache: ".3" -# replica_count: 2 -# mongoadmin: -# admin_configdb_claimname: "sp-data-pvc" -# admin_shard_claimname: "sp-data-pvc" -# mongo_limited_memory: "1600Mi" -# wired_tiger_cache: ".3" -# replica_count: 2 -# caller_api: -# replica_count: 2 -# resources: -# limits: -# memory: "480Mi" -# cpu: "0.6" -# spbackend: -# replica_count: 2 -# resources: -# limits: -# memory: "640Mi" -# cpu: "0.6" -# spfrontend: -# replica_count: 2 -# resources: -# limits: -# memory: "480Mi" -# cpu: "0.6" -# backend_host: "" -## frontend_host: "" -# sso: "false" -# postprocessing: -# process_timeout: 1500 -# replica_count: 2 -# max_unavailable_count: 1 -# resources: -# limits: -# memory: "480Mi" -# cpu: "0.6" -# pdfprocess: -# process_timeout: 1500 -# replica_count: 2 -# max_unavailable_count: 1 -# resources: -# limits: -# memory: "960Mi" -# cpu: "0.6" -# utfprocess: -# process_timeout: 1500 -# replica_count: 2 -# max_unavailable_count: 1 -# resources: -# limits: -# memory: "960Mi" -# cpu: "1" -# setup: -# process_timeout: 120 -# replica_count: 2 -# max_unavailable_count: 1 -# resources: -# limits: -# memory: "480Mi" -# cpu: "0.6" -# ocrextraction: -# replica_count: 2 -# max_unavailable_count: 1 -# resources: -# limits: -# memory: "1440Mi" -# cpu: "1" -# classifyprocess: -# replica_count: 2 -# max_unavailable_count: 1 -# resources: -# limits: -# memory: "960Mi" -# cpu: "1" -# processingextraction: -# replica_count: 2 -# max_unavailable_count: 1 -# resources: -# limits: -# memory: "1440Mi" -# cpu: "1" -# updatefiledetail: -# replica_count: 2 -# max_unavailable_count: 1 -# resources: -# limits: -# memory: "480Mi" -# cpu: "0.6" - ######################################################################## - ######## IBM Business Automation Insights configuration ######## - ######################################################################## - #bai_configuration: -# imageCredentials: -# imagePullSecret: "admin.registrykey" -# persistence: -# useDynamicProvisioning: true -# flinkPv: -# storageClassName: "" -# kafka: -# bootstrapServers: "kafka.bootstrapserver1.hostname:9092,kafka.bootstrapserver2.hostname:9092,kafka.bootstrapserver3.hostname:9092" -# securityProtocol: "PLAINTEXT" -# settings: -# egress: false -# ingressTopic: icp4adeploy-ibm-bai-ingress -# egressTopic: icp4adeploy-ibm-bai-egress -# serviceTopic: icp4adeploy-ibm-bai-serviceTopic -# setup: -# image: -# repository: cp.icr.io/cp/cp4a/bai/bai-setup -# tag: "20.0.1" -# admin: -# image: -# repository: cp.icr.io/cp/cp4a/bai/bai-admin -# tag: "20.0.1" -# flink: -# initStorageDirectory: true -# image: -# repository: cp.icr.io/cp/cp4a/bai/bai-flink -# tag: "20.0.1" -# zookeeper: -# image: -# repository: cp.icr.io/cp/cp4a/bai/bai-flink-zookeeper -# tag: "20.0.1" -# ingestion: -# install: false -# image: -# repository: cp.icr.io/cp/cp4a/bai/bai-ingestion -# tag: "20.0.1" -# adw: -# install: false -# image: -# repository: cp.icr.io/cp/cp4a/bai/bai-adw -# tag: "20.0.1" -# bpmn: -# install: false -# image: -# repository: 
cp.icr.io/cp/cp4a/bai/bai-bpmn -# tag: "20.0.1" -# bawadv: -# install: false -# image: -# repository: cp.icr.io/cp/cp4a/bai/bai-bawadv -# tag: "20.0.1" -# icm: -# install: false -# image: -# repository: cp.icr.io/cp/cp4a/bai/bai-icm -# tag: "20.0.1" -# odm: -# install: false -# image: -# repository: cp.icr.io/cp/cp4a/bai/bai-odm -# tag: "20.0.1" -# content: -# install: false -# image: -# repository: cp.icr.io/cp/cp4a/bai/bai-content -# tag: "20.0.1" -# initImage: -# image: -# repository: cp.icr.io/cp/cp4a/bai/bai-init -# tag: "20.0.1" -# elasticsearch: -# install: true -# ibm-dba-ek: -# image: -# imagePullPolicy: IfNotPresent -# imagePullSecret: "admin.registrykey" -# elasticsearch: -# image: -# repository: cp.icr.io/cp/cp4a/bai/bai-elasticsearch -# tag: "20.0.1" -# init: -# image: -# repository: cp.icr.io/cp/cp4a/bai/bai-init -# tag: "20.0.1" -# data: -# storage: -# persistent: true -# useDynamicProvisioning: true -# storageClass: "" -# snapshotStorage: -# enabled: true -# useDynamicProvisioning: true -# storageClassName: "" -# kibana: -# image: -# repository: cp.icr.io/cp/cp4a/bai/bai-kibana -# tag: "20.0.1" -# init: -# image: -# repository: cp.icr.io/cp/cp4a/bai/bai-init -# tag: "20.0.1" - ######################################################################## - ######## IBM Business Automation Digital Worker Configuration ######## - ######################################################################## - - #adw_configuration: -# global: -# imagePullSecret: baiw-reg-cred -# kubernetes: -# serviceAccountName: "" - -# adwSecret: "" - -# grantWritePermissionOnMountedVolumes: true - -# logLevel: "error" - -# networkPolicy: -# enabled: true - -# registry: -# endpoint: "" - -# npmRegistry: -# persistence: -# enabled: true -# useDynamicProvisioning: true -# storageClassName: "managed-nfs-storage" - -# mongodb: -# replicas: 2 -# persistence: -# enabled: true -# useDynamicProvisioning: true -# storageClassName: "managed-nfs-storage" - - -# designer: -# image: -# repository: "cp.icr.io/cp/cp4a/adw/adw-designer" -# pullPolicy: "IfNotPresent" -# externalUrl: "" - -# runtime: -# image: -# repository: "cp.icr.io/cp/cp4a/adw/adw-runtime" -# pullPolicy: "IfNotPresent" -# persistence: -# useDynamicProvisioning: true -# storageClassName: "managed-nfs-storage" -# service: -# type: "NodePort" -# externalPort: 30711 -# runLogLevel: "warn" -# externalUrl: "" - - -# management: -# image: -# repository: "cp.icr.io/cp/cp4a/adw/adw-management" -# pullPolicy: "IfNotPresent" -# persistence: -# useDynamicProvisioning: true -# storageClassName: "managed-nfs-storage" - -# setup: -# image: -# repository: "cp.icr.io/cp/cp4a/adw/adw-setup" -# pullPolicy: "IfNotPresent" - -# init: -# image: -# repository: "cp.icr.io/cp/cp4a/adw/adw-init" -# pullPolicy: "IfNotPresent" - -# baiKafka: -# topic: "BAITOPICFORODM" -# bootstrapServers: "" -# securityProtocol: "SASL_SSL" - -# baiElasticsearch: -# url: "" - -# oidc: -# endpoint: "" diff --git a/descriptors/ibm_cp4a_crd.yaml b/descriptors/ibm_cp4a_crd.yaml index 3e27c8cc..9c65543b 100644 --- a/descriptors/ibm_cp4a_crd.yaml +++ b/descriptors/ibm_cp4a_crd.yaml @@ -16,7 +16,7 @@ metadata: app.kubernetes.io/instance: ibm-dba app.kubernetes.io/managed-by: ibm-dba app.kubernetes.io/name: ibm-dba - release: 20.0.1 + release: 20.0.2 spec: group: icp4a.ibm.com names: @@ -41,6 +41,20 @@ spec: properties: appVersion: type: string + spec: + properties: + version: + type: string + type: object + status: + description: CloudPakAutomationStatus defines the observed state of Automation + 
properties: + cpAutoStatus: + description: It will be "OK" when all objects are created successfully + type: string + required: + - cpAutoStatus + type: object license: type: string pattern: '^accept$' diff --git a/descriptors/operator-shared-pvc.yaml b/descriptors/operator-shared-pvc.yaml index 7b626df5..8e626944 100644 --- a/descriptors/operator-shared-pvc.yaml +++ b/descriptors/operator-shared-pvc.yaml @@ -16,7 +16,7 @@ metadata: app.kubernetes.io/instance: ibm-dba app.kubernetes.io/managed-by: ibm-dba app.kubernetes.io/name: ibm-dba - release: 20.0.1 + release: 20.0.2 spec: accessModes: - ReadWriteMany diff --git a/descriptors/operator.yaml b/descriptors/operator.yaml index 91ed8507..443a68a2 100644 --- a/descriptors/operator.yaml +++ b/descriptors/operator.yaml @@ -16,7 +16,7 @@ metadata: app.kubernetes.io/instance: ibm-dba app.kubernetes.io/managed-by: ibm-dba app.kubernetes.io/name: ibm-dba - release: 20.0.1 + release: 20.0.2 spec: replicas: 1 selector: @@ -29,11 +29,11 @@ spec: app.kubernetes.io/instance: ibm-dba app.kubernetes.io/managed-by: ibm-dba app.kubernetes.io/name: ibm-dba - release: 20.0.1 + release: 20.0.2 annotations: productID: "5737-I23" productName: "IBM Cloud Pak for Automation" - productVersion: "20.0.1" + productVersion: "20.0.2" spec: hostNetwork: false hostPID: false @@ -65,13 +65,14 @@ spec: - /tmp/ansible-operator/runner - stdout # Replace this with the built image name - image: "cp.icr.io/cp/cp4a/icp4a-operator:20.0.1" + image: "cp.icr.io/cp/cp4a/icp4a-operator:20.0.2" imagePullPolicy: "IfNotPresent" securityContext: allowPrivilegeEscalation: false privileged: false readOnlyRootFilesystem: false - runAsNonRoot: true + # Uncomment below for OCP 3.11 + # runAsUser: 1001 capabilities: drop: - ALL @@ -89,7 +90,7 @@ spec: name: operator-shared-folder - name: operator # Replace this with the built image name - image: "cp.icr.io/cp/cp4a/icp4a-operator:20.0.1" + image: "cp.icr.io/cp/cp4a/icp4a-operator:20.0.2" imagePullPolicy: "IfNotPresent" livenessProbe: exec: @@ -111,7 +112,8 @@ spec: allowPrivilegeEscalation: false privileged: false readOnlyRootFilesystem: false - runAsNonRoot: true + # Uncomment below for OCP 3.11 + # runAsUser: 1001 capabilities: drop: - ALL @@ -130,6 +132,8 @@ spec: args: - '--zap-time-encoding=iso8601' - '--zap-encoder=console' + - "--max-workers" + - "10" env: - name: WATCH_NAMESPACE valueFrom: @@ -141,7 +145,7 @@ spec: fieldPath: metadata.name - name: OPERATOR_NAME value: "ibm-cp4a-operator" - - name: WORKER_FOOSERVICE_CACHE_EXAMPLE_COM + - name: WORKER_ICP4ACLUSTER_ICP4A_IBM_COM value: "10" # MUST exist, used to accept the dba license; the only valid value is "accept" - name: dba_license diff --git a/descriptors/patterns/ibm_cp4a_cr_demo_aca.yaml b/descriptors/patterns/ibm_cp4a_cr_demo_aca.yaml index 3de2205b..68ded0a2 100644 --- a/descriptors/patterns/ibm_cp4a_cr_demo_aca.yaml +++ b/descriptors/patterns/ibm_cp4a_cr_demo_aca.yaml @@ -1,3 +1,4 @@ + ############################################################################### # # Licensed Materials - Property of IBM # # (C) Copyright IBM Corp. 2020. All Rights Reserved. # # US Government Users Restricted Rights - Use, duplication or # disclosure restricted by GSA ADP Schedule Contract with IBM Corp. # ############################################################################### apiVersion: icp4a.ibm.com/v1 kind: ICP4ACluster metadata: name: icp4adeploy labels: app.kubernetes.io/instance: ibm-dba app.kubernetes.io/managed-by: ibm-dba app.kubernetes.io/name: ibm-dba - release: 20.0.1 + release: 20.0.2 spec: - appVersion: 20.0.1 - ## TIPS: The names of all vaddkriables in the spec field are converted to snake_case by the operator before running ansible + appVersion: 20.0.2 + ## TIPS: The names of all variables in the spec field are converted to snake_case by the operator before running ansible ## For example, 
serviceAccount in the spec becomes service_account in ansible. ## It is recommended that you perform some type validation in Ansible on the variables to ensure that - ## your application is receiving expected input. + ## your Content Analyzer deployment is receiving expected input. ## shared configuration among all tribe shared_configuration: + + ## Use this parameter to specify the license for the CP4A deployment and + ## the possible values are: non-production and production and if not set, the license will + ## be defaulted to production. This value could be different from the other licenses in the CR. + sc_deployment_license: non-production + + ## All CP4A components must use/share the image_pull_secrets to pull images (a sample pull-secret manifest is sketched just below this hunk) + image_pull_secrets: + - admin.registrykey + + ## CP4A patterns or capabilities to be deployed. This CR represents the "contentanalyzer" pattern, which includes the following + ## mandatory components: ums, rr, icn (BAN/Navigator), bastudio sc_deployment_patterns: contentanalyzer - sc_optional_components: #Values will be populated by a script based on user's input. Possible values are LDAP,UMS. When sc_optional_components is blank, ACA will be deployed without LDAP integration. + + ## The optional components to be installed if listed here. This is normally populated by the User script based on input from the user. + sc_optional_components: + + ## The deployment type as selected by the user. Possible values are: enterprise, demo sc_deployment_type: demo - sc_deployment_hostname_suffix: "infra-node-name" # BACA already include {{ meta.name }} in playbook, We use this to compose frontend_external_hostname and backend_external_hostname - # Possible values OCP,ROKS + + ## This is the deployment platform supported for CP4A and the possible values are: ROKS, OCP, and other (which includes all Certified Kubernetes platforms) sc_deployment_platform: OCP - root_ca_secret: "{{ meta.name }}-root-ca" + + sc_deployment_hostname_suffix: "{{ meta.name }}" + storage_configuration: - sc_dynamic_storage_classname: "managed-nfs-storage" + sc_dynamic_storage_classname: "" + + ## All CP4A components must use/share the root_ca_secret in order for integration + root_ca_secret: "{{ meta.name }}-root-ca" + images: + + ## All CP4A components should use this pull_policy as the default, but each component can override it + pull_policy: IfNotPresent + keytool_job_container: - repository: cp.icr.io/cp/cp4a/ums/dba-keytool-jobcontainer - tag: 20.0.1 + repository: cp.icr.io/cp/cp4a/ums/dba-keytool-jobcontainer + tag: 20.0.2 + dbcompatibility_init_container: + repository: cp.icr.io/cp/cp4a/aae/dba-dbcompatibility-initcontainer + tag: 20.0.2 keytool_init_container: - repository: cp.icr.io/cp/cp4a/ums/dba-keytool-initcontainer - tag: 20.0.1 + repository: cp.icr.io/cp/cp4a/ums/dba-keytool-initcontainer + tag: 20.0.2 + umsregistration_initjob: + repository: cp.icr.io/cp/cp4a/aae/dba-umsregistration-initjob + tag: 20.0.2 db2u_tools: repository: docker.io/ibmcom/db2u.tools tag: 11.5.1.0-CN1 @@ -62,20 +95,52 @@ spec: busybox: repository: docker.io/library/busybox tag: latest - - + phpldapadmin: + repository: osixia/phpldapadmin + tag: 0.9.0 + ######################################################################## + ######## IBM Content Analyzer configuration ######## + ######################################################################## ca_configuration: global: image: repository: "cp.icr.io/cp/cp4a/baca" - tag: "20.0.1" - pull_policy: "IfNotPresent" + tag: 20.0.2 
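For reference, the `admin.registrykey` entry under `image_pull_secrets` above must already exist as a docker-registry secret in the target namespace before the operator can pull from `cp.icr.io`. A minimal sketch, assuming the IBM Entitled Registry is used; the secret name matches the CR, and the encoded credential below is a placeholder, not a real value:

```yaml
# Hypothetical pull secret for cp.icr.io. Replace the .dockerconfigjson value
# with the base64 encoding of your real registry auth, for example:
# {"auths":{"cp.icr.io":{"username":"cp","password":"<entitlement-key>"}}}
apiVersion: v1
kind: Secret
metadata:
  name: admin.registrykey
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: eyJhdXRocyI6e319  # placeholder: base64 of {"auths":{}}
```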
######################################################################## - ######## IBM User and Group Management Service configuration ######## + ######## IBM Business Automation Navigator configuration ######## ######################################################################## -# ums_configuration: -# images: -# ums: -# repository: cp.icr.io/cp/cp4a/ums/ums -# tag: 20.0.1 + navigator_configuration: + image: + repository: cp.icr.io/cp/cp4a/ban/navigator + tag: ga-308-icn + + ######################################################################## + ######## IBM Resource Registry configuration ######## + ######################################################################## + resource_registry_configuration: + images: + resource_registry: + repository: cp.icr.io/cp/cp4a/aae/dba-etcd + tag: 20.0.2 + + ######################################################################## + ######## IBM Business Automation Studio configuration ######## + ######################################################################## + bastudio_configuration: + images: + bastudio: + repository: cp.icr.io/cp/cp4a/bas/bastudio + tag: 20.0.2 + jms_server: + image: + repository: cp.icr.io/cp/cp4a/bas/jms + tag: 20.0.2 + playback_server: + images: + db_job: + repository: cp.icr.io/cp/cp4a/bas/solution-server-helmjob-db + tag: 20.0.2 + solution_server: + repository: cp.icr.io/cp/cp4a/bas/solution-server + tag: 20.0.2 diff --git a/descriptors/patterns/ibm_cp4a_cr_demo_application.yaml b/descriptors/patterns/ibm_cp4a_cr_demo_application.yaml index 886eedca..cee63a18 100644 --- a/descriptors/patterns/ibm_cp4a_cr_demo_application.yaml +++ b/descriptors/patterns/ibm_cp4a_cr_demo_application.yaml @@ -16,9 +16,9 @@ metadata: app.kubernetes.io/instance: ibm-dba app.kubernetes.io/managed-by: ibm-dba app.kubernetes.io/name: ibm-dba - release: 20.0.1 + release: 20.0.2 spec: - appVersion: 20.0.1 + appVersion: 20.0.2 ## TIPS: The names of all variables in the spec field are converted to snake_case by the operator before running ansible ## For example, serviceAccount in the spec becomes service_account in ansible. 
## It is recommended that you perform some type validation in Ansible on the variables to ensure that @@ -26,6 +26,7 @@ spec: ## shared configuration among all tribe shared_configuration: sc_deployment_patterns: application + sc_optional_components: app_designer sc_deployment_type: demo sc_deployment_platform: OCP sc_deployment_hostname_suffix: "{{ meta.name }}" @@ -37,16 +38,16 @@ spec: images: keytool_job_container: repository: cp.icr.io/cp/cp4a/ums/dba-keytool-jobcontainer - tag: 20.0.1 + tag: 20.0.2 dbcompatibility_init_container: repository: cp.icr.io/cp/cp4a/aae/dba-dbcompatibility-initcontainer - tag: 20.0.1 + tag: 20.0.2 keytool_init_container: repository: cp.icr.io/cp/cp4a/ums/dba-keytool-initcontainer - tag: 20.0.1 + tag: 20.0.2 umsregistration_initjob: repository: cp.icr.io/cp/cp4a/aae/dba-umsregistration-initjob - tag: 20.0.1 + tag: 20.0.2 db2u_tools: repository: docker.io/ibmcom/db2u.tools tag: 11.5.1.0-CN1 @@ -68,13 +69,16 @@ spec: busybox: repository: docker.io/library/busybox tag: latest + phpldapadmin: + repository: osixia/phpldapadmin + tag: 0.9.0 ######################################################################## ######## IBM Business Automation Navigator configuration ######## ######################################################################## navigator_configuration: image: repository: cp.icr.io/cp/cp4a/ban/navigator-sso - tag: ga-307-icn-if002 + tag: ga-308-icn ######################################################################## ######## IBM Business Automation Studio configuration ######## ######################################################################## @@ -82,19 +86,19 @@ spec: images: bastudio: repository: cp.icr.io/cp/cp4a/bas/bastudio - tag: 20.0.1 + tag: 20.0.2 jms_server: image: repository: cp.icr.io/cp/cp4a/bas/jms - tag: 20.0.1 + tag: 20.0.2 playback_server: images: db_job: repository: cp.icr.io/cp/cp4a/bas/solution-server-helmjob-db - tag: 20.0.1 + tag: 20.0.2 solution_server: repository: cp.icr.io/cp/cp4a/bas/solution-server - tag: 20.0.1 + tag: 20.0.2 ######################################################################## ######## IBM Resource Registry configuration ######## ######################################################################## @@ -102,14 +106,12 @@ spec: images: resource_registry: repository: cp.icr.io/cp/cp4a/aae/dba-etcd - tag: 20.0.1 + tag: 20.0.2 + ######################################################################## ######## IBM User and Group Management Service configuration ######## ######################################################################## ums_configuration: teamserver: admingroup: P8Administrators - images: - ums: - repository: cp.icr.io/cp/cp4a/ums/ums - tag: 20.0.1 + \ No newline at end of file diff --git a/descriptors/patterns/ibm_cp4a_cr_demo_content.yaml b/descriptors/patterns/ibm_cp4a_cr_demo_content.yaml index 78d63877..d8e5081e 100644 --- a/descriptors/patterns/ibm_cp4a_cr_demo_content.yaml +++ b/descriptors/patterns/ibm_cp4a_cr_demo_content.yaml @@ -16,19 +16,37 @@ metadata: app.kubernetes.io/instance: ibm-dba app.kubernetes.io/managed-by: ibm-dba app.kubernetes.io/name: ibm-dba - release: 20.0.1 + release: 20.0.2 spec: - appVersion: 20.0.1 + appVersion: 20.0.2 ## TIPS: The names of all variables in the spec field are converted to snake_case by the operator before running ansible ## For example, serviceAccount in the spec becomes service_account in ansible. 
## It is recommended that you perform some type validation in Ansible on the variables to ensure that ## your application is receiving expected input. ## shared configuration among all tribe shared_configuration: + + ## FileNet Content Manager (FNCM) license and possible values are: user, non-production, and production. + ## This value could be different from the rest of the licenses. + sc_deployment_fncm_license: non-production + + ## CP4A patterns or capabilities to be deployed. This CR represents the "content" pattern (aka FileNet Content Manager), which includes the following + ## mandatory components: cpe, icn (BAN/Navigator), graphql and optional components: cmis, es (External Share). sc_deployment_patterns: content + + ## The optional components to be installed if listed here. This is normally populated by the User script based on input from the user. + ## The optional components are: cmis, css (Content Search Services), es (External Share). sc_optional_components: + + ## The deployment type as selected by the user. Possible values are: demo, non-production, and production sc_deployment_type: demo - sc_deployment_platform: OCP + + ## Specify the RunAsUser for the security context of the pod. This is usually a numeric value that corresponds to a user ID + sc_run_as_user: + + ## This is the deployment platform supported for CP4A and the possible values are: ROKS, OCP, and other (which includes all Certified Kubernetes platforms) + sc_deployment_platform: + sc_deployment_hostname_suffix: "{{ meta.name }}" storage_configuration: sc_dynamic_storage_classname: "" @@ -54,6 +72,9 @@ spec: busybox: repository: docker.io/library/busybox tag: latest + phpldapadmin: + repository: osixia/phpldapadmin + tag: 0.9.0 ######################################################################## ######## IBM FileNet Content Manager configuration ######## ######################################################################## ecm_configuration: cpe: image: repository: cp.icr.io/cp/cp4a/fncm/cpe - tag: ga-554-p8cpe-if001 + tag: ga-555-p8cpe css: image: repository: cp.icr.io/cp/cp4a/fncm/css - tag: ga-554-p8css-if001 + tag: ga-555-p8css graphql: image: repository: cp.icr.io/cp/cp4a/fncm/graphql - tag: ga-554-p8cgql-if001 + tag: ga-555-p8cgql -# cmis: -# image: -# repository: cp.icr.io/cp/cp4a/fncm/cmis -# tag: ga-304-cmis-if010 + cmis: + image: + repository: cp.icr.io/cp/cp4a/fncm/cmis + tag: ga-305-cmis ######################################################################## ######## IBM Business Automation Navigator configuration ######## ######################################################################## navigator_configuration: image: repository: cp.icr.io/cp/cp4a/ban/navigator - tag: ga-307-icn-if002 + tag: ga-308-icn + \ No newline at end of file diff --git a/descriptors/patterns/ibm_cp4a_cr_demo_decisions.yaml b/descriptors/patterns/ibm_cp4a_cr_demo_decisions.yaml index 80bf5fb8..c4e56a48 100644 --- a/descriptors/patterns/ibm_cp4a_cr_demo_decisions.yaml +++ b/descriptors/patterns/ibm_cp4a_cr_demo_decisions.yaml @@ -16,9 +16,9 @@ metadata: app.kubernetes.io/instance: ibm-dba app.kubernetes.io/managed-by: ibm-dba app.kubernetes.io/name: ibm-dba - release: 20.0.1 + release: 20.0.2 spec: - appVersion: 20.0.1 + appVersion: 20.0.2 ## TIPS: The names of all variables in the spec field are converted to snake_case by the operator before running ansible ## For example, serviceAccount in the spec becomes service_account in ansible. 
## It is recommended that you perform some type validation in Ansible on the variables to ensure that @@ -30,6 +30,7 @@ spec: sc_deployment_type: demo # Possible values OCP,ROKS sc_deployment_platform: OCP + sc_deployment_hostname_suffix: "{{ meta.name }}" storage_configuration: # For ROKS you can use: ibmc-file-gold or ibmc-file-silver sc_dynamic_storage_classname: "" diff --git a/descriptors/patterns/ibm_cp4a_cr_demo_decisions_ads.yaml b/descriptors/patterns/ibm_cp4a_cr_demo_decisions_ads.yaml new file mode 100644 index 00000000..e6abf570 --- /dev/null +++ b/descriptors/patterns/ibm_cp4a_cr_demo_decisions_ads.yaml @@ -0,0 +1,188 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +apiVersion: icp4a.ibm.com/v1 +kind: ICP4ACluster +metadata: + name: icp4adeploy + labels: + app.kubernetes.io/instance: ibm-dba + app.kubernetes.io/managed-by: ibm-dba + app.kubernetes.io/name: ibm-dba + release: 20.0.2 +spec: + appVersion: 20.0.2 + + shared_configuration: + ## Use this parameter to specify the license for the CP4A deployment and + ## the possible values are: non-production and production and if not set, the license will + ## be defaulted to production. This value could be different from the other licenses in the CR. + sc_deployment_license: non-production + + ## CP4A patterns or capabilities to be deployed. The base pattern is "foundation pattern", which includes BAN (Navigator) and RR. + sc_deployment_patterns: decisions_ads + + ## The optional components for 'decisions_ads' are 'ads_designer' and 'ads_runtime'. + # In demo mode, both are always installed and set in the 'sc_optional_components' list by the user installation script. + sc_optional_components: + + ## This is the deployment type and the possible values are: demo, non-production, and production. + sc_deployment_type: demo + + ## This is the version of the OCP/ROKS platform and the possible values are: 3.11, 4.2, and 4.3. This is usually set by the User script and it is + ## used by the Operator to determine which version-specific prerequisites to perform in order to deploy Db2. 
+ sc_deployment_platform: + + sc_deployment_hostname_suffix: "{{ meta.name }}" + + storage_configuration: + ## For OCP 3.11 or 4.x, the storage class will be provided by the user + ## during the execution of the User script (cp4a-deployment.sh) + sc_dynamic_storage_classname: + + images: + keytool_job_container: + repository: cp.icr.io/cp/cp4a/ums/dba-keytool-jobcontainer + tag: 20.0.2 + dbcompatibility_init_container: + repository: cp.icr.io/cp/cp4a/aae/dba-dbcompatibility-initcontainer + tag: 20.0.2 + keytool_init_container: + repository: cp.icr.io/cp/cp4a/ums/dba-keytool-initcontainer + tag: 20.0.2 + umsregistration_initjob: + repository: cp.icr.io/cp/cp4a/aae/dba-umsregistration-initjob + tag: 20.0.2 + db2u_tools: + repository: docker.io/ibmcom/db2u.tools + tag: 11.5.1.0-CN1 + db2: + repository: docker.io/ibmcom/db2 + tag: 11.5.1.0-CN1 + db2_auxiliary: + repository: docker.io/ibmcom/db2u.auxiliary.auth + tag: 11.5.1.0-CN1 + db2_init: + repository: docker.io/ibmcom/db2u.instdb + tag: 11.5.1.0-CN1 + db2_etcd: + repository: quay.io/coreos/etcd + tag: v3.3.10 + openldap: + repository: osixia/openldap + tag: 1.3.0 + busybox: + repository: docker.io/library/busybox + tag: latest + phpldapadmin: + repository: osixia/phpldapadmin + tag: 0.9.0 + + + ######################################################################## + ######## IBM Business Automation Navigator configuration ######## + ######################################################################## + navigator_configuration: + image: + repository: cp.icr.io/cp/cp4a/ban/navigator-sso + tag: ga-308-icn + + ######################################################################## + ######## IBM Resource Registry configuration ######## + ######################################################################## + resource_registry_configuration: + images: + resource_registry: + repository: cp.icr.io/cp/cp4a/aae/dba-etcd + tag: 20.0.2 + + ######################################################################## + ######## IBM Business Automation Studio configuration ######## + ######################################################################## + bastudio_configuration: + images: + bastudio: + repository: cp.icr.io/cp/cp4a/bas/bastudio + tag: 20.0.2 + jms_server: + image: + repository: cp.icr.io/cp/cp4a/bas/jms + tag: 20.0.2 + playback_server: + images: + db_job: + repository: cp.icr.io/cp/cp4a/bas/solution-server-helmjob-db + tag: 20.0.2 + solution_server: + repository: cp.icr.io/cp/cp4a/bas/solution-server + tag: 20.0.2 + + ######################################################################## + ######## IBM User and Group Management Service configuration ######## + ######################################################################## + ums_configuration: + images: + ums: + repository: cp.icr.io/cp/cp4a/ums/ums + tag: 20.0.2 + + + ######################################################################## + ######## IBM Business Automation Decision Services ######## + ######################################################################## + ads_configuration: + + decision_designer: + enabled: true + + decision_runtime: + enabled: true + + rr_integration: + image: + repository: cp.icr.io/cp/cp4a/ads/ads-rrintegration + tag: 20.0.2 + + front: + image: + repository: cp.icr.io/cp/cp4a/ads/ads-front + tag: 20.0.2 + + download_service: + ums_enabled: true + image: + repository: cp.icr.io/cp/cp4a/ads/ads-download + tag: 20.0.2 + + rest_api: + image: + repository: cp.icr.io/cp/cp4a/ads/ads-restapi + tag: 20.0.2 + + git_service: + image: + 
repository: cp.icr.io/cp/cp4a/ads/ads-gitservice + tag: 20.0.2 + + parsing_service: + image: + repository: cp.icr.io/cp/cp4a/ads/ads-parsing + tag: 20.0.2 + + run_service: + image: + repository: cp.icr.io/cp/cp4a/ads/ads-run + tag: 20.0.2 + + decision_runtime_service: + image: + repository: cp.icr.io/cp/cp4a/ads/ads-runtime + tag: 20.0.2 + diff --git a/descriptors/patterns/ibm_cp4a_cr_demo_digitalworker.yaml b/descriptors/patterns/ibm_cp4a_cr_demo_digitalworker.yaml new file mode 100644 index 00000000..ca9917f2 --- /dev/null +++ b/descriptors/patterns/ibm_cp4a_cr_demo_digitalworker.yaml @@ -0,0 +1,35 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +apiVersion: icp4a.ibm.com/v1 +kind: ICP4ACluster +metadata: + name: digitalworker + labels: + app.kubernetes.io/instance: ibm-dba + app.kubernetes.io/managed-by: ibm-dba + app.kubernetes.io/name: ibm-dba + release: 20.0.2 +spec: + appVersion: 20.0.2 + shared_configuration: + sc_deployment_patterns: digitalworker + sc_optional_components: + sc_deployment_type: demo + # Possible values OCP,ROKS + sc_deployment_platform: OCP + storage_configuration: + # For ROKS you can use : ibmc-file-gold or ibmc-file-silver value + sc_dynamic_storage_classname: "" + + ####################################################################### + ######## IBM Automation Digital Worker ######## + ####################################################################### + adw_configuration: diff --git a/descriptors/patterns/ibm_cp4a_cr_demo_foundation.yaml b/descriptors/patterns/ibm_cp4a_cr_demo_foundation.yaml new file mode 100644 index 00000000..4021ec0e --- /dev/null +++ b/descriptors/patterns/ibm_cp4a_cr_demo_foundation.yaml @@ -0,0 +1,164 @@ + +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +apiVersion: icp4a.ibm.com/v1 +kind: ICP4ACluster +metadata: + name: icp4adeploy + labels: + app.kubernetes.io/instance: ibm-dba + app.kubernetes.io/managed-by: ibm-dba + app.kubernetes.io/name: ibm-dba + release: 20.0.2 +spec: + ## CP4A application version + appVersion: 20.0.2 + + shared_configuration: + images: + keytool_job_container: + repository: cp.icr.io/cp/cp4a/ums/dba-keytool-jobcontainer + tag: 20.0.2 + dbcompatibility_init_container: + repository: cp.icr.io/cp/cp4a/aae/dba-dbcompatibility-initcontainer + tag: 20.0.2 + keytool_init_container: + repository: cp.icr.io/cp/cp4a/ums/dba-keytool-initcontainer + tag: 20.0.2 + umsregistration_initjob: + repository: cp.icr.io/cp/cp4a/aae/dba-umsregistration-initjob + tag: 20.0.2 + + ## FileNet Content Manager (FNCM) license and possible values are: user, non-production, and production. + ## This value could be different from the other licenses in the CR. + sc_deployment_fncm_license: non-production + + ## Business Automation Workflow (BAW) license and possible values are: user, non-production, and production. 
+ ## This value could be different from the other licenses in the CR. + sc_deployment_baw_license: non-production + + ## Use this parameter to specify the license for the CP4A deployment and + ## the possible values are: non-production and production and if not set, the license will + ## be defaulted to production. This value could be different from the other licenses in the CR. + sc_deployment_license: non-production + + ## CP4A patterns or capabilities to be deployed. The base pattern is "foundation pattern", which includes BAN (Navigator) and RR. + sc_deployment_patterns: foundation + + ## The optional components for the "foundation pattern" are ums, bas, and bai. If the user selects any of those optional components, + ## it will be set here. + sc_optional_components: + + ## This is the deployment type and the possible values are: demo and enterprise. + sc_deployment_type: demo + + ## This is the deployment platform supported for CP4A and the possible values are: ROKS, OCP, and other (which includes all Certified Kubernetes platforms) + sc_deployment_platform: + + ## All CP4A components must use/share the same docker image repository. For example, if IBM Entitled Registry is used, then + ## it should be "cp.icr.io". Otherwise, it will be a local docker registry (a mirror-registry sketch appears later in this diff). + sc_image_repository: cp.icr.io + + sc_deployment_hostname_suffix: "{{ meta.name }}" + + ## If the root certificate authority (CA) key of the external service is not signed by the operator root CA key, provide the TLS certificate of + ## the external service to the component's truststore. + trusted_certificate_list: [] + + storage_configuration: + ## For OCP 3.11 or 4.x, the storage class will be provided by the user + ## during the execution of the User script (cp4a-deployment.sh) + sc_dynamic_storage_classname: + + ## For ROKS (v3.x or v4.x), the Admin script creates the storage classes with the following names: cp4a-file-retain-bronze-gid, cp4a-file-retain-silver-gid, + ## and cp4a-file-retain-gold-gid + sc_slow_file_storage_classname: cp4a-file-retain-bronze-gid + sc_medium_file_storage_classname: cp4a-file-retain-silver-gid + sc_fast_file_storage_classname: cp4a-file-retain-gold-gid + + ############################################################################################## + # Kafka client configuration for IBM Business Automation Insights and other ICP4A products. + # + # Only used when BAI is selected as optional component. + ############################################################################################## + kafka_configuration: + # Comma-separated list of host:port pairs for connection to the Kafka cluster. + # This field is mandatory for any Kafka configuration. + bootstrap_servers: + # Value for the Kafka security.protocol property + # Valid values: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL. Default: PLAINTEXT. + security_protocol: + # Value for the Kafka sasl.mechanism property + # Valid values: PLAIN, SCRAM-SHA-512. Default: PLAIN. 
+ sasl_mechanism: + # If the Kafka server requires authentication or uses SSL communications, the value of this field + # must provide the name of a secret that holds the following keys as base64-encoded strings: + # kafka-username: Kafka username; leave empty if no authentication + # kafka-password: Kafka password; leave empty if no authentication + # kafka-server-certificate: server certificate for SSL communications; leave empty if SSL protocol is not used + connection_secret_name: + + ######################################################################## + ######## IBM Business Automation Navigator configuration ######## + ######################################################################## + navigator_configuration: + image: + repository: cp.icr.io/cp/cp4a/ban/navigator-sso + tag: ga-308-icn + + ######################################################################## + ######## IBM Resource Registry configuration ######## + ######################################################################## + resource_registry_configuration: + images: + resource_registry: + repository: cp.icr.io/cp/cp4a/aae/dba-etcd + tag: 20.0.2 + + ######################################################################## + ######## IBM Business Automation Studio configuration ######## + ######################################################################## + bastudio_configuration: + images: + bastudio: + repository: cp.icr.io/cp/cp4a/bas/bastudio + tag: 20.0.2 + jms_server: + image: + repository: cp.icr.io/cp/cp4a/bas/jms + tag: 20.0.2 + playback_server: + images: + db_job: + repository: cp.icr.io/cp/cp4a/bas/solution-server-helmjob-db + tag: 20.0.2 + solution_server: + repository: cp.icr.io/cp/cp4a/bas/solution-server + tag: 20.0.2 + + ######################################################################## + ######## IBM User and Group Management Service configuration ######## + ######################################################################## + ums_configuration: + images: + ums: + repository: cp.icr.io/cp/cp4a/ums/ums + tag: 20.0.2 + + ######################################################################## + ######## IBM Business Automation Insights configuration ######## + ######################################################################## + bai_configuration: + imageCredentials: + registry: cp.icr.io/cp/cp4a + businessPerformanceCenter: + allUsersAccess: true + createRoutes: true diff --git a/descriptors/patterns/ibm_cp4a_cr_demo_workflow-workstreams.yaml b/descriptors/patterns/ibm_cp4a_cr_demo_workflow-workstreams.yaml new file mode 100644 index 00000000..44d41673 --- /dev/null +++ b/descriptors/patterns/ibm_cp4a_cr_demo_workflow-workstreams.yaml @@ -0,0 +1,184 @@ + +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
+# +############################################################################### +apiVersion: icp4a.ibm.com/v1 +kind: ICP4ACluster +metadata: + name: workflow-workstreams + labels: + app.kubernetes.io/instance: ibm-dba + app.kubernetes.io/managed-by: ibm-dba + app.kubernetes.io/name: ibm-dba + release: 20.0.2 +spec: + appVersion: 20.0.2 + ## TIPS: The names of all variables in the spec field are converted to snake_case by the operator before running ansible + ## For example, serviceAccount in the spec becomes service_account in ansible. + ## It is recommended that you perform some type validation in Ansible on the variables to ensure that + ## your application is receiving expected input. + ## shared configuration among all tribe + shared_configuration: + ## Use this parameter to specify the license for the CP4A deployment and + ## the possible values are: non-production and production and if not set, the license will + ## be defaulted to production. This value could be different from the other licenses in the CR. + sc_deployment_license: non-production + + ## Business Automation Workflow (BAW) license and possible values are: user, non-production, and production. + ## This value could be different from the other licenses in the CR. + sc_deployment_baw_license: non-production + + ## FileNet Content Manager (FNCM) license and possible values are: user, non-production, and production. + ## This value could be different from the other licenses in the CR. + sc_deployment_fncm_license: non-production + + sc_deployment_patterns: workflow-workstreams + sc_optional_components: + sc_deployment_type: demo + sc_deployment_platform: OCP + sc_deployment_hostname_suffix: "{{ meta.name }}" + storage_configuration: + sc_dynamic_storage_classname: "" + root_ca_secret: "{{ meta.name }}-root-ca" + image_pull_secrets: + - admin.registrykey + images: + keytool_job_container: + repository: cp.icr.io/cp/cp4a/baw/dba-keytool-jobcontainer + tag: 20.0.2 + dbcompatibility_init_container: + repository: cp.icr.io/cp/cp4a/baw/dba-dbcompatibility-initcontainer + tag: 20.0.2 + keytool_init_container: + repository: cp.icr.io/cp/cp4a/baw/dba-keytool-initcontainer + tag: 20.0.2 + umsregistration_initjob: + repository: cp.icr.io/cp/cp4a/baw/dba-umsregistration-initjob + tag: 20.0.2 + db2u_tools: + repository: docker.io/ibmcom/db2u.tools + tag: 11.5.1.0-CN1 + db2: + repository: docker.io/ibmcom/db2 + tag: 11.5.1.0-CN1 + db2_auxiliary: + repository: docker.io/ibmcom/db2u.auxiliary.auth + tag: 11.5.1.0-CN1 + db2_init: + repository: docker.io/ibmcom/db2u.instdb + tag: 11.5.1.0-CN1 + db2_etcd: + repository: quay.io/coreos/etcd + tag: v3.3.10 + openldap: + repository: osixia/openldap + tag: 1.3.0 + busybox: + repository: docker.io/library/busybox + tag: latest + phpldapadmin: + repository: osixia/phpldapadmin + tag: 0.9.0 + ######################################################################## + ######## IBM FileNet Content Manager configuration ######## + ######################################################################## + ecm_configuration: + cpe: + image: + repository: cp.icr.io/cp/cp4a/fncm/cpe + tag: ga-555-p8cpe + cmis: + image: + repository: cp.icr.io/cp/cp4a/fncm/cmis + tag: ga-305-cmis + ######################################################################## + ######## IBM Business Automation Navigator configuration ######## + ######################################################################## + navigator_configuration: + image: + repository: cp.icr.io/cp/cp4a/ban/navigator-sso + tag: ga-308-icn + 
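Every component stanza in these CRs follows the same `image.repository`/`image.tag` shape, so retargeting a deployment at a local registry (the `sc_image_repository` case noted in the foundation CR above) comes down to rewriting the repository prefix. A minimal sketch, assuming a hypothetical internal mirror at `registry.example.com:5000` to which the images have already been copied:

```yaml
# Hypothetical override: same stanza shape as above, but pulling the
# Navigator image from an internal mirror instead of cp.icr.io.
navigator_configuration:
  image:
    repository: registry.example.com:5000/cp4a/ban/navigator-sso  # assumed mirror path
    tag: ga-308-icn
```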
######################################################################## + ######## IBM User and Group Management Service configuration ######## + ######################################################################## + ums_configuration: + images: + ums: + repository: cp.icr.io/cp/cp4a/ums/ums + tag: 20.0.2 + ######################################################################## + ######## IBM Application Engine configuration ######## + ######################################################################## + application_engine_configuration: + - name: workspace + images: + db_job: + repository: cp.icr.io/cp/cp4a/aae/solution-server-helmjob-db + tag: 20.0.2 + solution_server: + repository: cp.icr.io/cp/cp4a/aae/solution-server + tag: 20.0.2 + ######################################################################## + ######## IBM Resource Registry configuration ######## + ######################################################################## + resource_registry_configuration: + images: + resource_registry: + repository: cp.icr.io/cp/cp4a/baw/dba-etcd + tag: 20.0.2 + ######################################################################## + ######## IBM Workstream Server configuration ######## + ######################################################################## + baw_configuration: + - name: instance1 + image: + repository: cp.icr.io/cp/cp4a/baw/workflow-server + tag: "20.0.2" + pfs_bpd_database_init_job: + repository: cp.icr.io/cp/cp4a/baw/pfs-bpd-database-init-prod + tag: "20.0.2" + upgrade_job: + repository: cp.icr.io/cp/cp4a/baw/workflow-server-dbhandling + tag: "20.0.2" + ibm_workplace_job: + repository: cp.icr.io/cp/cp4a/baw/iaws-ibm-workplace + tag: "20.0.2" + content_integration: + init_job_image: + repository: cp.icr.io/cp/cp4a/baw/iaws-ps-content-integration + tag: "20.0.2" + case: + init_job_image: + repository: cp.icr.io/cp/cp4a/baw/workflow-server-case-initialization + tag: "20.0.2" + jms: + image: + repository: cp.icr.io/cp/cp4a/baw/jms + tag: "20.0.2" + ######################################################################## + ######## IBM Process Federation Server configuration ######## + ######################################################################## + pfs_configuration: + image: + repository: cp.icr.io/cp/cp4a/baw/pfs-prod + tag: "20.0.2" + ######################################################################## + ######## Elasticsearch configuration ######## + ######################################################################## + elasticsearch_configuration: + es_image: + repository: cp.icr.io/cp/cp4a/baw/pfs-elasticsearch-prod + tag: "20.0.2" + es_init_image: + repository: cp.icr.io/cp/cp4a/baw/pfs-init-prod + tag: "20.0.2" + es_nginx_image: + repository: cp.icr.io/cp/cp4a/baw/pfs-nginx-prod + tag: "20.0.2" diff --git a/descriptors/patterns/ibm_cp4a_cr_demo_workstreams.yaml b/descriptors/patterns/ibm_cp4a_cr_demo_workflow.yaml similarity index 60% rename from descriptors/patterns/ibm_cp4a_cr_demo_workstreams.yaml rename to descriptors/patterns/ibm_cp4a_cr_demo_workflow.yaml index cb662864..6f661c75 100644 --- a/descriptors/patterns/ibm_cp4a_cr_demo_workstreams.yaml +++ b/descriptors/patterns/ibm_cp4a_cr_demo_workflow.yaml @@ -1,164 +1,169 @@ -############################################################################### -# -# Licensed Materials - Property of IBM -# -# (C) Copyright IBM Corp. 2020. All Rights Reserved. 
-# -# US Government Users Restricted Rights - Use, duplication or -# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. -# -############################################################################### -apiVersion: icp4a.ibm.com/v1 -kind: ICP4ACluster -metadata: - name: workstreams - labels: - app.kubernetes.io/instance: ibm-dba - app.kubernetes.io/managed-by: ibm-dba - app.kubernetes.io/name: ibm-dba - release: 20.0.1 -spec: - appVersion: 20.0.1 - ## TIPS: The names of all variables in the spec field are converted to snake_case by the operator before running ansible - ## For example, serviceAccount in the spec becomes service_account in ansible. - ## It is recommended that you perform some type validation in Ansible on the variables to ensure that - ## your application is receiving expected input. - ## shared configuration among all tribe - shared_configuration: - sc_deployment_patterns: workstreams - sc_deployment_type: demo - sc_deployment_platform: OCP - sc_deployment_hostname_suffix: "{{ meta.name }}" - storage_configuration: - sc_dynamic_storage_classname: "" - root_ca_secret: "{{ meta.name }}-root-ca" - image_pull_secrets: - - image-pull-secret - images: - keytool_job_container: - repository: cp.icr.io/cp/cp4a/ums/dba-keytool-jobcontainer - tag: 20.0.1 - dbcompatibility_init_container: - repository: cp.icr.io/cp/cp4a/aae/dba-dbcompatibility-initcontainer - tag: 20.0.1 - keytool_init_container: - repository: cp.icr.io/cp/cp4a/ums/dba-keytool-initcontainer - tag: 20.0.1 - umsregistration_initjob: - repository: cp.icr.io/cp/cp4a/aae/dba-umsregistration-initjob - tag: 20.0.1 - db2u_tools: - repository: docker.io/ibmcom/db2u.tools - tag: 11.5.1.0-CN1 - db2: - repository: docker.io/ibmcom/db2 - tag: 11.5.1.0-CN1 - db2_auxiliary: - repository: docker.io/ibmcom/db2u.auxiliary.auth - tag: 11.5.1.0-CN1 - db2_init: - repository: docker.io/ibmcom/db2u.instdb - tag: 11.5.1.0-CN1 - db2_etcd: - repository: quay.io/coreos/etcd - tag: v3.3.10 - openldap: - repository: osixia/openldap - tag: 1.3.0 - busybox: - repository: docker.io/library/busybox - tag: latest - - ######################################################################## - ######## IBM FileNet Content Manager configuration ######## - ######################################################################## - ecm_configuration: - cpe: - image: - repository: cp.icr.io/cp/cp4a/fncm/cpe - tag: ga-554-p8cpe-if001 - cmis: - image: - repository: cp.icr.io/cp/cp4a/fncm/cmis - tag: ga-304-cmis-if010 - ######################################################################## - ######## IBM Business Automation Navigator configuration ######## - ######################################################################## - navigator_configuration: - image: - repository: cp.icr.io/cp/cp4a/ban/navigator-sso - tag: ga-307-icn-if002 - ######################################################################## - ######## IBM User and Group Management Service configuration ######## - ######################################################################## - ums_configuration: - teamserver: - admingroup: P8Administrators - images: - ums: - repository: cp.icr.io/cp/cp4a/ums/ums - tag: 20.0.1 - ######################################################################## - ######## IBM Application Engine configuration ######## - ######################################################################## - application_engine_configuration: - - name: workspace - images: - db_job: - repository: cp.icr.io/cp/cp4a/aae/solution-server-helmjob-db - tag: 20.0.1 - 
solution_server: - repository: cp.icr.io/cp/cp4a/aae/solution-server - tag: 20.0.1 - ######################################################################## - ######## IBM Resource Registry configuration ######## - ######################################################################## - resource_registry_configuration: - images: - resource_registry: - repository: cp.icr.io/cp/cp4a/aae/dba-etcd - tag: 20.0.1 - ######################################################################## - ######## IBM Workstream Server configuration ######## - ######################################################################## - iaws_configuration: - - name: instance1 - iaws_server: - image: - repository: cp.icr.io/cp/cp4a/iaws/iaws-server - tag: 20.0.1 - pfs_bpd_database_init_job: - repository: cp.icr.io/cp/cp4a/iaws/pfs-bpd-database-init-prod - tag: "20.0.1" - upgrade_job: - repository: cp.icr.io/cp/cp4a/iaws/iaws-server-dbhandling - tag: "20.0.1" - ibm_workplace_job: - repository: cp.icr.io/cp/cp4a/iaws/iaws-ibm-workplace - tag: "20.0.1" - content_integration: - init_job_image: - repository: cp.icr.io/cp/cp4a/iaws/iaws-ps-content-integration - tag: "20.0.1" - jms: - image: - repository: cp.icr.io/cp/cp4a/iaws/jms - tag: "20.0.1" - ######################################################################## - ######## IBM Process Federation Server configuration ######## - ######################################################################## - pfs_configuration: - pfs: - image: - repository: cp.icr.io/cp/cp4a/iaws/pfs-prod - tag: "20.0.1" - elasticsearch: - es_image: - repository: cp.icr.io/cp/cp4a/iaws/pfs-elasticsearch-prod - tag: "20.0.1" - pfs_init_image: - repository: cp.icr.io/cp/cp4a/iaws/pfs-init-prod - tag: "20.0.1" - nginx_image: - repository: cp.icr.io/cp/cp4a/iaws/pfs-nginx-prod - tag: "20.0.1" + +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +apiVersion: icp4a.ibm.com/v1 +kind: ICP4ACluster +metadata: + name: workflow + labels: + app.kubernetes.io/instance: ibm-dba + app.kubernetes.io/managed-by: ibm-dba + app.kubernetes.io/name: ibm-dba + release: 20.0.2 +spec: + appVersion: 20.0.2 + ## TIPS: The names of all variables in the spec field are converted to snake_case by the operator before running ansible. + ## For example, serviceAccount in the spec becomes service_account in ansible. + ## It is recommended that you perform some type validation in Ansible on the variables to ensure that + ## your application is receiving expected input. + ## shared configuration among all tribes + shared_configuration: + ## Use this parameter to specify the license for the CP4A deployment; + ## the possible values are: non-production and production. If not set, the license + ## defaults to production. This value could be different from the other licenses in the CR. + sc_deployment_license: non-production + + ## Business Automation Workflow (BAW) license and possible values are: user, non-production, and production. + ## This value could be different from the other licenses in the CR. + sc_deployment_baw_license: non-production + + ## FileNet Content Manager (FNCM) license and possible values are: user, non-production, and production.
+ ## This value could be different from the other licenses in the CR. + sc_deployment_fncm_license: non-production + + sc_deployment_patterns: workflow + sc_optional_components: + sc_deployment_type: demo + sc_deployment_platform: OCP + sc_deployment_hostname_suffix: "{{ meta.name }}" + storage_configuration: + sc_dynamic_storage_classname: "" + root_ca_secret: "{{ meta.name }}-root-ca" + image_pull_secrets: + - admin.registrykey + images: + keytool_job_container: + repository: cp.icr.io/cp/cp4a/baw/dba-keytool-jobcontainer + tag: 20.0.2 + dbcompatibility_init_container: + repository: cp.icr.io/cp/cp4a/baw/dba-dbcompatibility-initcontainer + tag: 20.0.2 + keytool_init_container: + repository: cp.icr.io/cp/cp4a/baw/dba-keytool-initcontainer + tag: 20.0.2 + umsregistration_initjob: + repository: cp.icr.io/cp/cp4a/baw/dba-umsregistration-initjob + tag: 20.0.2 + db2u_tools: + repository: docker.io/ibmcom/db2u.tools + tag: 11.5.1.0-CN1 + db2: + repository: docker.io/ibmcom/db2 + tag: 11.5.1.0-CN1 + db2_auxiliary: + repository: docker.io/ibmcom/db2u.auxiliary.auth + tag: 11.5.1.0-CN1 + db2_init: + repository: docker.io/ibmcom/db2u.instdb + tag: 11.5.1.0-CN1 + db2_etcd: + repository: quay.io/coreos/etcd + tag: v3.3.10 + openldap: + repository: osixia/openldap + tag: 1.3.0 + busybox: + repository: docker.io/library/busybox + tag: latest + phpldapadmin: + repository: osixia/phpldapadmin + tag: 0.9.0 + ######################################################################## + ######## IBM FileNet Content Manager configuration ######## + ######################################################################## + ecm_configuration: + cpe: + image: + repository: cp.icr.io/cp/cp4a/fncm/cpe + tag: ga-555-p8cpe + cmis: + image: + repository: cp.icr.io/cp/cp4a/fncm/cmis + tag: ga-305-cmis + ######################################################################## + ######## IBM Business Automation Navigator configuration ######## + ######################################################################## + navigator_configuration: + image: + repository: cp.icr.io/cp/cp4a/ban/navigator-sso + tag: ga-308-icn + ######################################################################## + ######## IBM User and Group Management Service configuration ######## + ######################################################################## + ums_configuration: + images: + ums: + repository: cp.icr.io/cp/cp4a/ums/ums + tag: 20.0.2 + ######################################################################## + ######## IBM Resource Registry configuration ######## + ######################################################################## + resource_registry_configuration: + images: + resource_registry: + repository: cp.icr.io/cp/cp4a/baw/dba-etcd + tag: 20.0.2 + ######################################################################## + ######## IBM Workstream Server configuration ######## + ######################################################################## + baw_configuration: + - name: instance1 + image: + repository: cp.icr.io/cp/cp4a/baw/workflow-server + tag: "20.0.2" + pfs_bpd_database_init_job: + repository: cp.icr.io/cp/cp4a/baw/pfs-bpd-database-init-prod + tag: "20.0.2" + upgrade_job: + repository: cp.icr.io/cp/cp4a/baw/workflow-server-dbhandling + tag: "20.0.2" + content_integration: + init_job_image: + repository: cp.icr.io/cp/cp4a/baw/iaws-ps-content-integration + tag: "20.0.2" + case: + init_job_image: + repository: cp.icr.io/cp/cp4a/baw/workflow-server-case-initialization + tag: "20.0.2" + jms: + image: + 
repository: cp.icr.io/cp/cp4a/baw/jms + tag: "20.0.2" + ######################################################################## + ######## IBM Process Federation Server configuration ######## + ######################################################################## + pfs_configuration: + image: + repository: cp.icr.io/cp/cp4a/baw/pfs-prod + tag: "20.0.2" + ######################################################################## + ######## Elasticsearch configuration ######## + ######################################################################## + elasticsearch_configuration: + es_image: + repository: cp.icr.io/cp/cp4a/baw/pfs-elasticsearch-prod + tag: "20.0.2" + es_init_image: + repository: cp.icr.io/cp/cp4a/baw/pfs-init-prod + tag: "20.0.2" + es_nginx_image: + repository: cp.icr.io/cp/cp4a/baw/pfs-nginx-prod + tag: "20.0.2" diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_aca.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_aca.yaml new file mode 100644 index 00000000..6e75ce3e --- /dev/null +++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_aca.yaml @@ -0,0 +1,672 @@ +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +############################################################################### +apiVersion: icp4a.ibm.com/v1 +kind: ICP4ACluster +metadata: + name: icp4aca + labels: + app.kubernetes.io/instance: ibm-dba + app.kubernetes.io/managed-by: ibm-dba + app.kubernetes.io/name: ibm-dba + release: 20.0.2 +spec: + appVersion: 20.0.2 + shared_configuration: + ## Use this parameter to specify the license for the CP4A deployment; + ## the possible values are: non-production and production. If not set, the license + ## defaults to production. This value could be different from the other licenses in the CR. + sc_deployment_license: "production" + ## All CP4A components must use/share the image_pull_secrets to pull images + image_pull_secrets: + - admin.registrykey + ## All CP4A components must use/share the same docker image repository. For example, if IBM Entitled Registry is used, then + ## it should be "cp.icr.io". Otherwise, it will be a local docker registry. + sc_image_repository: cp.icr.io + ## The deployment type as selected by the user. Possible values are: demo, enterprise. + sc_deployment_type: "enterprise" + sc_deployment_context: "CP4A" + ## The platform to be deployed specified by the user. Possible values are: OCP, ROKS and other. This is normally populated by the User script + ## based on input from the user. + sc_deployment_platform: "OCP" + ## CP4A patterns or capabilities to be deployed. This CR represents the "contentanalyzer" pattern, which includes the following + ## mandatory components: ums, rr, icn (BAN/Navigator), bastudio + sc_deployment_patterns: contentanalyzer + ## The optional components to be installed if listed here. This is normally populated by the User script based on input from the user. + ## Content Analyzer does not have optional components to customize. + sc_optional_components: + root_ca_secret: "{{ meta.name }}-root-ca" + ## For OCP, this is used to create routes; you should input a valid hostname in the required field.
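+ ## A sketch of the admin.registrykey pull secret referenced by image_pull_secrets above.
+ ## The structure below is a standard kubernetes.io/dockerconfigjson secret; the entitlement
+ ## key is a placeholder you must replace with your own (illustrative, not a shipped default):
+ ##
+ ## apiVersion: v1
+ ## kind: Secret
+ ## metadata:
+ ##   name: admin.registrykey
+ ## type: kubernetes.io/dockerconfigjson
+ ## data:
+ ##   .dockerconfigjson: <base64 of {"auths":{"cp.icr.io":{"username":"cp","password":"<entitlement-key>"}}}>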
+ sc_deployment_hostname_suffix: "" + ## If the root certificate authority (CA) key of the external service is not signed by the operator root CA key, provide the TLS certificate of + ## the external service to the component's truststore. + trusted_certificate_list: [] + ## On OCP 3.x and 4.x, the User script will populate these three (3) parameters based on your input for "enterprise" deployment. + ## If you are deploying manually without using the User script, provide the different storage classes for the slow, medium, + ## and fast storage parameters below. If you only have 1 storage class defined, then you can use that 1 storage class for all 3 parameters. + storage_configuration: + sc_slow_file_storage_classname: "" + sc_medium_file_storage_classname: "" + sc_fast_file_storage_classname: "" + + images: + keytool_job_container: + repository: cp.icr.io/cp/cp4a/ums/dba-keytool-jobcontainer + tag: 20.0.2 + dbcompatibility_init_container: + repository: cp.icr.io/cp/cp4a/aae/dba-dbcompatibility-initcontainer + tag: 20.0.2 + keytool_init_container: + repository: cp.icr.io/cp/cp4a/ums/dba-keytool-initcontainer + tag: 20.0.2 + umsregistration_initjob: + repository: cp.icr.io/cp/cp4a/aae/dba-umsregistration-initjob + tag: 20.0.2 + pull_policy: IfNotPresent + + ## The beginning section of LDAP configuration for CP4A + ldap_configuration: + ## The possible values are: "IBM Security Directory Server" or "Microsoft Active Directory" + lc_selected_ldap_type: "Microsoft Active Directory" + ## The name of the LDAP server to connect + lc_ldap_server: "" + ## The port of the LDAP server to connect. Some possible values are: 389, 636, etc. + lc_ldap_port: "" + ## The LDAP bind secret for LDAP authentication. The secret is expected to have ldapUsername and ldapPassword keys. Refer to Knowledge Center for more info. + lc_bind_secret: ldap-bind-secret + ## The LDAP base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc + lc_ldap_base_dn: "" + ## Enable SSL/TLS for LDAP communication. Refer to Knowledge Center for more info. + lc_ldap_ssl_enabled: true + ## The name of the secret that contains the LDAP SSL/TLS certificate. + lc_ldap_ssl_secret_name: "" + ## The LDAP user name attribute. One possible value is "*:cn" for TDS and "user:sAMAccountName" for AD. Refer to Knowledge Center for more info. + lc_ldap_user_name_attribute: "" + ## The LDAP user display name attribute. One possible value is "cn" for TDS and "sAMAccountName" for AD. Refer to Knowledge Center for more info. + lc_ldap_user_display_name_attr: "" + ## The LDAP group base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc + lc_ldap_group_base_dn: "" + ## The LDAP group name attribute. One possible value is "*:cn" for TDS and "*:cn" for AD. Refer to Knowledge Center for more info. + lc_ldap_group_name_attribute: "*:cn" + ## The LDAP group display name attribute. One possible value for both TDS and AD is "cn". Refer to Knowledge Center for more info. + lc_ldap_group_display_name_attr: "cn" + ## The LDAP group membership search filter string. One possible value is "(&(cn=%v)(|(objectclass=groupOfNames)(objectclass=groupOfUniqueNames)(objectclass=groupOfURLs)))" for TDS + ## and "(&(cn=%v)(objectcategory=group))" for AD. + lc_ldap_group_membership_search_filter: "" + ## The LDAP group membership ID map. One possible value is "groupofnames:member" for TDS and "memberOf:member" for AD. + lc_ldap_group_member_id_map: "" + ## The User script will uncomment the needed section based on your input.
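+ ## A filled-in sketch of the Active Directory values described above. Every value here is a
+ ## hypothetical example for illustration; substitute your own environment's values:
+ ##
+ ##   lc_selected_ldap_type: "Microsoft Active Directory"
+ ##   lc_ldap_server: "ad.example.com"
+ ##   lc_ldap_port: "636"
+ ##   lc_ldap_base_dn: "dc=example,dc=com"
+ ##   lc_ldap_ssl_enabled: true
+ ##   lc_ldap_ssl_secret_name: "ldap-ssl-cert-secret"
+ ##   lc_ldap_user_name_attribute: "user:sAMAccountName"
+ ##   lc_ldap_user_display_name_attr: "sAMAccountName"
+ ##   lc_ldap_group_base_dn: "ou=groups,dc=example,dc=com"
+ ##   lc_ldap_group_membership_search_filter: "(&(cn=%v)(objectcategory=group))"
+ ##   lc_ldap_group_member_id_map: "memberOf:member"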
If you are deploying without the User script, + ## uncomment the necessary section (depending on whether you are using Active Directory (ad) or Tivoli Directory Service (tds)) accordingly. + ad: + lc_ad_gc_host: "" + lc_ad_gc_port: "" + lc_user_filter: "(&(samAccountName=%v)(objectClass=user))" + lc_group_filter: "(&(samAccountName=%v)(objectclass=group))" + # tds: + # lc_user_filter: "(&(cn=%v)(objectclass=person))" + # lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))" + + ## The beginning section of database configuration for CP4A + datasource_configuration: + ## The database configuration for ICN (Navigator) - aka BAN (Business Automation Navigator) + dc_icn_datasource: + ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle". This should be the same as the + ## GCD and object store configuration above. + dc_database_type: "" + ## Provide the ICN datasource name. The default value is "ECMClientDS". + dc_common_icn_datasource_name: "ECMClientDS" + database_servername: "" + ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521" + database_port: "" + ## Provide the name of the database for ICN (Navigator). For example: "ICNDB" + database_name: "" + ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//:1521/orcl" + dc_oracle_icn_jdbc_url: "" + ###################################################################################### + ## If the database type is "Db2HADR", then complete the rest of the parameters below. + ## Otherwise, remove or comment out the rest of the parameters below. + ###################################################################################### + dc_hadr_standby_servername: "" + ## Provide the standby database server port. For Db2, the default is "50000". + dc_hadr_standby_port: "" + ## Provide the validation timeout. If no preference, keep the default value. + dc_hadr_validation_timeout: 15 + ## Provide the retry interval. If no preference, keep the default value. + dc_hadr_retry_interval_for_client_reroute: 15 + ## Provide the max # of retries. If no preference, keep the default value. + dc_hadr_max_retries_for_client_reroute: 3 + ## The database configuration for UMS (User Management Service) + dc_ums_datasource: + ## Provide the database type from your infrastructure. The possible values are "db2" or "oracle". This should be the same as the + ## other datasource configuration above. Db2 with HADR is automatically activated if dc_ums_oauth_alternate_hosts and dc_ums_oauth_alternate_ports + ## are set. + dc_ums_oauth_type: "" + ## Provide the database server name or IP address of the database server. + dc_ums_oauth_host: "" + ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521". + dc_ums_oauth_port: "" + ## Provide the name of the database for UMS. For example: "UMSDB" + dc_ums_oauth_name: "" + dc_ums_oauth_schema: OAuthDBSchema + dc_ums_oauth_ssl: true + dc_ums_oauth_ssl_secret_name: + dc_ums_oauth_driverfiles: + dc_ums_oauth_alternate_hosts: + dc_ums_oauth_alternate_ports: + ## The database configuration for teamserver + ## Provide the database type from your infrastructure. The possible values are "db2" or "oracle". This should be the same as the + ## other datasource configuration above.
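+ ## A filled-in sketch of the dc_icn_datasource block above for a plain Db2 server. All
+ ## values are hypothetical examples for illustration:
+ ##
+ ##   dc_database_type: "db2"
+ ##   dc_common_icn_datasource_name: "ECMClientDS"
+ ##   database_servername: "db2.example.com"
+ ##   database_port: "50000"
+ ##   database_name: "ICNDB"
+ ##
+ ## For plain "db2" (no HADR), the dc_hadr_* parameters would be removed or commented out.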
Db2 with HADR is automatically activated if dc_ums_teamserver_alternate_hosts and dc_ums_teamserver_alternate_ports + ## are set. + dc_ums_teamserver_type: "" + dc_ums_teamserver_host: "" + ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521". + dc_ums_teamserver_port: "" + ## Provide the name of the database for UMS teamserver. For example: "UMSDB" + dc_ums_teamserver_name: "" + dc_ums_teamserver_ssl: true + dc_ums_teamserver_ssl_secret_name: + dc_ums_teamserver_driverfiles: + dc_ums_teamserver_alternate_hosts: + dc_ums_teamserver_alternate_ports: + ## The database configuration for ACA (Content Analyzer) + dc_ca_datasource: + ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" + dc_database_type: "" + ## Provide the hostname of the primary DB2 server in this variable and, if your DB2 hostname is not resolvable by DNS, provide the corresponding IP address for the `database_IP` parameter below when `dc_database_type` is `db2HADR`. + database_servername: "" + ## Provide the name of the BASE database for ACA. For example: "BASECA" + database_name: "" + ## Provide the names of the TENANT databases for ACA. + tenant_databases: + - "" + ## Provide the database server port. For Db2, the default is "50000". + database_port: "" + ## Enable SSL/TLS for database communication. Refer to Knowledge Center for more info. + dc_database_ssl_enabled: false + ###################################################################################### + ## If the database type is "Db2HADR", then complete the rest of the parameters below. + ## Otherwise, remove or comment out the rest of the parameters below. + ###################################################################################### + + ## Provide the standby database server name; if your standby database server name cannot be resolved by DNS, provide the corresponding IP address for the `dc_hadr_standby_ip` parameter below. + dc_hadr_standby_servername: "" + ## Provide the standby database server port. For Db2, the default is "50000". + dc_hadr_standby_port: "" + ## Provide the validation timeout. If no preference, keep the default value. + dc_hadr_validation_timeout: 15 + ## Provide the retry interval. If no preference, keep the default value. + dc_hadr_retry_interval_for_client_reroute: 15 + ## Provide the max # of retries. If no preference, keep the default value. + dc_hadr_max_retries_for_client_reroute: 3 + ## Provide the primary database server IP address if database_servername cannot be resolved by DNS. + database_ip: "" + ## Provide the standby database server IP address if dc_hadr_standby_servername cannot be resolved by DNS. + dc_hadr_standby_ip: "" + ############################################################################ + ######## IBM Business Automation Content Analyzer configuration ######## + ############################################################################ + ca_configuration: + global: + arch: "amd64" + # The database secret name created as part of the pre-req. Default will be "aca-basedb" if blank. + db_secret: + image: + repository: "default-route-openshift-image-registry.apps.almanac.os.fyre.ibm.com/bacaop1" + tag: "20.0.2" + pull_policy: "IfNotPresent" + # The maximum number of retries for the CA deployment verification task until all the pods are in Ready status, with a delay of 20 seconds between each attempt.
Default will be 90 if blank. + retries: "90" + # Timeout for Content Analyzer's ocr_extraction, classifyprocess, processing, updatefiledetail components. Default will be 300 seconds if blank. + celery: + process_timeout: 300 + # ACA configuration PVC setting + configs: + claimname: "sp-config-pvc" + #Size of config PVC in GB + size: + # ACA log PVC setting + logs: + claimname: "sp-log-pvc" + log_level: "info" + #Size of log PVC in GB + size: + # ACA data PVC setting + data: + claimname: "sp-data-pvc" + #Size of Data PVC in GB + size: + # Redis configuration + redis: + resources: + limits: + memory: "640Mi" + cpu: "0.25" + replica_count: 3 + # RabbitMQ configuration + rabbitmq: + resources: + limits: + memory: "640Mi" + cpu: "0.5" + replica_count: 3 + # Caller_api configuration + caller_api: + replica_count: 2 + resources: + limits: + memory: "700Mi" + cpu: "0.6" + # Backend configuration + spbackend: + # Allows you to specify a specific port (nodePort) for the backend. The port number must be between 30000-32767. A random port will be generated if blank. + port: + replica_count: 2 + resources: + limits: + memory: "640Mi" + cpu: "0.6" + # Frontend configuration + spfrontend: + # Allows you to specify a specific port (nodePort) for the frontend. The port number must be between 30000-32767. A random port will be generated if blank. + port: + replica_count: 2 + resources: + limits: + memory: "480Mi" + cpu: "0.6" + backend_host: "" + frontend_host: "" + # Postprocessing configuration + postprocessing: + process_timeout: 1500 + replica_count: 2 + max_unavailable_count: 1 + resources: + limits: + memory: "480Mi" + cpu: "0.6" + # Pdfprocess configuration + pdfprocess: + process_timeout: 1500 + replica_count: 2 + max_unavailable_count: 1 + resources: + limits: + memory: "960Mi" + cpu: "0.6" + # utfprocess configuration + utfprocess: + process_timeout: 1500 + replica_count: 2 + max_unavailable_count: 1 + resources: + limits: + memory: "960Mi" + cpu: "1" + # setup configuration + setup: + process_timeout: 120 + replica_count: 2 + max_unavailable_count: 1 + resources: + limits: + memory: "480Mi" + cpu: "0.6" + # ocrextraction configuration + ocrextraction: + replica_count: 2 + max_unavailable_count: 1 + resources: + limits: + memory: "1440Mi" + cpu: "1" + # classifyprocess configuration + classifyprocess: + replica_count: 2 + max_unavailable_count: 1 + resources: + limits: + memory: "960Mi" + cpu: "1" + # processingextraction configuration + processingextraction: + replica_count: 2 + max_unavailable_count: 1 + resources: + limits: + memory: "1440Mi" + cpu: "1" + # updatefiledetail configuration + updatefiledetail: + replica_count: 2 + max_unavailable_count: 1 + resources: + limits: + memory: "480Mi" + cpu: "0.6" + +# ######################################################################## +# ######## User Management Service configuration ######## +# ######################################################################## +# ums_configuration: +# existing_claim_name: +# replica_count: 2 +# service_type: Route +# hostname: +# port: 443 +# images: +# ums: +# repository: cp.icr.io/cp/cp4a/ums/ums +# tag: 20.0.2 +# admin_secret_name: ibm-dba-ums-secret +# ## optional for secure communication with UMS +# external_tls_secret_name: ibm-dba-ums-external-tls-secret +# ## optional for secure communication with UMS +# external_tls_ca_secret_name: ibm-dba-ums-external-tls-ca-secret +# ## optional for secure communication with UMS +# external_tls_teams_secret_name: ibm-dba-ums-external-tls-teams-secret +# ## optional for secure
communication with UMS +# external_tls_scim_secret_name: ibm-dba-ums-external-tls-scim-secret +# ## optional for secure communication with UMS +# external_tls_sso_secret_name: ibm-dba-ums-external-tls-sso-secret +# oauth: +# ## optional: full DN of an LDAP group that is authorized to manage OIDC clients, in addition to primary admin from admin secret +# client_manager_group: +# ## optional: full DN of an LDAP group that is authorized to manage app_tokens, in addition to primary admin from admin secret +# token_manager_group: +# ## optional: lifetime of OAuth access_tokens. default is 7200s +# access_token_lifetime: +# ## optional: lifetime of app-tokens. default is 366d +# app_token_lifetime: +# ## optional: lifetime of app-passwords. default is 366d +# app_password_lifetime: +# ## optional: maximum number of app-tokens or app-passwords per client. default is 100 +# app_token_or_password_limit: +# ## optional: encoding / encryption when storing client secrets in OAuth database. Default is xor for compatibility. Recommended value is PBKDF2WithHmacSHA512 +# client_secret_encoding: +# resources: +# limits: +# cpu: 500m +# memory: 512Mi +# requests: +# cpu: 200m +# memory: 256Mi +# ## Horizontal Pod Autoscaler +# autoscaling: +# enabled: true +# min_replicas: 2 +# max_replicas: 5 +# target_average_utilization: 98 +# use_custom_jdbc_drivers: false +# use_custom_binaries: false +# custom_secret_name: +# custom_xml: +# logs: +# console_format: json +# console_log_level: INFO +# console_source: message,trace,accessLog,ffdc,audit +# trace_format: ENHANCED +# trace_specification: "*=info" +# +# ######################################################################## +# ######## Resource Registry configuration ######## +# ######################################################################## +# resource_registry_configuration: +# admin_secret_name: resource-registry-admin-secret +# hostname: +# port: +# replica_size: 3 +# images: +# pull_policy: IfNotPresent +# resource_registry: +# repository: cp.icr.io/cp/cp4a/aae/dba-etcd +# tag: 20.0.2 +# tls: +# tls_secret: rr-tls-client-secret +# probe: +# liveness: +# initial_delay_seconds: 60 +# period_seconds: 10 +# timeout_seconds: 5 +# success_threshold: 1 +# failure_threshold: 3 +# readiness: +# initial_delay_seconds: 10 +# period_seconds: 10 +# timeout_seconds: 5 +# success_threshold: 1 +# failure_threshold: 3 +# resource: +# limits: +# cpu: "500m" +# memory: "512Mi" +# requests: +# cpu: "200m" +# memory: "256Mi" +# auto_backup: +# enable: false +# minimal_time_interval: 180 +# pvc_name: rr-autobackup-pvc +# dynamic_provision: +# enable: false +# access_mode: ReadWriteMany +# size: 3Gi +# storage_class: nfs +# +# ############################################################################# +# ## IBM Business Automation Studio configuration ## +# ############################################################################# +# bastudio_configuration: +# #Adjust this one if you created the secret with a name other than the default +# admin_secret_name: "{{ meta.name }}-bas-admin-secret" +# #Provide BAStudio default administrator ID +# admin_user: "" +# replica_size: 1 +# database: +# # The database type used. Only DB2 and Oracle are supported +# type: "db2" +# #DB2 - Provide the database server hostname for BAStudio use +# host: "" +# # DB2 - Provide the database name for BAStudio use (a filled-in sketch follows below) +# # The database provided should be created by the BAStudio SQL script template.
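+# A filled-in sketch of the Db2 fields that follow; all values are hypothetical examples
+# for illustration, not shipped defaults:
+#
+#   type: "db2"
+#   host: "db2.example.com"
+#   name: "BASDB"
+#   port: "50000"
+#   ssl_enabled: false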
+# name: "" +# # DB2 - Provide the database server port for BAStudio use +# port: "" +# # DB2 - If you want to enable database automatic client reroute (ACR) for HADR, you must configure alternative_host and alternative_port. Otherwise, leave them blank. +# alternative_host: +# alternative_port: +# # If you enabled SSL for the database, set this to true +# ssl_enabled: false +# # Oracle - If you are using Oracle, input the Oracle database connection URL here +# oracle_url: +# cm_max_pool_size: '50' +# cm_min_pool_size: '2' +# # If you enabled SSL for the database, save the TLS certificate used by the database in a secret and put the name here +# certificate_secret_name: +# # If you are using custom JDBC drivers (for example, for Oracle or a special DB2 driver), set this to true +# user_custom_jdbc_drivers: false +# # The PVC name bound to the PV where the custom JDBC driver files are stored +# custom_jdbc_pvc: +# # The custom JDBC file set +# jdbc_driver_files: 'db2jcc4.jar db2jcc_license_cisuz.jar db2jcc_license_cu.jar' +# autoscaling: +# enabled: false +# minReplicas: 1 +# maxReplicas: 3 +# targetAverageUtilization: 80 +# external_connection_timeout: 60s +# # Custom liberty XML configurations +# custom_xml: +# # The secret name which contains custom liberty configurations +# custom_secret_name: +# # The Business Automation Custom XML configurations +# bastudio_custom_xml: +# max_cached_objects_during_refactoring: 256 +# logs: +# consoleFormat: 'json' +# consoleLogLevel: 'INFO' +# consoleSource: 'message,trace,accessLog,ffdc,audit' +# traceFormat: 'ENHANCED' +# traceSpecification: '*=info' +# tls: +# tlsTrustList: [] +# liveness_probe: +# initialDelaySeconds: 300 +# periodSeconds: 10 +# timeoutSeconds: 5 +# failureThreshold: 3 +# successThreshold: 1 +# readiness_probe: +# initialDelaySeconds: 240 +# periodSeconds: 5 +# timeoutSeconds: 5 +# failureThreshold: 6 +# successThreshold: 1 +# resources: +# bastudio: +# limits: +# cpu: '4' +# memory: '3Gi' +# requests: +# cpu: '2' +# memory: '2Gi' +# init_process: +# limits: +# cpu: '500m' +# memory: '512Mi' +# requests: +# cpu: '200m' +# memory: '256Mi' +# csrf_referrer: +# whitelist: '' +# #----------------------------------------------------------------------- +# # App Engine Playback Server (playback_server) can be only one instance. This is different from App Engine (where application_engine_configuration is a list and you can deploy multiple instances). +# #----------------------------------------------------------------------- +# playback_server: +# hostname: +# port: 443 +# admin_secret_name: "" +# admin_user: "" +# external_tls_secret: +# external_connection_timeout: +# replica_size: 1 +# ## optional when db2, required when oracle +# user_custom_jdbc_drivers: false +# service_type: Route +# autoscaling: +# enabled: false +# max_replicas: 5 +# min_replicas: 2 +# target_cpu_utilization_percentage: 80 +# database: +# host: "" +# name: "" +# port: "" +# ## If you set up DB2 HADR and want to use it, you need to configure alternative_host and alternative_port; otherwise, leave them blank. +# alternative_host: +# alternative_port: +# ## Only DB2 and Oracle are supported +# type: db2 +# ## Required only when type is Oracle, both ssl and non-ssl. The format must be purely oracle descriptor like (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=)(PORT=))(CONNECT_DATA=(SERVICE_NAME=))) +# oracle_url_without_wallet_directory: +# enable_ssl: false +# ## Required only when type is Oracle and enable_ssl is true.
The format must be purely oracle descriptor. SSO wallet directory must be specified and fixed to (MY_WALLET_DIRECTORY=/shared/resources/oracle/wallet). +# oracle_url_with_wallet_directory: +# ## Required only when enable_ssl is true, both db2 and oracle db type +# db_cert_secret_name: +# ## Required only when type is oracle and enable_ssl is true. +# oracle_sso_wallet_secret_name: +# ## Optional. If it is empty, DBASB is the default for db2, and the AE_DATABASE_USER set in the admin_secret_name is the default for oracle +# current_schema: DBASB +# initial_pool_size: 1 +# max_pool_size: 10 +# uv_thread_pool_size: 4 +# max_lru_cache_size: 1000 +# max_lru_cache_age: 600000 +# dbcompatibility_max_retries: 30 +# dbcompatibility_retry_interval: 10 +# ## The persistent volume claim for custom JDBC drivers when use of custom jdbc drivers is enabled +# custom_jdbc_pvc: +# log_level: +# node: info +# browser: 2 +# content_security_policy: +# enable: false +# whitelist: +# env: +# max_size_lru_cache_rr: 1000 +# server_env_type: development +# purge_stale_apps_interval: 86400000 +# apps_threshold: 100 +# stale_threshold: 172800000 +# images: +# pull_policy: IfNotPresent +# db_job: +# repository: cp.icr.io/cp/cp4a/aae/solution-server-helmjob-db +# tag: 20.0.2 +# solution_server: +# repository: cp.icr.io/cp/cp4a/aae/solution-server +# tag: 20.0.2 +# max_age: +# auth_cookie: "900000" +# csrf_cookie: "3600000" +# static_asset: "2592000" +# hsts_header: "2592000" +# probe: +# liveness: +# failure_threshold: 5 +# initial_delay_seconds: 60 +# period_seconds: 10 +# success_threshold: 1 +# timeout_seconds: 180 +# readiness: +# failure_threshold: 5 +# initial_delay_seconds: 10 +# period_seconds: 10 +# success_threshold: 1 +# timeout_seconds: 180 +# redis: +# host: +# port: +# ttl: 1800 +# resource_ae: +# limits: +# cpu: 2000m +# memory: 2Gi +# requests: +# cpu: 1000m +# memory: 1Gi +# resource_init: +# limits: +# cpu: 500m +# memory: 256Mi +# requests: +# cpu: 200m +# memory: 128Mi +# session: +# check_period: "3600000" +# duration: "1800000" +# max: "10000" +# resave: "false" +# rolling: "true" +# save_uninitialized: "false" +# use_external_store: "true" +# tls: +# tls_trust_list: [] +# # If you want to make the replica size more than 1 for this cluster, then you must enable the shared storage +# share_storage: +# enabled: false +# # If you created the PV manually, then please provide the PVC name bound here +# pvc_name: +# auto_provision: +# enabled: false +# # Required if you enabled the auto provision +# storage_class: +# size: 20Gi diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_application.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_application.yaml new file mode 100644 index 00000000..14f5f9ed --- /dev/null +++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_application.yaml @@ -0,0 +1,180 @@ + +############################################################################### +## +##Licensed Materials - Property of IBM +## +##(C) Copyright IBM Corp. 2020. All Rights Reserved. +## +##US Government Users Restricted Rights - Use, duplication or +##disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+## +############################################################################### +apiVersion: icp4a.ibm.com/v1 +kind: ICP4ACluster +metadata: + name: icp4adeploy + labels: + app.kubernetes.io/instance: ibm-dba + app.kubernetes.io/managed-by: ibm-dba + app.kubernetes.io/name: ibm-dba + release: 20.0.2 +spec: + appVersion: 20.0.2 + ########################################################################## + ## This section contains the shared configuration for all CP4A components # + ########################################################################## + shared_configuration: + ## CP4A patterns or capabilities to be deployed. This CR represents the "application" pattern (aka Business Automation Manager), which includes the following + ## mandatory components: ban (Business Automation Navigator), ums (User Management Service), app_engine (Application Engine) and optional components: app_designer + sc_deployment_patterns: application + + ## The optional components to be installed if listed here. This is normally populated by the User script based on input from the user. + ## The optional components are: app_designer + sc_optional_components: + + ##################################################################### + ## IBM App Engine production configuration # + ##################################################################### + application_engine_configuration: + ## The application_engine_configuration is a list; you can deploy multiple instances of App Engine and assign different configurations for each instance. + ## For each instance, application_engine_configuration.name and application_engine_configuration.name.hostname must be assigned to different values. + - name: workspace + images: + pull_policy: IfNotPresent + solution_server: + repository: cp.icr.io/cp/cp4a/aae/solution-server + tag: 20.0.2 + db_job: + repository: cp.icr.io/cp/cp4a/aae/solution-server-helmjob-db + tag: 20.0.2 + # If you input the hostname and port here, they will always be used. + # If you are using pattern mode (shared_configuration.sc_deployment_patterns contains a value), + # then you don't need to fill in the hostname and port; shared_configuration.sc_deployment_hostname_suffix will be used to generate one. + # But if no suffix is input and no hostname and port are assigned, an error will be reported in the operator log during deployment. + # For non-pattern mode you must assign a valid hostname and port here. + hostname: "{{ 'ae-workspace.' + shared_configuration.sc_deployment_hostname_suffix }}" + port: 443 + # Inside the admin secret there are two required fields (see the sketch below): + # AE_DATABASE_PWD: + # AE_DATABASE_USER: + admin_secret_name: + # The default admin user id for application engine + # The user ID should be the bootstrap admin ID for IBM Business Automation Navigator. It is case sensitive. + # The same ID should be a User Management Service (UMS) admin user also. + admin_user: + external_tls_secret: + external_connection_timeout: 90s + replica_size: 1 + ## optional when db2, required when oracle + user_custom_jdbc_drivers: false + service_type: Route + autoscaling: + enabled: false + max_replicas: 5 + min_replicas: 2 + target_cpu_utilization_percentage: 80 + database: + # AE Database host name or IP when the database is DB2 + host: + # AE Database name when the database is DB2 + name: + # AE database port number when the database is DB2 + port: + ## If you set up DB2 HADR and want to use it, you need to configure alternative_host and alternative_port; otherwise, leave them blank.
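+      # A sketch of the admin secret referenced by admin_secret_name above. The secret name and
+      # values are illustrative assumptions; the two key names are the required fields listed above:
+      #
+      # apiVersion: v1
+      # kind: Secret
+      # metadata:
+      #   name: ae-admin-secret        # hypothetical; set admin_secret_name to this
+      # type: Opaque
+      # stringData:
+      #   AE_DATABASE_USER: db2inst1   # example database user
+      #   AE_DATABASE_PWD: passw0rd    # example database password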
alternative_host: + alternative_port: + ## Only DB2 and Oracle are supported + type: db2 + ## Required only when type is Oracle, both ssl and non-ssl. The format must be purely oracle descriptor like (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=)(PORT=))(CONNECT_DATA=(SERVICE_NAME=))) + oracle_url_without_wallet_directory: + enable_ssl: false + ## Required only when type is Oracle and enable_ssl is true. The format must be purely oracle descriptor. SSO wallet directory must be specified and fixed to (MY_WALLET_DIRECTORY=/shared/resources/oracle/wallet). + oracle_url_with_wallet_directory: + ## Required only when enable_ssl is true, both db2 and oracle db type + db_cert_secret_name: + ## Required only when type is oracle and enable_ssl is true. + oracle_sso_wallet_secret_name: + ## Optional. If it is empty, DBASB is the default for db2, and the AE_DATABASE_USER set in the admin_secret_name is the default for oracle + current_schema: DBASB + initial_pool_size: 1 + max_pool_size: 10 + uv_thread_pool_size: 4 + max_lru_cache_size: 1000 + max_lru_cache_age: 600000 + dbcompatibility_max_retries: 30 + dbcompatibility_retry_interval: 10 + ## The persistent volume claim for custom JDBC drivers when use of custom jdbc drivers is enabled + custom_jdbc_pvc: + log_level: + node: info + browser: 2 + content_security_policy: + enable: false + whitelist: + env: + max_size_lru_cache_rr: 1000 + server_env_type: development + purge_stale_apps_interval: 86400000 + apps_threshold: 100 + stale_threshold: 172800000 + max_age: + auth_cookie: "900000" + csrf_cookie: "3600000" + static_asset: "2592000" + hsts_header: "2592000" + probe: + liveness: + failure_threshold: 5 + initial_delay_seconds: 60 + period_seconds: 10 + success_threshold: 1 + timeout_seconds: 180 + readiness: + failure_threshold: 5 + initial_delay_seconds: 10 + period_seconds: 10 + success_threshold: 1 + timeout_seconds: 180 + # Redis settings only when you set session.use_external_store to true + redis: + # Your external redis host/ip + host: localhost + # Your external redis port + port: 6379 + ttl: 1800 + resource_ae: + limits: + cpu: 2000m + memory: 2Gi + requests: + cpu: 300m + memory: 512Mi + resource_init: + limits: + cpu: 500m + memory: 256Mi + requests: + cpu: 100m + memory: 128Mi + session: + check_period: "3600000" + duration: "1800000" + max: "10000" + resave: "false" + rolling: "true" + save_uninitialized: "false" + # By setting this option to true, the AE will use external Redis as session storage + # to support multiple AE pods + use_external_store: "false" + tls: + tls_trust_list: [] + # If you want to make the replica size more than 1 for this cluster, then you must enable the shared storage + share_storage: + enabled: false + # If you created the PV manually, then please provide the PVC name bound here + pvc_name: + auto_provision: + enabled: false + # Required if you enabled the auto provision + storage_class: + size: 20Gi \ No newline at end of file diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_content.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_content.yaml new file mode 100644 index 00000000..71c285ec --- /dev/null +++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_content.yaml @@ -0,0 +1,1185 @@ + +############################################################################### +## +##Licensed Materials - Property of IBM +## +##(C) Copyright IBM Corp. 2020. All Rights Reserved.
+## +##US Government Users Restricted Rights - Use, duplication or +##disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +## +############################################################################### +apiVersion: icp4a.ibm.com/v1 +kind: ICP4ACluster +metadata: + name: icp4adeploy + labels: + app.kubernetes.io/instance: ibm-dba + app.kubernetes.io/managed-by: ibm-dba + app.kubernetes.io/name: ibm-dba + release: 20.0.2 +spec: + ########################################################################## + ## This section contains the shared configuration for all CP4A components # + ########################################################################## + appVersion: 20.0.2 + shared_configuration: + + ## FileNet Content Manager (FNCM) license and possible values are: user, non-production, and production. + ## This value could be different from the other licenses in the CR. + sc_deployment_fncm_license: "" + + ## Use this parameter to specify the license for the CP4A deployment; + ## the possible values are: non-production and production. If not set, the license + ## defaults to production. This value could be different from the other licenses in the CR. + sc_deployment_license: "" + + ## All CP4A components must use/share the image_pull_secrets to pull images. + image_pull_secrets: + - admin.registrykey + + ## All CP4A components must use/share the same docker image repository. For example, if IBM Entitled Registry is used, then + ## it should be "cp.icr.io". Otherwise, it will be a local docker registry. + sc_image_repository: cp.icr.io + + ## All CP4A components must use/share the root_ca_secret in order for integration. + root_ca_secret: icp4a-root-ca + + ## CP4A patterns or capabilities to be deployed. This CR represents the "content" pattern (aka FileNet Content Manager), which includes the following + ## mandatory components: cpe, icn (BAN/Navigator), graphql and optional components: cmis, es (External Share). + sc_deployment_patterns: content + + ## The optional components to be installed if listed here. This is normally populated by the User script based on input from the user. + ## The optional components are: cmis, css (Content Search Services), es (External Share). + sc_optional_components: + + ## The deployment type as selected by the user. Possible values are: demo, enterprise. + sc_deployment_type: enterprise + + ## Specify the RunAsUser for the security context of the pod. This is usually a numeric value that corresponds to a user ID. + sc_run_as_user: + + ## The platform to be deployed specified by the user. Possible values are: OCP and other. This is normally populated by the User script + ## based on input from the user. + sc_deployment_platform: + + ## For OCP, this is used to create routes; you should input a valid hostname in the required field. + sc_deployment_hostname_suffix: "{{ meta.name }}." + + ## If the root certificate authority (CA) key of the external service is not signed by the operator root CA key, provide the TLS certificate of + ## the external service to the component's truststore. + trusted_certificate_list: [] + + ## Shared encryption key secret name that is used for Workstream Services and Process Federation Server integration. + encryption_key_secret: icp4a-shared-encryption-key + + ## Enable/disable ECM (FNCM) / BAN initialization (e.g., creation of P8 domain, creation/configuration of object stores, + ## creation/configuration of CSS servers, and initialization of Navigator (ICN)).
If the "initialize_configuration" section + ## is defined in the CR (below), then that configuration will take precedence overriding this parameter. + sc_content_initialization: false + ## Enable/disable the ECM (FNCM) / BAN verification (e.g., creation of test folder, creation of test document, + ## execution of CBR search, and creation of Navigator demo repository and desktop). If the "verify_configuration" + ## section is defined in the CR (below), then that configuration will take precedence overriding this parameter. + sc_content_verification: false + + ## On OCP 3.x and 4.x, the User script will populate these three (3) parameters based on your input for "enterprise" deployment. + ## If you manually deploying without using the User script, then you would provide the different storage classes for the slow, medium + ## and fast storage parameters below. If you only have 1 storage class defined, then you can use that 1 storage class for all 3 parameters. + storage_configuration: + sc_slow_file_storage_classname: "" + sc_medium_file_storage_classname: "" + sc_fast_file_storage_classname: "" + + ## The beginning section of LDAP configuration for CP4A + ldap_configuration: + ## The possible values are: "IBM Security Directory Server" or "Microsoft Active Directory" + lc_selected_ldap_type: "" + + ## The name of the LDAP server to connect + lc_ldap_server: "" + + ## The port of the LDAP server to connect. Some possible values are: 389, 636, etc. + lc_ldap_port: "" + + ## The LDAP bind secret for LDAP authentication. The secret is expected to have ldapUsername and ldapPassword keys. Refer to Knowledge Center for more info. + lc_bind_secret: ldap-bind-secret + + ## The LDAP base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc + lc_ldap_base_dn: "" + + ## Enable SSL/TLS for LDAP communication. Refer to Knowledge Center for more info. + lc_ldap_ssl_enabled: true + + ## The name of the secret that contains the LDAP SSL/TLS certificate. + lc_ldap_ssl_secret_name: "" + + ## The LDAP user name attribute. One possible value is "*:cn" for TDS and "user:sAMAccountName" for AD. Refer to Knowledge Center for more info. + lc_ldap_user_name_attribute: "" + + ## The LDAP user display name attribute. One possible value is "cn" for TDS and "sAMAccountName" for AD. Refer to Knowledge Center for more info. + lc_ldap_user_display_name_attr: "" + + ## The LDAP group base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc + lc_ldap_group_base_dn: "" + + ## The LDAP group name attribute. One possible value is "*:cn" for TDS and "*:cn" for AD. Refer to Knowledge Center for more info. + lc_ldap_group_name_attribute: "*:cn" + + ## The LDAP group display name attribute. One possible value for both TDS and AD is "cn". Refer to Knowledge Center for more info. + lc_ldap_group_display_name_attr: "cn" + + ## The LDAP group membership search filter string. One possible value is "(&(cn=%v)(|(objectclass=groupOfNames)(objectclass=groupOfUniqueNames)(objectclass=groupOfURLs))" for TDS + ## and "(&(cn=%v)(objectcategory=group))" for AD. + lc_ldap_group_membership_search_filter: "" + + ## The LDAP group membership ID map. One possible value is "groupofnames:member" for TDS and "memberOf:member" for AD. + lc_ldap_group_member_id_map: "" + + ## The User script will uncomment the section needed based on user's input from User script. If you are deploying without the User script, + ## uncomment the necessary section (depending if you are using Active Directory (ad) or Tivoli Directory Service (tds)) accordingly. 
+ # ad: + # lc_ad_gc_host: "" + # lc_ad_gc_port: "" + # lc_user_filter: "(&(samAccountName=%v)(objectClass=user))" + # lc_group_filter: "(&(samAccountName=%v)(objectclass=group))" + # tds: + # lc_user_filter: "(&(cn=%v)(objectclass=person))" + # lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))" + + ## The User script should only uncomment this section if External Share is selected as an optional component. + ## If you are deploying without the User script, uncomment the necessary section (depending + ## if you are using Active Directory (ad) or Tivoli Directory Service (tds)) accordingly. + # ext_ldap_configuration: + # lc_selected_ldap_type: "" + # lc_ldap_server: "" + # lc_ldap_port: "" + # lc_bind_secret: ldap-bind-secret + # lc_ldap_base_dn: "" + # lc_ldap_ssl_enabled: true + # lc_ldap_ssl_secret_name: "" + # lc_ldap_user_name_attribute: "" + # lc_ldap_user_display_name_attr: "" + # lc_ldap_group_base_dn: "" + # lc_ldap_group_name_attribute: "" + # lc_ldap_group_display_name_attr: "cn" + # lc_ldap_group_membership_search_filter: "" + # lc_ldap_group_member_id_map: "" + + ## The User script will uncomment the needed section based on your input. + ## If you are deploying without the User script, uncomment the necessary section (depending + ## if you are using Active Directory (ad) or Tivoli Directory Service (tds)) accordingly. + # ad: + ## This is the Global Catalog port for the LDAP + # lc_ad_gc_host: "" + # lc_ad_gc_port: "" + # lc_user_filter: "(&(samAccountName=%v)(objectClass=user))" + # lc_group_filter: "(&(samAccountName=%v)(objectclass=group))" + # tds: + # lc_user_filter: "(&(cn=%v)(objectclass=person))" + # lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))" + + ## Uncomment this section if you have OpenId Connect providers. + # open_id_connect_providers: "" + ## Set the provider name that is used for your redirect URL. + #- provider_name: "" + ## Set the display name for the sign in button in navigator. + # display_name: "Single Sign on" + ## Enter your OIDC secret names here for cpe, nav, external share, and graphql. + ## Not all are required, depending on your deployment. + # client_oidc_secret: + # es: "" # Points to a secret with client_id and client_secret in that format. + # nav: "" # Points to a secret with client_id and client_secret in that format. + # cpe: "" # Points to a secret with client_id and client_secret in that format. + # graphql: "" # Points to a secret with client_id and client_secret in that format. + # issuer_identifier: "" + ## REQUIRED PROPERTIES AND VALUES which are common + ## If not set, they will be set to the defaults. + # response_type: "code" + # scope: "openid email profile" + # map_identity_to_registry_user: "false" + # authn_session_disabled: "false" + # inbound_propagation: "supported" + # https_required: "true" + # validation_method: "introspect" + # disable_ltpa_cookie: "true" + # signature_algorithm: "RS256" + # user_identifier: "sub" # sub for ums and ibm id, email for google + # unique_user_identifier: "sub" # sub for ums and ibm id, email for google + # user_identity_to_create_subject: "sub" # sub for ums and ibm id, email for google + ## + ## Uncomment discovery_endpoint_url for Google or UMS IdP.
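+ ## For example, for UMS the discovery document is typically served from the provider's
+ ## well-known endpoint; the hostname below is a hypothetical illustration:
+ ##
+ ##   discovery_endpoint_url: "https://ums.example.com/oidc/endpoint/ums/.well-known/openid-configuration"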
+ ##
+ # discovery_endpoint_url:
+ ##
+ ## Optional parameters
+ ##
+ # authorization_endpoint_url: ""
+ # token_endpoint_url: ""
+ # validation_endpoint_url: ""
+ # trust_alias_name: "secret name you created"
+ # disables_iss_checking: "true"
+ # jwk_client_oidc_secret:
+ # es: "" # Points to a secret with client_id and client_secret in that format.
+ # nav: "" # Points to a secret with client_id and client_secret in that format.
+ # cpe: "" # Points to a secret with client_id and client_secret in that format.
+ # graphql: "" # Points to a secret with client_id and client_secret in that format.
+ # token_reuse: "true"
+ ##
+ ## User defined parameters.
+ ## If you do not see a parameter that your OpenID Connect provider needs,
+ ## you can define it in this section as a key-value pair separated by the delimiter `:`.
+ ## If you want to change the default delimiter, add `DELIM=` in front of your
+ ## key-value pair. Ex: 'DELIM=;myKey;myValue'. In this example, the new delimiter is `;` and
+ ## the key-value pair is set to `myKey;myValue` instead of `myKey:myValue`.
+ ##
+ # oidc_ud_param:
+ # - 'DELIM=;myKey;myValue'
+ # - "myKey2:myValue2"
+ # - "myKey3:myValue3"
+
+ ## The beginning section of the database configuration for CP4A
+ datasource_configuration:
+ ## The database configuration for the GCD datasource for CPE
+ dc_gcd_datasource:
+ ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle".
+ dc_database_type: ""
+ ## The GCD non-XA datasource name. The default value is "FNGCDDS".
+ dc_common_gcd_datasource_name: "FNGCDDS"
+ ## The GCD XA datasource name. The default value is "FNGCDDSXA".
+ dc_common_gcd_xa_datasource_name: "FNGCDDSXA"
+ ## Provide the database server name or IP address of the database server.
+ database_servername: ""
+ ## Provide the name of the database for the GCD for CPE. For example: "GCDDB"
+ database_name: ""
+ ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+ database_port: ""
+ ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//<host>:1521/orcl"
+ dc_oracle_gcd_jdbc_url: ""
+
+ ## If the database type is Db2 HADR, then complete the rest of the parameters below.
+ ## Provide the database server name or IP address of the standby database server.
+ dc_hadr_standby_servername: ""
+ ## Provide the standby database server port. For Db2, the default is "50000".
+ dc_hadr_standby_port: ""
+ ## Provide the validation timeout. If you have no preference, keep the default value.
+ dc_hadr_validation_timeout: 15
+ ## Provide the retry interval. If you have no preference, keep the default value.
+ dc_hadr_retry_interval_for_client_reroute: 15
+ ## Provide the max # of retries. If you have no preference, keep the default value.
+ dc_hadr_max_retries_for_client_reroute: 3
+
+ ## The database configuration for the object store 1 (OS1) datasource for CPE
+ dc_os_datasources:
+ ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle". This should be the same as the
+ ## GCD configuration above.
+ - dc_database_type: ""
+ ## The OS1 non-XA datasource name. The default value is "FNOS1DS".
+ dc_common_os_datasource_name: "FNOS1DS"
+ ## The OS1 XA datasource name. The default value is "FNOS1DSXA".
+ dc_common_os_xa_datasource_name: "FNOS1DSXA"
+ ## Provide the database server name or IP address of the database server. This should be the same as the
+ ## GCD configuration above.
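+ ## For example (hypothetical server name; database name and port are the doc's own examples):
+ ## database_servername: "db2.example.com", database_name: "OS1DB", database_port: "50000"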
+ database_servername: ""
+ ## Provide the name of the database for the object store 1 for CPE. For example: "OS1DB"
+ database_name: ""
+ ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+ database_port: ""
+ ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//<host>:1521/orcl"
+ dc_oracle_os_jdbc_url: ""
+ ######################################################################################
+ ## If the database type is "Db2HADR", then complete the rest of the parameters below.
+ ## Otherwise, remove or comment out the rest of the parameters below.
+ ######################################################################################
+ dc_hadr_standby_servername: ""
+ ## Provide the standby database server port. For Db2, the default is "50000".
+ dc_hadr_standby_port: ""
+ ## Provide the validation timeout. If you have no preference, keep the default value.
+ dc_hadr_validation_timeout: 15
+ ## Provide the retry interval. If you have no preference, keep the default value.
+ dc_hadr_retry_interval_for_client_reroute: 15
+ ## Provide the max # of retries. If you have no preference, keep the default value.
+ dc_hadr_max_retries_for_client_reroute: 3
+ ## The database configuration for ICN (Navigator) - aka BAN (Business Automation Navigator)
+ dc_icn_datasource:
+ ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle". This should be the same as the
+ ## GCD and object store configuration above.
+ dc_database_type: ""
+ ## Provide the ICN datasource name. The default value is "ECMClientDS".
+ dc_common_icn_datasource_name: "ECMClientDS"
+ database_servername: ""
+ ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+ database_port: ""
+ ## Provide the name of the database for ICN (Navigator). For example: "ICNDB"
+ database_name: ""
+ ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//<host>:1521/orcl"
+ dc_oracle_icn_jdbc_url: ""
+ ######################################################################################
+ ## If the database type is "Db2HADR", then complete the rest of the parameters below.
+ ## Otherwise, remove or comment out the rest of the parameters below.
+ ######################################################################################
+ dc_hadr_standby_servername: ""
+ ## Provide the standby database server port. For Db2, the default is "50000".
+ dc_hadr_standby_port: ""
+ ## Provide the validation timeout. If you have no preference, keep the default value.
+ dc_hadr_validation_timeout: 15
+ ## Provide the retry interval. If you have no preference, keep the default value.
+ dc_hadr_retry_interval_for_client_reroute: 15
+ ## Provide the max # of retries. If you have no preference, keep the default value.
+ dc_hadr_max_retries_for_client_reroute: 3
+
+ ## Monitor setting
+ # monitoring_configuration:
+ # mon_metrics_writer_option: 4
+ # mon_enable_plugin_pch: false
+ # mon_enable_plugin_mbean: false
+ # collectd_plugin_write_graphite_host: localhost
+ # collectd_plugin_write_graphite_port: 2003
+ # collectd_interval: 10
+ # collectd_disable_host_monitoring: false
+ # collectd_plugin_write_prometheus_port: 9103
+
+ # # Logging setting
+ # logging_configuration:
+ # mon_log_parse: false
+ # mon_log_service_endpoint: localhost:5044
+ # private_logging_enabled: false
+ # logging_type: default
+ # mon_log_path: /path_to_extra_log
+
+ ########################################################################
+ ######## IBM FileNet Content Manager configuration ########
+ ########################################################################
+ ecm_configuration:
+
+ ## Enable/Disable log for FNCM
+ no_log: false
+
+ ## FNCM secret that contains the GCD DB user name and password, Object Store DB user name and password,
+ ## LDAP user and password, CPE user name and password, keystore password, LTPA password, etc.
+ fncm_secret_name: ibm-fncm-secret
+
+ ####################################
+ ## Start of configuration for CPE ##
+ ####################################
+ cpe:
+ ## The architecture of the cluster. This is the default for Linux on x86 and should not be changed.
+ arch:
+ amd64: "3 - Most preferred"
+
+ ## The number of replicas or pods to be deployed. The default is 1 replica; for high availability in a production environment,
+ ## it is recommended to have 2 or more.
+ replica_count: 1
+
+ ## This is the image repository and tag that correspond to the image registry, which is where the image will be pulled from.
+ image:
+ ## The default repository is the IBM Entitled Registry.
+ repository: cp.icr.io/cp/cp4a/fncm/cpe
+ tag: ga-555-p8cpe
+
+ ## This will override the image pull policy in the shared_configuration.
+ pull_policy: IfNotPresent
+
+ ## Logging for workloads. This is the default setting.
+ log:
+ format: json
+
+ ## The initial resources (CPU, memory) requests and limits. If more resources are needed,
+ ## make the changes here to meet your requirements.
+ resources:
+ requests:
+ cpu: 500m
+ memory: 512Mi
+ limits:
+ cpu: 1
+ memory: 3072Mi
+
+ ## By default, autoscaling is enabled with the following settings, with a minimum of 1 replica and a maximum of 3 replicas. Change
+ ## these settings to meet your requirements.
+ auto_scaling:
+ enabled: true
+ max_replicas: 3
+ min_replicas: 1
+ ## This is the default CPU percentage before autoscaling occurs.
+ target_cpu_utilization_percentage: 80
+
+ ## Below are the default CPE Production settings. Make the necessary changes as you see fit. Refer to the Knowledge Center documentation for details.
+ cpe_production_setting:
+ time_zone: Etc/UTC
+
+ ## The initial percentage of available memory to use.
+ jvm_initial_heap_percentage: 18
+ ## The maximum percentage of available memory to use.
+ jvm_max_heap_percentage: 33
+
+ ## Use this "jvm_customize_options" parameter to specify JVM arguments using comma separation.
+ ## For example, if you want to set the following JVM arguments:
+ ## -Dmy.test.jvm.arg1=123
+ ## -Dmy.test.jvm.arg2=abc
+ ## -XX:+SomeJVMSettings
+ ## -XshowSettings:vm
+ ## then set the following: jvm_customize_options="-Dmy.test.jvm.arg1=123,-Dmy.test.jvm.arg2=abc,-XX:+SomeJVMSettings,-XshowSettings:vm"
+ jvm_customize_options:
+
+ ## Default JNDI name for the GCD non-XA data source
+ gcd_jndi_name: FNGCDDS
+ ## Default JNDI name for the GCD XA data source
+ gcd_jndixa_name: FNGCDDSXA
+ license_model: FNCM.PVUNonProd
+
+ # The license must be set to "accept" in order for the component to install. This is the default value.
+ license: accept
+
+ ## Enable/disable monitoring, where metrics can be sent to Graphite or scraped by Prometheus
+ monitor_enabled: false
+ ## Enable/disable logging, where logs can be sent to Elasticsearch.
+ logging_enabled: false
+
+ ## By default, the plugin for Graphite is enabled to emit container metrics.
+ collectd_enable_plugin_write_graphite: false
+
+ ## Persistent Volume Claims for CPE. If the storage_configuration in the shared_configuration is configured,
+ ## the Operator will create the PVCs using the names below.
+ datavolume:
+ existing_pvc_for_cpe_cfgstore: "cpe-cfgstore"
+ existing_pvc_for_cpe_logstore: "cpe-logstore"
+ existing_pvc_for_cpe_filestore: "cpe-filestore"
+ existing_pvc_for_cpe_icmrulestore: "cpe-icmrulesstore"
+ existing_pvc_for_cpe_textextstore: "cpe-textextstore"
+ existing_pvc_for_cpe_bootstrapstore: "cpe-bootstrapstore"
+ existing_pvc_for_cpe_fnlogstore: "cpe-fnlogstore"
+
+ ## Default values for both readiness and liveness probes. Modify these values to meet your requirements.
+ probe:
+ readiness:
+ initial_delay_seconds: 120
+ period_seconds: 5
+ timeout_seconds: 10
+ failure_threshold: 6
+ liveness:
+ initial_delay_seconds: 600
+ period_seconds: 5
+ timeout_seconds: 5
+ failure_threshold: 6
+
+ ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above.
+ image_pull_secrets:
+ name: "admin.registrykey"
+ ####################################
+ ## Start of configuration for CSS ##
+ ####################################
+ css:
+ ## The architecture of the cluster. This is the default for Linux on x86 and should not be changed.
+ arch:
+ amd64: "3 - Most preferred"
+
+ ## The number of replicas or pods to be deployed. The default is 1 replica; for high availability in a production environment,
+ ## it is recommended to have 2 or more.
+ replica_count: 1
+
+ ## This is the image repository and tag that correspond to the image registry, which is where the image will be pulled from.
+ image:
+ ## The default repository is the IBM Entitled Registry.
+ repository: cp.icr.io/cp/cp4a/fncm/css
+ tag: ga-555-p8css
+
+ ## This will override the image pull policy in the shared_configuration.
+ pull_policy: IfNotPresent
+
+ ## Logging for workloads. This is the default setting.
+ log:
+ format: json
+
+ ## The initial resources (CPU, memory) requests and limits. If more resources are needed,
+ ## make the changes here to meet your requirements.
+ resources:
+ requests:
+ cpu: 500m
+ memory: 512Mi
+ limits:
+ cpu: 1
+ memory: 4096Mi
+
+ ## CSS Production setting
+ css_production_setting:
+ ## The maximum percentage of available memory to use.
+ jvm_max_heap_percentage: 50
+
+ # The license must be set to "accept" in order for the component to install. This is the default value.
+ license: accept
+
+ ## Enable/disable monitoring, where metrics can be sent to Graphite or scraped by Prometheus
+ monitor_enabled: false
+ ## Enable/disable logging, where logs can be sent to Elasticsearch.
+ logging_enabled: false
+ ## By default, the plugin for Graphite is enabled to emit container metrics.
+ collectd_enable_plugin_write_graphite: false
+
+ ## Persistent Volume Claims for CSS. If the storage_configuration in the shared_configuration is configured,
+ ## the Operator will create the PVCs using the names below.
+ datavolume:
+ existing_pvc_for_css_cfgstore: "css-cfgstore"
+ existing_pvc_for_css_logstore: "css-logstore"
+ existing_pvc_for_css_tmpstore: "css-tempstore"
+ existing_pvc_for_index: "css-indexstore"
+ existing_pvc_for_css_customstore: "css-customstore"
+
+ ## Default values for both readiness and liveness probes. Modify these values to meet your requirements.
+ probe:
+ readiness:
+ initial_delay_seconds: 60
+ period_seconds: 5
+ timeout_seconds: 10
+ failure_threshold: 6
+ liveness:
+ initial_delay_seconds: 180
+ period_seconds: 5
+ timeout_seconds: 5
+ failure_threshold: 6
+ ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above.
+ image_pull_secrets:
+ name: "admin.registrykey"
+
+ #####################################
+ ## Start of configuration for CMIS ##
+ #####################################
+ cmis:
+ ## The architecture of the cluster. This is the default for Linux on x86 and should not be changed.
+ arch:
+ amd64: "3 - Most preferred"
+
+ ## The number of replicas or pods to be deployed. The default is 1 replica; for high availability in a production environment,
+ ## it is recommended to have 2 or more.
+ replica_count: 1
+
+ ## This is the image repository and tag that correspond to the image registry, which is where the image will be pulled from.
+ image:
+ ## The default repository is the IBM Entitled Registry.
+ repository: cp.icr.io/cp/cp4a/fncm/cmis
+ tag: ga-305-cmis
+
+ ## This will override the image pull policy in the shared_configuration.
+ pull_policy: IfNotPresent
+
+ ## Logging for workloads. This is the default setting.
+ log:
+ format: json
+
+ ## The initial resources (CPU, memory) requests and limits. If more resources are needed,
+ ## make the changes here to meet your requirements.
+ resources:
+ requests:
+ cpu: 500m
+ memory: 256Mi
+ limits:
+ cpu: 1
+ memory: 1536Mi
+
+ ## By default, autoscaling is enabled with the following settings, with a minimum of 1 replica and a maximum of 3 replicas. Change
+ ## these settings to meet your requirements.
+ auto_scaling:
+ enabled: true
+ max_replicas: 3
+ min_replicas: 1
+ ## This is the default CPU percentage before autoscaling occurs.
+ target_cpu_utilization_percentage: 80
+
+ ## Below are the default CMIS Production settings. Make the necessary changes as you see fit. Refer to the Knowledge Center documentation for details.
+ cmis_production_setting:
+ ## By default, this parameter is set by the Operator using the CPE service endpoint (e.g., "http://{{ meta.name }}-cpe-svc:9080/wsi/FNCEWS40MTOM")
+ cpe_url:
+
+ time_zone: Etc/UTC
+
+ ## The initial percentage of available memory to use.
+ jvm_initial_heap_percentage: 40
+ ## The maximum percentage of available memory to use.
+ jvm_max_heap_percentage: 66
+
+ ## Use this "jvm_customize_options" parameter to specify JVM arguments using comma separation.
+ ## For example, if you want to set the following JVM arguments:
+ ## -Dmy.test.jvm.arg1=123
+ ## -Dmy.test.jvm.arg2=abc
+ ## -XX:+SomeJVMSettings
+ ## -XshowSettings:vm
+ ## then set the following: jvm_customize_options="-Dmy.test.jvm.arg1=123,-Dmy.test.jvm.arg2=abc,-XX:+SomeJVMSettings,-XshowSettings:vm"
+ jvm_customize_options:
+
+ ## Enable/disable WebSphere Security
+ ws_security_enabled: false
+
+ # Enable/disable whether the content stream of the Private Working Copy should be copied from the Document that was checked out.
+ checkout_copycontent: true
+ # The default value for the optional maxItems input argument on paging-related services.
+ default_maxitems: 25
+
+ # Enable/disable whether ChoiceLists will be cached once for all users.
+ cvl_cache: true
+ secure_metadata_cache: false
+
+ # Enable/disable whether hidden P8 domain properties should appear in CMIS type definitions and folder or document instance data.
+ filter_hidden_properties: true
+
+ # Timeout in seconds for the queries that specify a timeout.
+ querytime_limit: 180
+
+ # If true, resumable queries provide a faster response time for the REST next link. If false, the next link for REST will re-issue the query.
+ resumable_queries_forrest: true
+
+ # Specifies whether to escape characters that are not valid for XML unicode, as specified by the XML 1.0 standard.
+ escape_unsafe_string_characters: false
+
+ # Limits the maximum allowable Web Service SOAP message request size.
+ max_soap_size: 180
+
+ # Enable/disable the printing of the full stack trace in the response.
+ print_pull_stacktrace: false
+
+ # Configures the sequence in which CMIS tries to identify objects (folder or document first).
+ folder_first_search: false
+
+ # To ignore reading or writing contents in the root folder, set this parameter to true.
+ ignore_root_documents: false
+
+ # Enable/disable support for type mutability.
+ supporting_type_mutability: false
+
+ # The license must be set to "accept" in order for the component to install. This is the default value.
+ license: accept
+
+ ## Enable/disable monitoring, where metrics can be sent to Graphite or scraped by Prometheus
+ monitor_enabled: false
+ ## Enable/disable logging, where logs can be sent to Elasticsearch.
+ logging_enabled: false
+
+ ## By default, the plugin for Graphite is enabled to emit container metrics.
+ collectd_enable_plugin_write_graphite: false
+
+ ## Persistent Volume Claims for CMIS. If the storage_configuration in the shared_configuration is configured,
+ ## the Operator will create the PVCs using the names below.
+ datavolume:
+ existing_pvc_for_cmis_cfgstore: "cmis-cfgstore"
+ existing_pvc_for_cmis_logstore: "cmis-logstore"
+
+ ## Default values for both readiness and liveness probes. Modify these values to meet your requirements.
+ probe:
+ readiness:
+ initial_delay_seconds: 90
+ period_seconds: 5
+ timeout_seconds: 10
+ failure_threshold: 6
+ liveness:
+ initial_delay_seconds: 180
+ period_seconds: 5
+ timeout_seconds: 5
+ failure_threshold: 6
+
+ ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above.
+ image_pull_secrets:
+ name: "admin.registrykey"
+
+ ########################################
+ ## Start of configuration for GraphQL ##
+ ########################################
+ graphql:
+ ## The architecture of the cluster. This is the default for Linux on x86 and should not be changed.
+ arch:
+ amd64: "3 - Most preferred"
+
+ ## The number of replicas or pods to be deployed.
+ ## The default is 1 replica; for high availability in a production environment,
+ ## it is recommended to have 2 or more.
+ replica_count: 1
+
+ ## This is the image repository and tag that correspond to the image registry, which is where the image will be pulled from.
+ image:
+ ## The default repository is the IBM Entitled Registry.
+ repository: cp.icr.io/cp/cp4a/fncm/graphql
+ tag: ga-555-p8cgql
+
+ ## This will override the image pull policy in the shared_configuration.
+ pull_policy: IfNotPresent
+
+ ## Logging for workloads. This is the default setting.
+ log:
+ format: json
+
+ ## The initial resources (CPU, memory) requests and limits. If more resources are needed,
+ ## make the changes here to meet your requirements.
+ resources:
+ requests:
+ cpu: 500m
+ memory: 512Mi
+ limits:
+ cpu: 1
+ memory: 1536Mi
+
+ ## By default, autoscaling is enabled with the following settings, with a minimum of 1 replica and a maximum of 3 replicas. Change
+ ## these settings to meet your requirements.
+ auto_scaling:
+ enabled: true
+ min_replicas: 1
+ max_replicas: 3
+ ## This is the default CPU percentage before autoscaling occurs.
+ target_cpu_utilization_percentage: 80
+
+ ## Below are the default GraphQL Production settings. Make the necessary changes as you see fit. Refer to the Knowledge Center documentation for details.
+ graphql_production_setting:
+ time_zone: Etc/UTC
+
+ ## The initial percentage of available memory to use.
+ jvm_initial_heap_percentage: 40
+ ## The maximum percentage of available memory to use.
+ jvm_max_heap_percentage: 66
+
+ ## Use this "jvm_customize_options" parameter to specify JVM arguments using comma separation. For example, if you want to set the following JVM arguments:
+ ## -Dmy.test.jvm.arg1=123
+ ## -Dmy.test.jvm.arg2=abc
+ ## -XX:+SomeJVMSettings
+ ## -XshowSettings:vm
+ ## then set the following: jvm_customize_options="-Dmy.test.jvm.arg1=123,-Dmy.test.jvm.arg2=abc,-XX:+SomeJVMSettings,-XshowSettings:vm"
+ jvm_customize_options:
+
+ license_model: FNCM.PVUNonProd
+
+ # The license must be set to "accept" in order for the component to install. This is the default value.
+ license: accept
+
+ enable_graph_iql: false
+
+ ## By default, this parameter is set by the Operator using the CPE service endpoint (e.g., "http://{{ meta.name }}-cpe-svc:9080/wsi/FNCEWS40MTOM")
+ cpe_uri:
+
+ ## Enable/disable monitoring, where metrics can be sent to Graphite or scraped by Prometheus
+ monitor_enabled: false
+ ## Enable/disable logging, where logs can be sent to Elasticsearch.
+ logging_enabled: false
+
+ ## By default, the plugin for Graphite is enabled to emit container metrics.
+ collectd_enable_plugin_write_graphite: false
+
+ ## Persistent Volume Claims for GraphQL. If the storage_configuration in the shared_configuration is configured,
+ ## the Operator will create the PVCs using the names below.
+ datavolume:
+ existing_pvc_for_graphql_cfgstore: "graphql-cfgstore"
+ existing_pvc_for_graphql_logstore: "graphql-logstore"
+
+ ## Default values for both readiness and liveness probes. Modify these values to meet your requirements.
+ probe:
+ readiness:
+ initial_delay_seconds: 120
+ period_seconds: 5
+ timeout_seconds: 10
+ failure_threshold: 6
+ liveness:
+ initial_delay_seconds: 600
+ period_seconds: 5
+ timeout_seconds: 5
+ failure_threshold: 6
+ ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above.
+ image_pull_secrets:
+ name: "admin.registrykey"
+
+ ###############################################
+ ## Start of configuration for External Share ##
+ ###############################################
+ es:
+ ## The architecture of the cluster. This is the default for Linux on x86 and should not be changed.
+ arch:
+ amd64: "3 - Most preferred"
+
+ ## The number of replicas or pods to be deployed. The default is 1 replica; for high availability in a production environment,
+ ## it is recommended to have 2 or more.
+ replica_count: 1
+
+ ## This is the image repository and tag that correspond to the image registry, which is where the image will be pulled from.
+ image:
+ ## The default repository is the IBM Entitled Registry.
+ repository: cp.icr.io/cp/cp4a/fncm/extshare
+ tag: ga-308-es
+
+ ## This will override the image pull policy in the shared_configuration.
+ pull_policy: IfNotPresent
+
+ ## The initial resources (CPU, memory) requests and limits. If more resources are needed,
+ ## make the changes here to meet your requirements.
+ resources:
+ requests:
+ cpu: 500m
+ memory: 512Mi
+ limits:
+ cpu: 1
+ memory: 1536Mi
+
+ ## By default, autoscaling is enabled with the following settings, with a minimum of 1 replica and a maximum of 3 replicas. Change
+ ## these settings to meet your requirements.
+ auto_scaling:
+ enabled: true
+ min_replicas: 1
+ max_replicas: 3
+ ## This is the default CPU percentage before autoscaling occurs.
+ target_cpu_utilization_percentage: 80
+
+ ## Below are the default External Share Production settings. Make the necessary changes as you see fit. Refer to the Knowledge Center documentation for details.
+ es_production_setting:
+ time_zone: Etc/UTC
+
+ ## The initial percentage of available memory to use.
+ jvm_initial_heap_percentage: 40
+ ## The maximum percentage of available memory to use.
+ jvm_max_heap_percentage: 66
+
+ jvm_customize_options:
+ license_model: FNCM.PVUNonProd
+
+ # The license must be set to "accept" in order for the component to install. This is the default value.
+ license: accept
+ allowed_origins:
+
+ ## Enable/disable monitoring, where metrics can be sent to Graphite or scraped by Prometheus
+ monitor_enabled: false
+ ## Enable/disable logging, where logs can be sent to Elasticsearch.
+ logging_enabled: false
+
+ ## By default, the plugin for Graphite is enabled to emit container metrics.
+ collectd_enable_plugin_write_graphite: false
+
+
+ ## Persistent Volume Claims for External Share. If the storage_configuration in the shared_configuration is configured,
+ ## the Operator will create the PVCs using the names below.
+ datavolume:
+ existing_pvc_for_es_cfgstore: "es-cfgstore"
+ existing_pvc_for_es_logstore: "es-logstore"
+
+ ## Default values for both readiness and liveness probes. Modify these values to meet your requirements.
+ probe:
+ readiness:
+ initial_delay_seconds: 180
+ period_seconds: 5
+ timeout_seconds: 10
+ failure_threshold: 6
+ liveness:
+ initial_delay_seconds: 600
+ period_seconds: 5
+ timeout_seconds: 5
+ failure_threshold: 6
+ ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above.
+ image_pull_secrets:
+ name: "admin.registrykey"
+
+ ########################################################################
+ ######## IBM Business Automation Navigator configuration ########
+ ########################################################################
+ navigator_configuration:
+
+ ## Enable/Disable log for Navigator (aka ICN/BAN)
+ no_log: false
+
+ ## Navigator secret that contains user credentials for LDAP and the database
+ ban_secret_name: ibm-ban-secret
+
+ ## The architecture of the cluster. This is the default for Linux and should not be changed.
+ arch:
+ amd64: "3 - Most preferred"
+
+ ## The number of replicas or pods to be deployed. The default is 1 replica; for high availability in a production environment,
+ ## it is recommended to have 2 or more.
+ replica_count: 1
+
+ ## This is the image repository and tag that correspond to the image registry, which is where the image will be pulled from.
+ image:
+ ## The default repository is the IBM Entitled Registry
+ repository: cp.icr.io/cp/cp4a/ban/navigator
+ tag: ga-308-icn
+
+ ## This will override the image pull policy in the shared_configuration.
+ pull_policy: IfNotPresent
+
+ ## Logging for workloads. This is the default setting.
+ log:
+ format: json
+
+ ## The initial resources (CPU, memory) requests and limits. If more resources are needed,
+ ## make the changes here to meet your requirements.
+ resources:
+ requests:
+ cpu: 500m
+ memory: 512Mi
+ limits:
+ cpu: 1
+ memory: 1536Mi
+
+ ## By default, autoscaling is enabled with the following settings, with a minimum of 1 replica and a maximum of 3 replicas. Change
+ ## these settings to meet your requirements.
+ auto_scaling:
+ enabled: true
+ max_replicas: 3
+ min_replicas: 1
+ ## This is the default CPU percentage before autoscaling occurs.
+ target_cpu_utilization_percentage: 80
+
+ ## Settings for sending email
+ java_mail:
+ host: "fncm-exchange1.ibm.com"
+ port: "25"
+ sender: "MailAdmin@fncmexchange.com"
+ ssl_enabled: false
+
+
+ ## Below are the default ICN Production settings. Make the necessary changes as you see fit. Refer to the Knowledge Center documentation for details.
+ icn_production_setting:
+ timezone: Etc/UTC
+
+ ## The initial percentage of available memory to use.
+ jvm_initial_heap_percentage: 40
+ ## The maximum percentage of available memory to use.
+ jvm_max_heap_percentage: 66
+
+ ## Use this "jvm_customize_options" parameter to specify JVM arguments using comma separation. For example, if you want to set the following JVM arguments:
+ ## -Dmy.test.jvm.arg1=123
+ ## -Dmy.test.jvm.arg2=abc
+ ## -XX:+SomeJVMSettings
+ ## -XshowSettings:vm
+ ## then set the following: jvm_customize_options="-Dmy.test.jvm.arg1=123,-Dmy.test.jvm.arg2=abc,-XX:+SomeJVMSettings,-XshowSettings:vm"
+ jvm_customize_options:
+
+ icn_db_type:
+ icn_jndids_name: ECMClientDS
+ icn_schema: ICNDB
+ icn_table_space: ICNDB
+ allow_remote_plugins_via_http: false
+
+
+ ## Default settings for monitoring
+ monitor_enabled: false
+ ## Default settings for logging
+ logging_enabled: false
+
+ ## Persistent Volume Claims for ICN. If the storage_configuration in the shared_configuration is configured,
+ ## the Operator will create the PVCs using the names below.
+ datavolume:
+ existing_pvc_for_icn_cfgstore: "icn-cfgstore"
+ existing_pvc_for_icn_logstore: "icn-logstore"
+ existing_pvc_for_icn_pluginstore: "icn-pluginstore"
+ existing_pvc_for_icnvw_cachestore: "icn-vw-cachestore"
+ existing_pvc_for_icnvw_logstore: "icn-vw-logstore"
+ existing_pvc_for_icn_aspera: "icn-asperastore"
+
+ ## Default values for both readiness and liveness probes. Modify these values to meet your requirements.
+ probe:
+ readiness:
+ initial_delay_seconds: 120
+ period_seconds: 5
+ timeout_seconds: 10
+ failure_threshold: 6
+ liveness:
+ initial_delay_seconds: 600
+ period_seconds: 5
+ timeout_seconds: 5
+ failure_threshold: 6
+
+ ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above.
+ image_pull_secrets:
+ name: "admin.registrykey"
+
+ ########################################################################
+ ######## IBM User and Group Management Service configuration ########
+ ########################################################################
+ ums_configuration:
+ existing_claim_name:
+ replica_count: 2
+ service_type: Route
+ hostname:
+ port: 443
+ images:
+ ums:
+ repository: cp.icr.io/cp/cp4a/ums/ums
+ tag: 20.0.2
+ admin_secret_name: ibm-dba-ums-secret
+ ## optional for secure communication with UMS
+ external_tls_secret_name: ibm-dba-ums-external-tls-secret
+ ## optional for secure communication with UMS
+ external_tls_ca_secret_name: ibm-dba-ums-external-tls-ca-secret
+ ## optional for secure communication with UMS
+ external_tls_teams_secret_name: ibm-dba-ums-external-tls-teams-secret
+ ## optional for secure communication with UMS
+ external_tls_scim_secret_name: ibm-dba-ums-external-tls-scim-secret
+ ## optional for secure communication with UMS
+ external_tls_sso_secret_name: ibm-dba-ums-external-tls-sso-secret
+ oauth:
+ ## optional: full DN of an LDAP group that is authorized to manage OIDC clients, in addition to the primary admin from the admin secret
+ client_manager_group:
+ ## optional: full DN of an LDAP group that is authorized to manage app_tokens, in addition to the primary admin from the admin secret
+ token_manager_group:
+ ## optional: lifetime of OAuth access_tokens. Default is 7200s
+ access_token_lifetime:
+ ## optional: lifetime of app-tokens. Default is 366d
+ app_token_lifetime:
+ ## optional: lifetime of app-passwords. Default is 366d
+ app_password_lifetime:
+ ## optional: maximum number of app-tokens or app-passwords per client. Default is 100
+ app_token_or_password_limit:
+ ## optional: encoding/encryption when storing client secrets in the OAuth database. Default is xor for compatibility. Recommended value is PBKDF2WithHmacSHA512
+ client_secret_encoding:
+ resources:
+ limits:
+ cpu: 500m
+ memory: 512Mi
+ requests:
+ cpu: 200m
+ memory: 256Mi
+ ## Horizontal Pod Autoscaler
+ autoscaling:
+ enabled: true
+ min_replicas: 2
+ max_replicas: 5
+ target_average_utilization: 98
+ use_custom_jdbc_drivers: false
+ use_custom_binaries: false
+ custom_secret_name:
+ custom_xml:
+ logs:
+ console_format: json
+ console_log_level: INFO
+ console_source: message,trace,accessLog,ffdc,audit
+ trace_format: ENHANCED
+ trace_specification: "*=info"
+
+ ########################################################################
+ ######## IBM FileNet Content Manager Initialization configuration ######
+ ########################################################################
+ ## The deployment of FNCM will be initialized with the default values assigned to the parameters below.
+ ## The initialization process includes the creation of the P8 domain, the creation of the directory services,
+ ## the assignment of users/groups to the P8 domain and object store(s), the creation of the object store(s),
+ ## the creation/addition of add-ons for each object store, the enablement of workflow for each object store, the
+ ## creation of Content Search Services servers, index areas, and the enablement of Content-based Retrieval (CBR) for each object store.
+ ## In addition, the creation of the Navigator desktop will also occur.
+ ## If any of the values below does not fit your infrastructure, then change the value to correspond to your configuration
+ ## (e.g., "CEAdmin" is the default user for the ic_ldap_admin_user_name parameter; if you do not have a "CEAdmin" user in your directory
+ ## server and have a different user, then replace "CEAdmin" with your own user). Otherwise, the rest of the values should remain as default.
+ initialize_configuration:
+ ic_domain_creation:
+ domain_name: "P8DOMAIN"
+ encryption_key: "128"
+ ic_ldap_creation:
+ ic_ldap_admin_user_name:
+ - "CEAdmin"
+ ic_ldap_admins_groups_name:
+ - "P8Administrators"
+ ic_ldap_name: "ldap_name"
+ ic_obj_store_creation:
+ object_stores:
+ - oc_cpe_obj_store_display_name: "OS01"
+ oc_cpe_obj_store_symb_name: "OS01"
+ oc_cpe_obj_store_conn:
+ name: "objectstore1_connection"
+ site_name: "InitialSite"
+ dc_os_datasource_name: "FNOS1DS"
+ dc_os_xa_datasource_name: "FNOS1DSXA"
+ oc_cpe_obj_store_admin_user_groups:
+ - "CEAdmin"
+ # Array of users
+ oc_cpe_obj_store_basic_user_groups:
+ oc_cpe_obj_store_addons: true
+ oc_cpe_obj_store_addons_list:
+ - "{CE460ADD-0000-0000-0000-000000000004}"
+ - "{CE460ADD-0000-0000-0000-000000000001}"
+ - "{CE460ADD-0000-0000-0000-000000000003}"
+ - "{CE460ADD-0000-0000-0000-000000000005}"
+ - "{CE511ADD-0000-0000-0000-000000000006}"
+ - "{CE460ADD-0000-0000-0000-000000000008}"
+ - "{CE460ADD-0000-0000-0000-000000000007}"
+ - "{CE460ADD-0000-0000-0000-000000000009}"
+ - "{CE460ADD-0000-0000-0000-00000000000A}"
+ - "{CE460ADD-0000-0000-0000-00000000000B}"
+ - "{CE460ADD-0000-0000-0000-00000000000D}"
+ - "{CE511ADD-0000-0000-0000-00000000000F}"
+ oc_cpe_obj_store_asa_name: "demo_storage"
+ oc_cpe_obj_store_asa_file_systems_storage_device_name: "demo_file_system_storage"
+ oc_cpe_obj_store_asa_root_dir_path: "/opt/ibm/asa/os01_storagearea1"
+ oc_cpe_obj_store_enable_workflow: true
+ oc_cpe_obj_store_workflow_region_name: "design_region_name"
+ oc_cpe_obj_store_workflow_region_number: 1
+ oc_cpe_obj_store_workflow_data_tbl_space: "VWDATA_TS"
+ oc_cpe_obj_store_workflow_index_tbl_space: ""
+ oc_cpe_obj_store_workflow_blob_tbl_space: ""
+ oc_cpe_obj_store_workflow_admin_group: "P8Administrators"
+ oc_cpe_obj_store_workflow_config_group: "P8Administrators"
+ oc_cpe_obj_store_workflow_date_time_mask: "mm/dd/yy hh:tt am"
+ oc_cpe_obj_store_workflow_locale: "en"
+ oc_cpe_obj_store_workflow_pe_conn_point_name: "pe_conn_os1"
+ # Enable the content event emitter only when deploying
+ # BAI and when you have shared_configuration.kafka_configuration defined in
+ # your CR. The default value is false if not specified in the CR.
+ oc_cpe_obj_store_enable_content_event_emitter: false
+ ic_css_creation:
+ - css_site_name: "Initial Site"
+ css_text_search_server_name: "{{ meta.name }}-css-1"
+ affinity_group_name: "aff_group"
+ css_text_search_server_status: 0
+ css_text_search_server_mode: 0
+ css_text_search_server_ssl_enable: "true"
+ css_text_search_server_credential: "RNUNEWc="
+ css_text_search_server_host: "{{ meta.name }}-css-svc-1"
+ css_text_search_server_port: 8199
+ ic_css_index_area:
+ - object_store_name: "OS01"
+ index_area_name: "os1_index_area"
+ affinity_group_name: "aff_group"
+ root_dir: "/opt/ibm/indexareas"
+ max_indexes: 20
+ max_objects_per_index: 10000
+ ic_enable_cbr:
+ - object_store_name: "OS01"
+ class_name: "Document"
+ indexing_languages: "en"
+ ic_icn_init_info:
+ icn_repos:
+ - add_repo_id: "demo_repo1"
+ add_repo_ce_wsi_url: "http://{{ meta.name }}-cpe-svc:9080/wsi/FNCEWS40MTOM/"
+ add_repo_os_sym_name: "OS01"
+ add_repo_os_dis_name: "OS01"
+ add_repo_workflow_enable: false
+ add_repo_work_conn_pnt: "pe_conn_os1:1"
+ add_repo_protocol: "FileNetP8WSI"
+ ## If you have more than one object store, uncomment this section for initialization of the additional object store.
+ # - add_repo_id: "test_repo2"
+ # add_repo_ce_wsi_url: "http://{{ meta.name }}-cpe-svc:9080/wsi/FNCEWS40MTOM/"
+ # add_repo_os_sym_name: "OS02"
+ # add_repo_os_dis_name: "OS02"
+ # add_repo_workflow_enable: true
+ # add_repo_work_conn_pnt: "pe_conn_os02:1"
+ # add_repo_protocol: "FileNetP8WSI"
+ icn_desktop:
+ - add_desktop_id: "demo"
+ add_desktop_name: "icn_desktop"
+ add_desktop_description: "This is the ICN desktop"
+ add_desktop_is_default: false
+ add_desktop_repo_id: "demo_repo1"
+ add_desktop_repo_workflow_enable: false
+
+ ########################################################################
+ ######## IBM FileNet Content Manager Verification configuration ######
+ ########################################################################
+ ## After the initialization process (see the section above), the verification process will take place.
+ ## The verification process ensures that the FNCM and BAN components are functioning correctly. The verification
+ ## process includes the creation of a CPE folder and a CPE document, a CBR search, verification of the workflow configuration,
+ ## and validation of the ICN desktop.
+ verify_configuration:
+ vc_cpe_verification:
+ vc_cpe_folder:
+ - folder_cpe_obj_store_name: "OS01"
+ folder_cpe_folder_path: "/TESTFOLDER"
+ vc_cpe_document:
+ - doc_cpe_obj_store_name: "OS01"
+ doc_cpe_folder_name: "/TESTFOLDER"
+ doc_cpe_doc_title: "test_title"
+ doc_cpe_class_name: "Document"
+ doc_cpe_doc_content: "This is a simple document test"
+ doc_cpe_doc_content_name: "doc_content_name"
+ vc_cpe_cbr:
+ - cbr_cpe_obj_store_name: "OS01"
+ cbr_cpe_class_name: "Document"
+ cbr_cpe_search_string: "is a simple"
+ vc_cpe_workflow:
+ - workflow_cpe_enabled: false
+ workflow_cpe_connection_point: "pe_conn_os1"
+ vc_icn_verification:
+ - vc_icn_repository: "demo_repo1"
+ vc_icn_desktop_id: "demo"
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_decisions.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_decisions.yaml
new file mode 100644
index 00000000..58c85a03
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_decisions.yaml
@@ -0,0 +1,244 @@
+
+###############################################################################
+##
+##Licensed Materials - Property of IBM
+##
+##(C) Copyright IBM Corp. 2020. All Rights Reserved.
+##
+##US Government Users Restricted Rights - Use, duplication or
+##disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+##
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+ name: icp4adeploy
+ labels:
+ app.kubernetes.io/instance: ibm-dba
+ app.kubernetes.io/managed-by: ibm-dba
+ app.kubernetes.io/name: ibm-dba
+ release: 20.0.2
+spec:
+
+ #################################################################################################################
+ ## The contents of this template CR file reflect only the specific parameters and configuration
+ ## settings applicable to the represented ICP4A capability.
+ ##
+ ## These values/configuration sections are to be used when manually assembling or updating the main
+ ## ICP4A CR that is being applied in order to install an ICP4A environment.
+ ##
+ ## If you are in the process of preparing a new install of an ICP4A environment,
+ ## you should merge the required values and configuration sections from this file into the
+ ## starting point CR template: ibm_cp4a_cr_enterprise_foundation.yaml available in the
+ ## same location as this template.
+ ##
+ ## If you are updating an existing ICP4A environment, you should merge the required values and configuration
+ ## sections from this template into the main ICP4A CR file already applied in the environment.
+ ##
+ ######################################################################################################################
+
+ shared_configuration:
+
+ ## CP4A patterns or capabilities to be deployed. This CR represents the Operational Decision Manager "decisions" pattern,
+ ## which brings Decision Center, Rule Execution Server, and Decision Runtime, based on the user's specification in the
+ ## sc_optional_components specification
+ sc_deployment_patterns: decisions
+
+ ## The optional components to be installed if listed here.
+ ## This is normally populated by the deploy script based on input from the user.
+ ## The user can also manually specify the optional components to be deployed here.
+ ## This pattern has 3 components: decisionCenter, decisionRunner, and decisionServerRuntime, where
+ ## decisionServerRuntime represents the Rule Execution Server.
+ ## If decisionCenter is set, you also have to set the 'odm_configuration.decisionCenter.enabled' flag to true to install it.
+ ## If decisionRunner is set, you also have to set the 'odm_configuration.decisionRunner.enabled' flag to true to install it.
+ ## If decisionServerRuntime is set, you also have to set the 'odm_configuration.decisionServerRuntime.enabled' flag to true to install it.
+ sc_optional_components:
+
+ ## The beginning section of the database configuration for CP4A
+ datasource_configuration:
+ dc_odm_datasource:
+ ## Provide the database type from your infrastructure. The possible values are "db2" or "oracle".
+ dc_database_type: "db2"
+ ## Provide the database server name or IP address of the database server.
+ database_servername: ""
+ ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+ dc_common_database_port: ""
+ ## Provide the name of the database for ODM. For example: "ODMDB"
+ dc_common_database_name: ""
+ ## The name of the secret that contains the credentials to connect to the database.
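+ ## For illustration only (hypothetical secret and key names), such a secret could be created with:
+ ## kubectl create secret generic odm-db-secret --from-literal=db-user=odmuser --from-literal=db-password=odmpassword
+ ## and then referenced here as: dc_common_database_instance_secret: "odm-db-secret"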
+ dc_common_database_instance_secret: ""
+
+
+ ########################################################################
+ ######## IBM Operational Decision Manager Configuration ########
+ ########################################################################
+ odm_configuration:
+ # Allows you to activate more tracing for ODM in the Operator pod.
+ debug: false
+ # Allows you to specify which version of ODM you want to deploy.
+ # Supported versions > 19.0.2
+ # If omitted, the latest version will be used.
+ version: 20.0.2
+ image:
+ # Specify the repository used to retrieve the Docker images if you do not want to use the default one.
+ repository: "cp.icr.io/cp/cp4a/odm"
+ # Specify the tag for the Docker images.
+ # This tag is mandatory when you enable odm_configuration.
+ tag: 8.10.4.0
+ # Specify the pull policy for the Docker images. See the Kubernetes documentation for more information.
+ # Possible values: IfNotPresent, Always, Never
+ pullPolicy: IfNotPresent
+ # Optionally specify an array of imagePullSecrets.
+ # Secrets must be manually created in the namespace.
+ # Ex: pullSecrets: "mypullsecret"
+ pullSecrets:
+
+ service:
+ # Specify whether to enable Transport Layer Security. If true, ODM web apps are accessed through HTTPS. If false, they are accessed through HTTP.
+ enableTLS: true
+ # Specify the service type.
+ type: NodePort
+
+ ## Decision Server Runtime parameters
+ decisionServerRuntime:
+ # Specify whether to enable Decision Server Runtime.
+ enabled: true
+ # Specify the number of Decision Server Runtime pods.
+ replicaCount: 1
+ # Specify the name of the configMap with the wanted logging options. If left empty, default logging options are used.
+ loggingRef:
+ # Specify the name of the configMap with the wanted JVM options. If left empty, default JVM options are used.
+ jvmOptionsRef:
+ # Specify the name of the configMap that contains the XU configuration property file.
+ xuConfigRef:
+ resources:
+ requests:
+ # Specify the requested CPU.
+ cpu: 500m
+ # Specify the requested memory.
+ memory: 512Mi
+ limits:
+ # Specify the CPU limit.
+ cpu: 2
+ # Specify the memory limit.
+ memory: 4096Mi
+
+ ## Decision Server Console parameters
+ decisionServerConsole:
+ # Specify the name of the configMap with the wanted logging options. If left empty, default logging options are used.
+ loggingRef:
+ # Specify the name of the configMap with the wanted JVM options. If left empty, default JVM options are used.
+ jvmOptionsRef:
+ resources:
+ requests:
+ # Specify the requested CPU.
+ cpu: 500m
+ # Specify the requested memory.
+ memory: 512Mi
+ limits:
+ # Specify the CPU limit.
+ cpu: 2
+ # Specify the memory limit.
+ memory: 1024Mi
+
+ ## Decision Center parameters
+ decisionCenter:
+ # Specify whether to enable Decision Center.
+ enabled: true
+ # Specify the persistence locale for Decision Center.
+ # Possible values: "ar_EG" (Arabic - Egypt), "zh_CN" (Chinese - China), "zh_TW" (Chinese - Taiwan),
+ # "nl_NL" (Dutch - Netherlands), "en_GB" (English - United Kingdom), "en_US" (English - United States),
+ # "fr_FR" (French - France), "de_DE" (German - Germany), "iw_IL" (Hebrew - Israel), "it_IT" (Italian - Italy),
+ # "ja_JP" (Japanese - Japan), "ko_KR" (Korean - Korea), "pl_PL" (Polish - Poland),
+ # "pt_BR" (Portuguese - Brazil), "ru_RU" (Russian - Russia), "es_ES" (Spanish - Spain)
+ persistenceLocale: en_US
+ # Specify the number of Decision Center pods.
+ replicaCount: 1
+ # Persistent Volume Claim to access the custom libraries
+ customlibPvc:
+ # Specify the name of the configMap with the wanted logging options.
+ # If left empty, default logging options are used.
+ loggingRef:
+ # Specify the name of the configMap with the wanted JVM options. If left empty, default JVM options are used.
+ jvmOptionsRef:
+ resources:
+ requests:
+ # Specify the requested CPU.
+ cpu: 500m
+ # Specify the requested memory.
+ memory: 1500Mi
+ limits:
+ # Specify the CPU limit.
+ cpu: 2
+ # Specify the memory limit.
+ memory: 4096Mi
+
+ ## Decision Runner parameters
+ decisionRunner:
+ # Specify whether to enable Decision Runner.
+ enabled: true
+ # Specify the number of Decision Runner pods.
+ replicaCount: 1
+ # Specify the name of the configMap with the wanted logging options. If left empty, default logging options are used.
+ loggingRef:
+ # Specify the name of the configMap with the wanted JVM options. If left empty, default JVM options are used.
+ jvmOptionsRef:
+ resources:
+ requests:
+ # Specify the requested CPU.
+ cpu: 500m
+ # Specify the requested memory.
+ memory: 512Mi
+ limits:
+ # Specify the CPU limit.
+ cpu: 2
+ # Specify the memory limit.
+ memory: 4096Mi
+
+ ## Database - Option 3: External (Oracle)
+ ## Fill in the parameters to use an external database configured by a secret.
+ externalCustomDatabase:
+ # Specify the name of the secret that contains the datasource configuration to use.
+ datasourceRef:
+ # Persistent Volume Claim to access the JDBC Database Driver
+ driverPvc:
+
+ readinessProbe:
+ # Specify the number of seconds after the container has started before the readiness probe is initiated.
+ initialDelaySeconds: 5
+ # Specify how often (in seconds) to perform the probe.
+ periodSeconds: 5
+ # Specify how many times Kubernetes will try before giving up when a pod starts and the probe fails. Giving up means marking the pod as Unready.
+ failureThreshold: 45
+ # Specify the number of seconds after which the readiness probe times out.
+ timeoutSeconds: 5
+
+ livenessProbe:
+ # Specify the number of seconds after the container has started before the liveness probe is initiated.
+ initialDelaySeconds: 300
+ # Specify how often (in seconds) to perform the probe.
+ periodSeconds: 10
+ # Specify how many times Kubernetes will try before giving up when a pod starts and the probe fails. Giving up means restarting the pod.
+ failureThreshold: 10
+ # Specify the number of seconds after which the liveness probe times out.
+ timeoutSeconds: 5
+
+ customization:
+ # Specify the name of the secret that contains the TLS certificate you want to use. If the parameter is left empty, a default certificate is used.
+ securitySecretRef:
+ # Specify the name of the secret that contains the configuration files required to use the IBM Business Automation Insights emitter.
+ baiEmitterSecretRef:
+ # Specify the name of the secret that contains your own authentication configuration.
+ authSecretRef:
+
+ oidc:
+ # Specify whether to enable OpenID Authentication.
+ enabled: false
+ # Specify the OIDC Allowed Domains
+ allowedDomains:
+
+ networkPolicy:
+ # Enable creation of NetworkPolicy resources.
+ enabled: true
+ # For Kubernetes v1.4, v1.5 and v1.6, use 'extensions/v1beta1'
+ # For Kubernetes v1.7 and later, use 'networking.k8s.io/v1'
+ apiVersion: networking.k8s.io/v1
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_decisions_ads.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_decisions_ads.yaml
new file mode 100644
index 00000000..b2d70bbc
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_decisions_ads.yaml
@@ -0,0 +1,280 @@
+###############################################################################
+##
+##Licensed Materials - Property of IBM
+##
+##(C) Copyright IBM Corp. 2020. All Rights Reserved.
+##
+##US Government Users Restricted Rights - Use, duplication or
+##disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+##
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+ name: icp4adeploy
+ labels:
+ app.kubernetes.io/instance: ibm-dba
+ app.kubernetes.io/managed-by: ibm-dba
+ app.kubernetes.io/name: ibm-dba
+ release: 20.0.2
+spec:
+ appVersion: 20.0.2
+
+ #################################################################################################################
+ ## The contents of this template CR file reflect only the specific parameters and configuration
+ ## settings applicable to the represented ICP4A capability.
+ ##
+ ## These values/configuration sections are to be used when manually assembling or updating the main
+ ## ICP4A CR that is being applied in order to install an ICP4A environment.
+ ##
+ ## If you are in the process of preparing a new install of an ICP4A environment, you should merge
+ ## the required values and configuration sections from this file into the
+ ## starting point CR template: ibm_cp4a_cr_enterprise[_FC]_foundation.yaml available in the
+ ## same location as this template.
+ ##
+ ## If you are updating an existing ICP4A environment, you should merge the required values and configuration
+ ## sections from this template into the main ICP4A CR file already applied in the environment.
+ ##
+ ######################################################################################################################
+
+
+ ##########################################################################
+ ## This section contains the shared configuration for all CP4A components #
+ ##########################################################################
+ shared_configuration:
+
+ ## CP4A patterns or capabilities to be deployed. This CR represents the "decisions_ads" pattern that brings ADS Designer and ADS Runtime.
+ sc_deployment_patterns: decisions_ads
+
+ ## The optional components to be installed if listed here.
+ ## This is normally populated by the User script based on input from the user.
+ ## The user can also manually specify the optional components to be deployed here.
+ ## ADS has two optional components: 'ads_designer' and 'ads_runtime'.
+ ## If ads_designer is set, you also have to set the 'ads_configuration.decision_designer.enabled' flag to true to install it.
+ ## If ads_runtime is set, you also have to set the 'ads_configuration.decision_runtime.enabled' flag to true to install it.
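+ ## For example (hypothetical selection), to deploy both components you could set:
+ ## sc_optional_components: "ads_designer,ads_runtime"
+ ## together with the two 'enabled' flags in the ads_configuration section below.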
+ sc_optional_components:
+
+
+ ########################################################################
+ ######## IBM Business Automation Decision Services ########
+ ########################################################################
+
+ ads_configuration:
+
+ decision_designer:
+ enabled: false
+ admin_secret_name: ibm-dba-ads-designer-secret
+
+ # Config maps containing certificates to trust
+ git_servers_certs:
+ wml_providers_certs:
+ other_trusted_certs:
+
+ ingress:
+ # If omitted, calculated as designer.ads.{{ sc_deployment_hostname_suffix }}
+ hostname: ""
+ tls_secret_name: ibm-dba-ads-designer-tls-cert
+
+# # Optional: you can provide a custom CA for routes.
+# tls_ca_secret_name:
+#
+# # Optional: you can add custom annotations to routes or ingresses.
+#
+# # For routes
+# custom_annotations:
+# haproxy.router.openshift.io/timeout: 1m
+#
+# # For k8s Ingresses on certified Kubernetes (CNCF)
+# custom_annotations:
+# nginx.ingress.kubernetes.io/proxy-body-size: 8m
+
+ decision_runtime:
+ enabled: false
+ admin_secret_name: ibm-dba-ads-runtime-secret
+ # The authentication mode can be "basic" or "ums".
+ authentication_mode: "basic"
+ archive_repository:
+ # The left part of the URL used to access your decision archives.
+ # Optional credentials are in decision_runtime.admin_secret_name
+ url_prefix: ""
+
+ # Only for debugging; the default value is true.
+ internal_tls: true
+
+ ingress:
+ # If omitted, calculated as runtime.ads.{{ sc_deployment_hostname_suffix }}
+ hostname: ""
+ tls_secret_name: ibm-dba-ads-runtime-tls-cert
+
+# # Optional: you can provide a custom CA for routes.
+# tls_ca_secret_name:
+#
+# # Optional: you can add custom annotations to routes or ingresses.
+#
+# # For routes
+# custom_annotations:
+# haproxy.router.openshift.io/timeout: 1m
+#
+# # For k8s Ingresses on certified Kubernetes (CNCF)
+# custom_annotations:
+# nginx.ingress.kubernetes.io/proxy-body-size: 8m
+
+ rr_integration:
+ image:
+ repository: cp.icr.io/cp/cp4a/ads/ads-rrintegration
+ tag: 20.0.2
+
+ resources:
+ requests:
+ cpu: '200m'
+ memory: '256Mi'
+ limits:
+ cpu: '500m'
+ memory: '512Mi'
+
+ mongo:
+ image:
+ repository: cp.icr.io/cp/cp4a/ads/mongo
+ tag: 4.2.5
+
+ persistence:
+ use_dynamic_provisioning: true
+ # Defaults to shared_configuration.storage_configuration.sc_fast_file_storage_classname
+ storage_class_name: ""
+ resources:
+ requests:
+ storage: 3Gi
+
+ resources:
+ requests:
+ cpu: '500m'
+ memory: '256Mi'
+ limits:
+ cpu: '1000m'
+ memory: '1Gi'
+
+ front:
+ image:
+ repository: cp.icr.io/cp/cp4a/ads/ads-front
+ tag: 20.0.2
+
+ replica_count: 1
+ resources:
+ requests:
+ cpu: '500m'
+ memory: '512Mi'
+ limits:
+ cpu: '2000m'
+ memory: '2Gi'
+
+ download_service:
+ ums_enabled: true
+ image:
+ repository: cp.icr.io/cp/cp4a/ads/ads-download
+ tag: 20.0.2
+
+ replica_count: 1
+ resources:
+ requests:
+ cpu: '100m'
+ memory: '128Mi'
+ limits:
+ cpu: '100m'
+ memory: '128Mi'
+
+ rest_api:
+ image:
+ repository: cp.icr.io/cp/cp4a/ads/ads-restapi
+ tag: 20.0.2
+ replica_count: 2
+
+ resources:
+ requests:
+ cpu: '500m'
+ memory: '512Mi'
+ limits:
+ cpu: '2000m'
+ memory: '2Gi'
+
+ git_service:
+ image:
+ repository: cp.icr.io/cp/cp4a/ads/ads-gitservice
+ tag: 20.0.2
+ replica_count: 2
+
+ persistence:
+ use_dynamic_provisioning: true
+ # Defaults to shared_configuration.storage_configuration.sc_fast_file_storage_classname
+ storage_class_name: ""
+ resources:
+ requests:
+ storage: 6Gi
+
+ resources:
+ requests:
+ cpu: '500m'
+ memory: '512Mi'
+ limits:
+ cpu: '2000m'
+ memory: '2Gi'
+
+ parsing_service:
+ image:
+ repository: cp.icr.io/cp/cp4a/ads/ads-parsing
+ tag: 20.0.2
+ replica_count: 2
+
+ resources:
+ requests:
+ cpu: '500m'
+ memory: '512Mi'
+ limits:
+ cpu: '2000m'
+ memory: '2Gi'
+
+ run_service:
+ image:
+ repository: cp.icr.io/cp/cp4a/ads/ads-run
+ tag: 20.0.2
+ replica_count: 2
+
+ resources:
+ requests:
+ cpu: '500m'
+ memory: '512Mi'
+ limits:
+ cpu: '2000m'
+ memory: '2Gi'
+
+ decision_runtime_service:
+ image:
+ repository: cp.icr.io/cp/cp4a/ads/ads-runtime
+ tag: 20.0.2
+ replica_count: 2
+
+ # Whether the decision runtime should include an exception stack trace when an incident occurs.
+ # This should only be used for debugging purposes, as a stack trace may expose sensitive internal data.
+ stack_trace_enabled: false
+
+ tls:
+ # Whether TLS is required for decision store or ML provider connections (HTTPS protocol)
+ enabled: true
+ # Name of the config map holding the TLS certificates
+ certs_config_map_name:
+ # Whether self-signed certificates are allowed
+ allow_self_signed: false
+ verify_hostname: true
+
+ cache:
+ config:
+ expiry: ''
+ resources: |-
+ 100
+
+ resources:
+ requests:
+ cpu: '500m'
+ memory: '512Mi'
+ limits:
+ cpu: '2000m'
+ memory: '2Gi'
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_digitalworker.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_digitalworker.yaml
new file mode 100644
index 00000000..4653373d
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_digitalworker.yaml
@@ -0,0 +1,190 @@
+###############################################################################
+##
+##Licensed Materials - Property of IBM
+##
+##(C) Copyright IBM Corp. 2020. All Rights Reserved.
+##
+##US Government Users Restricted Rights - Use, duplication or
+##disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+##
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+ name: icp4adeploy
+ labels:
+ app.kubernetes.io/instance: ibm-dba
+ app.kubernetes.io/managed-by: ibm-dba
+ app.kubernetes.io/name: ibm-dba
+ release: 20.0.2
+spec:
+ appVersion: 20.0.2
+
+
+ #################################################################################################################
+ ## The contents of this template CR file reflect only the specific parameters and configuration
+ ## settings applicable to the represented ICP4A capability.
+ ##
+ ## These values/configuration sections are to be used when manually assembling or updating the main
+ ## ICP4A CR that is being applied in order to install an ICP4A environment.
+ ##
+ ## If you are in the process of preparing a new install of an ICP4A environment,
+ ## you should merge the required values and configuration sections from this file into the
+ ## starting point CR template: ibm_cp4a_cr_enterprise_foundation.yaml available in the
+ ## same location as this template.
+ ##
+ ## If you are updating an existing ICP4A environment, you should merge the required values and configuration
+ ## sections from this template into the main ICP4A CR file already applied in the environment.
+ ##
+ ######################################################################################################################
+ shared_configuration:
+
+ ## CP4A patterns or capabilities to be deployed. This is not used by ADW;
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_digitalworker.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_digitalworker.yaml
new file mode 100644
index 00000000..4653373d
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_digitalworker.yaml
@@ -0,0 +1,190 @@
+###############################################################################
+##
+## Licensed Materials - Property of IBM
+##
+## (C) Copyright IBM Corp. 2020. All Rights Reserved.
+##
+## US Government Users Restricted Rights - Use, duplication or
+## disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+##
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+  name: icp4adeploy
+  labels:
+    app.kubernetes.io/instance: ibm-dba
+    app.kubernetes.io/managed-by: ibm-dba
+    app.kubernetes.io/name: ibm-dba
+    release: 20.0.2
+spec:
+  appVersion: 20.0.2
+
+  #################################################################################################################
+  ## The contents of this template CR file reflect only the specific parameters and configuration
+  ## settings applicable to the represented ICP4A capability.
+  ##
+  ## These values/configuration sections are to be used when manually assembling or updating the main
+  ## ICP4A CR that is being applied in order to install an ICP4A environment.
+  ##
+  ## If you are in the process of preparing a new install of an ICP4A environment,
+  ## you should merge the required values and configuration sections from this file into the
+  ## starting point CR template: ibm_cp4a_cr_enterprise_foundation.yaml, available in the
+  ## same location as this template.
+  ##
+  ## If you are updating an existing ICP4A environment, you should merge the required values and configuration
+  ## sections from this template into the main ICP4A CR file already applied in the environment.
+  ##
+  #################################################################################################################
+  shared_configuration:
+
+    ## CP4A patterns or capabilities to be deployed. This is not used by ADW;
+    ## ADW doesn't support the enterprise pattern yet.
+    sc_deployment_patterns: "digitalworker"
+
+    # ADW only reacts to one optional component: bai
+    sc_optional_components:
+
+  ########################################################################
+  ########   IBM Business Automation Digital Worker               ########
+  ########################################################################
+  # You can further customize the adw_configuration section as explained in the Knowledge Center.
+  # See the ibm_cp4a_cr_enterprise_FC_digitalworker.yaml file in descriptors/patterns for all parameters and their default values.
+  adw_configuration:
+
+    global:
+      # Optional: dedicated ADW image pull secret. Default: none
+      imagePullSecret: adw-reg-cred
+      kubernetes:
+        # Optional: the name of the Pod Security Policy you created to override the default one.
+        serviceAccountName: adw-psp-sa
+      # Optional: the name of the adw secret you created to override the default one.
+      adwSecret: "adw-secret"
+
+    userManagement:
+      dwAdmin: # The Digital Worker user ID with admin rights. If not set, it defaults to the `oidcUserName` configured in adwSecret.
+      searchUsers:
+        filterAttributes: # Comma-separated attributes used to search users in LDAP through SCIM. Default: "displayName,uid"
+        resultAttribute: # Attribute of the user returned from SCIM. Default: "displayName"
+      searchGroups:
+        filterAttributes: # Comma-separated attributes used to search groups in LDAP through SCIM. Default: "displayName"
+        resultAttribute: # Attribute of the group returned from SCIM. Default: "displayName"
+
+    # Optional: trace, debug, info, warn, error. Default: info
+    logLevel: "info"
+
+    designer:
+      image:
+        repository: # Can be used to pull the image from a specific docker registry
+        tag: # Can be used to pull a specific version of the image
+        pullPolicy: # "Always" or "IfNotPresent". Default: not set
+      hostname: "https://designer.{{ shared_configuration.sc_deployment_hostname_suffix }}"
+      service_type: # Route or ClusterIP or LoadBalancer
+      port: # The port used by external users, exposed as a `LoadBalancer` service
+      resources:
+        limits:
+          cpu: # The maximum amount of CPU to allocate to each pod
+          memory: # The maximum memory to allocate to each pod
+        requests:
+          memory: # The minimum memory required to start a pod
+          cpu: # The minimum amount of CPU required to start a pod
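+    # Worked example (the suffix value is an assumption): if
+    # shared_configuration.sc_deployment_hostname_suffix resolves to "adw.apps.example.com",
+    # the templated designer hostname above becomes:
+    # hostname: "https://designer.adw.apps.example.com"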
+
+    runtime:
+      image:
+        repository: # Can be used to pull the image from a specific docker registry
+        tag: # Can be used to pull a specific version of the image
+        pullPolicy: # "Always" or "IfNotPresent". Default: not set
+      # The number of replicas for the runtime statefulset. Default is 1.
+      replicas: 1
+      hostname: "https://runtime.{{ shared_configuration.sc_deployment_hostname_suffix }}"
+      service_type: # Route or ClusterIP or LoadBalancer
+      port: # The port used by external users, exposed as a `LoadBalancer` service
+      resources:
+        limits:
+          cpu: # The maximum amount of CPU to allocate to each pod
+          memory: # The maximum memory to allocate to each pod
+        requests:
+          memory: # The minimum memory required to start a pod
+          cpu: # The minimum amount of CPU required to start a pod
+      persistence:
+        storageClassName: "{{ shared_configuration.storage_configuration.sc_fast_file_storage_classname }}"
+        useDynamicProvisioning: # Default: true. Set to false if you want to manually define storage.
+        existingClaimName: # An existing PVC name for the runtime volume
+
+    management:
+      image:
+        repository: # Can be used to pull the image from a specific docker registry
+        tag: # Can be used to pull a specific version of the image
+        pullPolicy: # "Always" or "IfNotPresent". Default: not set
+      # The number of replicas for the management statefulset. Default is 1.
+      replicas: 1
+      hostname: "https://management.{{ shared_configuration.sc_deployment_hostname_suffix }}"
+      service_type: # Route or ClusterIP or LoadBalancer
+      port: # The port used by external users, exposed as a `LoadBalancer` service
+      resources:
+        limits:
+          cpu: # The maximum amount of CPU to allocate to each pod
+          memory: # The maximum memory to allocate to each pod
+        requests:
+          memory: # The minimum memory required to start a pod
+          cpu: # The minimum amount of CPU required to start a pod
+      persistence:
+        storageClassName: "{{ shared_configuration.storage_configuration.sc_medium_file_storage_classname }}"
+        useDynamicProvisioning: # Default: true. Set to false if you want to manually define storage.
+        existingClaimName: # An existing PVC name for the management volume
+      runLogLevel: # The default pino logging level for runs. Available log levels: silent, fatal, error, warn, info, debug, trace. Default is info.
+      runTimeout: # The default timeout for runs, in seconds
+
+    init:
+      image:
+        repository: # Can be used to pull the image from a specific docker registry
+        tag: # Can be used to pull a specific version of the image
+        pullPolicy: # "Always" or "IfNotPresent". Default: not set
+
+    mongo:
+      image:
+        repository: # Can be used to pull the image from a specific docker registry
+        tag: # Can be used to pull a specific version of the image
+        pullPolicy: # "Always" or "IfNotPresent". Default: not set
+      # The number of replicas for the mongo replicaset. Default is 1.
+      replicas: 1
+      persistence:
+        storageClassName: "{{ shared_configuration.storage_configuration.sc_fast_file_storage_classname }}"
+        useDynamicProvisioning: # Default: true. Set to false if you want to manually define storage.
+        size: # The minimum size of the persistent volume
+
+    npmRegistry:
+      image:
+        repository: # Can be used to pull the image from a specific docker registry
+        tag: # Can be used to pull a specific version of the image
+        pullPolicy: # "Always" or "IfNotPresent". Default: not set
+      # The number of replicas for the npmRegistry statefulset. Default is 1.
+      replicas: 1
+      persistence:
+        storageClassName: "{{ shared_configuration.storage_configuration.sc_medium_file_storage_classname }}"
+        useDynamicProvisioning: # Default: true. Set to false if you want to manually define storage.
+        size: # The minimum size of the persistent volume
+        existingClaimName: # An existing PVC name for the npmRegistry volume
+
+    setup:
+      image:
+        repository: # Can be used to pull the image from a specific docker registry
+        tag: # Can be used to pull a specific version of the image
+        pullPolicy: # "Always" or "IfNotPresent". Default: not set
+      resources:
+        limits:
+          memory: # The maximum memory to allocate to each pod
+        requests:
+          memory: # The minimum memory required to start a pod
+          cpu: # The minimum amount of CPU required to start a pod
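+    # Illustrative sketch (the storage class name, replica count, and size are assumptions):
+    # running the mongo replicaset above with three replicas on dynamically provisioned storage.
+    # mongo:
+    #   replicas: 3
+    #   persistence:
+    #     storageClassName: "managed-nfs-storage"
+    #     useDynamicProvisioning: true
+    #     size: 10Gi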
+
+    #baiKafka: If you want to monitor ADW with BAI, uncomment this block and provide the necessary parameters.
+      # Specify the ingress topic where ADW events should be sent:
+      #topic: "{{ meta.name }}-ibm-bai-ingress"
+      #kerberosEnabled: # Set to true to enable Kerberos authentication to the Kafka server. Default is false.
+    #baiElasticsearch:
+      # Specify the URL of Elasticsearch:
+      #url: ""
+      # Specify the URL of Kibana:
+      #kibanaUrl: ""
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_foundation.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_foundation.yaml
new file mode 100644
index 00000000..f8fa9e99
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_foundation.yaml
@@ -0,0 +1,778 @@
+
+###############################################################################
+##
+## Licensed Materials - Property of IBM
+##
+## (C) Copyright IBM Corp. 2020. All Rights Reserved.
+##
+## US Government Users Restricted Rights - Use, duplication or
+## disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+##
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+  name: icp4adeploy
+  labels:
+    app.kubernetes.io/instance: ibm-dba
+    app.kubernetes.io/managed-by: ibm-dba
+    app.kubernetes.io/name: ibm-dba
+    release: 20.0.2
+spec:
+  ##########################################################################
+  ## This section contains the shared configuration for all CP4A components #
+  ##########################################################################
+  shared_configuration:
+
+    ## FileNet Content Manager (FNCM) license; possible values are: user, non-production, and production.
+    ## This value could be different from the other licenses in the CR.
+    sc_deployment_fncm_license: ""
+
+    ## Business Automation Workflow (BAW) license; possible values are: user, non-production, and production.
+    ## This value could be different from the other licenses in the CR.
+    sc_deployment_baw_license: ""
+
+    ## Use this parameter to specify the license for the CP4A deployment. The possible values are
+    ## non-production and production; if not set, the license defaults to production.
+    ## This value could be different from the other licenses in the CR.
+    sc_deployment_license: ""
+
+    ## All CP4A components must use/share the image_pull_secrets to pull images
+    image_pull_secrets:
+    - admin.registrykey
+
+    ## All CP4A components must use/share the same docker image repository. For example, if the IBM Entitled Registry is used, then
+    ## it should be "cp.icr.io". Otherwise, it will be a local docker registry.
+    sc_image_repository: cp.icr.io
+
+    ## For non-OCP platforms (e.g., CNCF platforms such as AWS, GKE, etc.), this parameter is required
+    sc_run_as_user:
+
+    images:
+      keytool_job_container:
+        repository: cp.icr.io/cp/cp4a/ums/dba-keytool-jobcontainer
+        tag: 20.0.2
+      dbcompatibility_init_container:
+        repository: cp.icr.io/cp/cp4a/aae/dba-dbcompatibility-initcontainer
+        tag: 20.0.2
+      keytool_init_container:
+        repository: cp.icr.io/cp/cp4a/ums/dba-keytool-initcontainer
+        tag: 20.0.2
+      umsregistration_initjob:
+        repository: cp.icr.io/cp/cp4a/aae/dba-umsregistration-initjob
+        tag: 20.0.2
+
+    ## All CP4A components should use this pull_policy as the default, but it can be overridden by each component
+    pull_policy: IfNotPresent
+
+    ## All CP4A components must use/share the root_ca_secret in order for integration
+    root_ca_secret: icp4a-root-ca
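+
+    # Illustrative sketch (the registry host "registry.local:5000" is an assumption): when the
+    # images are mirrored to a local docker registry instead of the IBM Entitled Registry, point
+    # sc_image_repository and each image repository at that registry, for example:
+    # sc_image_repository: registry.local:5000
+    # images:
+    #   keytool_job_container:
+    #     repository: registry.local:5000/cp/cp4a/ums/dba-keytool-jobcontainer
+    #     tag: 20.0.2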
This CR represents the "foundation" pattern (aka FileNet Content Manager), which includes the following + ## mandatory components: icn (BAN/Navigator), rr (Resource Registry) and optional components: ums, bas, and bai + sc_deployment_patterns: foundation + + ## The optional components to be installed if listed here. This is normally populated by the User script based on input from the user. User can + ## also manually specify the optional components to be deployed here. For this foundation CR, the optional components are: ums, bas, and bai + sc_optional_components: + + ## The deployment type as selected by the user. Possible values are: demo, enteprise + sc_deployment_type: enterprise + + ## The platform to be deployed specified by the user. Possible values are: OCP and other. This is normally populated by the User script + ## based on input from the user. + sc_deployment_platform: + + ## For OCP, this is used to create route, you should input a valid hostname in the required field. + sc_deployment_hostname_suffix: "{{ meta.name }}." + + ## If the root certificate authority (CA) key of the external service is not signed by the operator root CA key, provide the TLS certificate of + ## the external service to the component's truststore. + trusted_certificate_list: [] + + ## Shared encryption key secret name that is used for Workstream Services and Process Federation Server integration. + encryption_key_secret: icp4a-shared-encryption-key + + ## On OCP 3.x and 4.x, the User script will populate these three (3) parameters based on your input for "enterprise" deployment. + ## If you manually deploying without using the User script, then you would provide the different storage classes for the slow, medium + ## and fast storage parameters below. If you only have 1 storage class defined, then you can use that 1 storage class for all 3 parameters. + storage_configuration: + sc_slow_file_storage_classname: "" + sc_medium_file_storage_classname: "" + sc_fast_file_storage_classname: "" + + ############################################################################################## + # Kafka client configuration for IBM Business Automation Insights and other ICP4A products. + # + # The customization of the following 4 parameters is "" only if you have + # specificed "bai" as part of the sc_optional_components to specify that Business Automation + # Insights must be installed. + # + # Otherwise, if Business Automation Insights is not being installed, there is no need to configure + # these parameters and they can be kept empty. + ############################################################################################## + kafka_configuration: + # Comma-separated list of hosts:port for connection to the Kafka cluster. + # This field is mandatory for any Kafka configuration. + bootstrap_servers: "" + # Value for the Kafka security.protocol property + # Valid values: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL. Default: PLAINTEXT. + security_protocol: + # Value for the Kafka sasl.mechanism property + # Valid values: PLAIN, SCRAM-SHA-512. Default: PLAIN. 
+
+    ## The beginning section of the LDAP configuration for CP4A
+    ldap_configuration:
+      ## The possible values are: "IBM Security Directory Server" or "Microsoft Active Directory"
+      lc_selected_ldap_type: ""
+
+      ## The name of the LDAP server to connect to
+      lc_ldap_server: ""
+
+      ## The port of the LDAP server to connect to. Some possible values are: 389, 636, etc.
+      lc_ldap_port: ""
+
+      ## The LDAP bind secret for LDAP authentication. The secret is expected to have ldapUsername and ldapPassword keys. Refer to Knowledge Center for more info.
+      lc_bind_secret: ldap-bind-secret
+
+      ## The LDAP base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc.
+      lc_ldap_base_dn: ""
+
+      ## Enable SSL/TLS for LDAP communication. Refer to Knowledge Center for more info.
+      lc_ldap_ssl_enabled: true
+
+      ## The name of the secret that contains the LDAP SSL/TLS certificate.
+      lc_ldap_ssl_secret_name: ""
+
+      ## The LDAP user name attribute. One possible value is "*:cn" for TDS and "user:sAMAccountName" for AD. Refer to Knowledge Center for more info.
+      lc_ldap_user_name_attribute: ""
+
+      ## The LDAP user display name attribute. One possible value is "cn" for TDS and "sAMAccountName" for AD. Refer to Knowledge Center for more info.
+      lc_ldap_user_display_name_attr: ""
+
+      ## The LDAP group base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc.
+      lc_ldap_group_base_dn: ""
+
+      ## The LDAP group name attribute. One possible value is "*:cn" for TDS and "*:cn" for AD. Refer to Knowledge Center for more info.
+      lc_ldap_group_name_attribute: "*:cn"
+
+      ## The LDAP group display name attribute. One possible value for both TDS and AD is "cn". Refer to Knowledge Center for more info.
+      lc_ldap_group_display_name_attr: "cn"
+
+      ## The LDAP group membership search filter string. One possible value is
+      ## "(&(cn=%v)(|(objectclass=groupOfNames)(objectclass=groupOfUniqueNames)(objectclass=groupOfURLs)))" for TDS
+      ## and "(&(cn=%v)(objectcategory=group))" for AD.
+      lc_ldap_group_membership_search_filter: ""
+
+      ## The LDAP group membership ID map. One possible value is "groupofnames:member" for TDS and "memberOf:member" for AD.
+      lc_ldap_group_member_id_map: ""
+
+      ## The User script will uncomment the section needed based on the user's input to the User script. If you are deploying without the User script,
+      ## uncomment the necessary section (depending on whether you are using Active Directory (ad) or Tivoli Directory Service (tds)) accordingly.
+ # ad: + # lc_ad_gc_host: "" + # lc_ad_gc_port: "" + # lc_user_filter: "(&(samAccountName=%v)(objectClass=user))" + # lc_group_filter: "(&(samAccountName=%v)(objectclass=group))" + # tds: + # lc_user_filter: "(&(cn=%v)(objectclass=person))" + # lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))" + + ## The beginning section of database configuration for CP4A + datasource_configuration: + ## The database configuration for ICN (Navigator) - aka BAN (Business Automation Navigator) + dc_icn_datasource: + ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle". This should be the same as the + ## GCD and object store configuration above. + dc_database_type: "" + ## Provide the ICN datasource name. The default value is "ECMClientDS". + dc_common_icn_datasource_name: "ECMClientDS" + database_servername: "" + ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521" + database_port: "" + ## Provide the name of the database for ICN (Navigator). For example: "ICNDB" + database_name: "" + ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//:1521/orcl" + dc_oracle_icn_jdbc_url: "" + ###################################################################################### + ## If the database type is "Db2HADR", then complete the rest of the parameters below. + ## Otherwise, remove or comment out the rest of the parameters below. + ###################################################################################### + dc_hadr_standby_servername: "" + ## Provide the standby database server port. For Db2, the default is "50000". + dc_hadr_standby_port: "" + ## Provide the validation timeout. If not preference, keep the default value. + dc_hadr_validation_timeout: 15 + ## Provide the retry internal. If not preference, keep the default value. + dc_hadr_retry_interval_for_client_reroute: 15 + ## Provide the max # of retries. If not preference, keep the default value. + dc_hadr_max_retries_for_client_reroute: 3 + + ## The database configuration for UMS (User Management Service) + dc_ums_datasource: + ## Provide the database type from your infrastructure. The possible values are "db2" or "oracle". This should be the same as the + ## other datasource configuration above. Db2 with HADR is automatically activated if dc_ums_oauth_alternate_hosts and dc_ums_oauth_alternate_ports + ## are set. + dc_ums_oauth_type: "" + ## Provide the database server name or IP address of the database server. + dc_ums_oauth_host: "" + ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521". + dc_ums_oauth_port: "" + ## Provide the name of the database for UMS. For example: "UMSDB" + dc_ums_oauth_name: "" + dc_ums_oauth_schema: OAuthDBSchema + dc_ums_oauth_ssl: true + dc_ums_oauth_ssl_secret_name: + dc_ums_oauth_driverfiles: + dc_ums_oauth_alternate_hosts: + dc_ums_oauth_alternate_ports: + + ## The database database configuration for teamserver + ## Provide the database type from your infrastructure. The possible values are "db2" or "oracle". This should be the same as the + ## other datasource configuration above. Db2 with HADR is automatically activated if dc_ums_teamserver_alternate_hosts and dc_ums_teamserver_alternate_ports + ## are set. + dc_ums_teamserver_type: "" + dc_ums_teamserver_host: "" + ## Provide the database server port. For Db2, the default is "50000". 
For Oracle, the default is "1521". + dc_ums_teamserver_port: "" + ## Provide the name of the database for UMS teamserver. For example: "UMSDB" + dc_ums_teamserver_name: "" + dc_ums_teamserver_ssl: true + dc_ums_teamserver_ssl_secret_name: + dc_ums_teamserver_driverfiles: + dc_ums_teamserver_alternate_hosts: + dc_ums_teamserver_alternate_ports: + + ######################################################################## + ######## IBM Business Automation Navigator configuration ######## + ######################################################################## + navigator_configuration: + + ## Navigator secret that contains user credentials for LDAP and database + ban_secret_name: ibm-ban-secret + + ## The architecture of the cluster. This is the default for Linux and should not be changed. + arch: + amd64: "3 - Most preferred" + + ## The number of replicas or pods to be deployed. The default is 1 replica and for high availability in a production env, + ## it is recommended to have 2 or more. + replica_count: 1 + + ## This is the image repository and tag that correspond to image registry, which is where the image will be pulled. + image: + + ## The default repository is the IBM Entitled Registry + repository: cp.icr.io/cp/cp4a/ban/navigator-sso + tag: ga-308-icn + + ## This will override the image pull policy in the shared_configuration. + pull_policy: IfNotPresent + + ## Logging for workloads. This is the default setting. + log: + format: json + + ## This is the initial default resource requests. If more resources are needed, + ## make the changes here to meet your requirement. + resources: + requests: + cpu: 500m + memory: 512Mi + limits: + cpu: 1 + memory: 1536Mi + + ## By default "Autoscaling" is enabled with the following settings with a minimum of 1 replca and a maximum of 3 replicas. Change + ## this settings to meet your requirement. + auto_scaling: + enabled: true + max_replicas: 3 + min_replicas: 1 + ## This is the default cpu percentage before autoscaling occurs. + target_cpu_utilization_percentage: 80 + + ## Below are the default ICN Production settings. Make the necessary changes as you see fit. + icn_production_setting: + timezone: Etc/UTC + jvm_initial_heap_percentage: 40 + jvm_max_heap_percentage: 66 + jvm_customize_options: + icn_db_type: db2 + icn_jndids_name: ECMClientDS + icn_schema: ICNDB + icn_table_space: ICNDB + allow_remote_plugins_via_http: false + + + ## Default settings for monitoring + monitor_enabled: false + ## Default settings for logging + logging_enabled: false + + ## Persistent Volume Claims for Navigator. The Operator will create the PVC using the names below by default. + datavolume: + existing_pvc_for_icn_cfgstore: "icn-cfgstore" + existing_pvc_for_icn_logstore: "icn-logstore" + existing_pvc_for_icn_pluginstore: "icn-pluginstore" + existing_pvc_for_icnvw_cachestore: "icn-vw-cachestore" + existing_pvc_for_icnvw_logstore: "icn-vw-logstore" + existing_pvc_for_icn_aspera: "icn-asperastore" + + ## Default values for both rediness and liveness probes. Modify these values to meet your requirements. + probe: + readiness: + + initial_delay_seconds: 120 + period_seconds: 5 + timeout_seconds: 10 + failure_threshold: 6 + liveness: + initial_delay_seconds: 600 + period_seconds: 5 + timeout_seconds: 5 + failure_threshold: 6 + + ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above. 
+ image_pull_secrets: + name: "admin.registrykey" + + ######################################################################## + ######## IBM User and Group Management Service configuration ######## + ######################################################################## + ums_configuration: + existing_claim_name: + replica_count: 2 + service_type: Route + # your external UMS host name, only required if there is no sc_deployment_hostname_suffix given + hostname: + port: 443 + images: + ums: + repository: cp.icr.io/cp/cp4a/ums/ums + tag: 20.0.2 + admin_secret_name: ibm-dba-ums-secret + ## optional for secure communication with UMS + external_tls_secret_name: ibm-dba-ums-external-tls-secret + ## optional for secure communication with UMS + external_tls_ca_secret_name: ibm-dba-ums-external-tls-ca-secret + ## optional for secure communication with UMS + external_tls_teams_secret_name: ibm-dba-ums-external-tls-teams-secret + ## optional for secure communication with UMS + external_tls_scim_secret_name: ibm-dba-ums-external-tls-scim-secret + ## optional for secure communication with UMS + external_tls_sso_secret_name: ibm-dba-ums-external-tls-sso-secret + oauth: + ## optional: full DN of an LDAP group that is authorized to manage OIDC clients, in addition to primary admin from admin secret + client_manager_group: + ## optional: full DN of an LDAP group that is authorized to manage app_tokens, in addition to primary admin from admin secret + token_manager_group: + ## optional: lifetime of OAuth access_tokens. default is 7200s + access_token_lifetime: + ## optional: lifetime of app-tokens. default is 366d + app_token_lifetime: + ## optional: lifetime of app-passwords. default is 366d + app_password_lifetime: + ## optional: maximimum number of app-tokens or app-passwords per client. default is 100 + app_token_or_password_limit: + ## optional: encoding / encryption when sotring client secrets in OAuth database. Default is xor for compatibility. Recommended value is PBKDF2WithHmacSHA512 + client_secret_encoding: + resources: + limits: + cpu: 500m + memory: 512Mi + requests: + cpu: 200m + memory: 256Mi + ## Horizontal Pod Autoscaler + autoscaling: + enabled: true + min_replicas: 2 + max_replicas: 5 + target_average_utilization: 98 + use_custom_jdbc_drivers: false + use_custom_binaries: false + custom_secret_name: + custom_xml: + logs: + console_format: json + console_log_level: INFO + console_source: message,trace,accessLog,ffdc,audit + trace_format: ENHANCED + trace_specification: "*=info" + + ################################################################## + ######## Resource Registry configuration ######## + ################################################################## + resource_registry_configuration: + # If you inputed hostname and port here. They will be used always + # If you are using pattern mode (the shared_configuration.sc_deployment_patterns contains value) + # Then you don't need to fill the hostname and port. It will use shared_configuration.sc_deployment_hostname_suffix to generate one + # But if you haven't input suffix. And no hostname port assigned. A error will be reported in operator log during deploy + # For non pattern mode you must assign a valid hostname and port here + hostname: "{{ 'rr.' 
+ shared_configuration.sc_deployment_hostname_suffix }}" + port: 443 + images: + pull_policy: IfNotPresent + resource_registry: + repository: cp.icr.io/cp/cp4a/aae/dba-etcd + tag: 20.0.2 + admin_secret_name: resource-registry-admin-secret + replica_size: 3 + probe: + liveness: + initial_delay_seconds: 60 + period_seconds: 10 + timeout_seconds: 5 + success_threshold: 1 + failure_threshold: 3 + readiness: + initial_delay_seconds: 10 + period_seconds: 10 + timeout_seconds: 5 + success_threshold: 1 + failure_threshold: 3 + resource: + limits: + cpu: "500m" + memory: "512Mi" + requests: + cpu: "100m" + memory: "128Mi" + auto_backup: + enable: true + minimal_time_interval: 300 + pvc_name: "{{ meta.name }}-dba-rr-pvc" + dynamic_provision: + enable: true + size: 3Gi + storage_class: "{{ shared_configuration.storage_configuration.sc_fast_file_storage_classname }}" + + ############################################################################# + ## This section contains the BAStudio component configurations # + ## it's the optinal component: app_designer, ads_designer, bas # + ############################################################################# + bastudio_configuration: + # If you inputed hostname and port here. They will be used always + # If you are using pattern mode (the shared_configuration.sc_deployment_patterns contains value) + # Then you don't need to fill the hostname and port. It will use shared_configuration.sc_deployment_hostname_suffix to generate one + # But if you haven't input suffix. And no hostname port assigned. A error will be reported in operator log during deploy + # For non pattern mode you must assign a valid hostname and port here + hostname: "{{ 'bas.' + shared_configuration.sc_deployment_hostname_suffix }}" + port: 443 + images: + pull_policy: IfNotPresent + bastudio: + repository: cp.icr.io/cp/cp4a/bas/bastudio + tag: 20.0.2 + #Adjust this one if you created the secret with name other than the default + # Inside the admin secret. There are two must fields + # dbPassword: + # dbUsername: + admin_secret_name: "{{ meta.name }}-bas-admin-secret" + #Provide BAStudio default administrator ID + admin_user: "" + replica_size: 1 + database: + # The database type used. Only DB2, Oracle supported + type: "db2" + #DB2 - Provide the database server hostname for BAStudio use + host: "" + # DB2 - Provide the database name for BAStudio use + # The database provided should be created by the BAStudio SQL script template. + name: "" + # DB2 - Provide the database server port for BAStudio use + port: "" + # DB2 - If you want to enable database automatic client reroute (ACR) for HADR, you must configure alternative_host and alternative_port. Otherwise, leave them blank. + alternative_host: + alternative_port: + # If you enabled SSL for Database please enable this one to set it to true + ssl_enabled: false + # Oracle - If you are using Oracle input the oracle database connection URL here + oracle_url: + cm_max_pool_size: '50' + cm_min_pool_size: '2' + # If you enabled the SSL for database. Please save the TLS certificate used by databased in a secret and put the name here + certificate_secret_name: + # If you are using custom JDBC (for example using Oracle or some special DB2 driver). 
Please set this one to true + user_custom_jdbc_drivers: false + # The PVC name which bind to the PV which have the custom JDBC driver files stored + custom_jdbc_pvc: + # The custom JDBC file set + jdbc_driver_files: 'db2jcc4.jar db2jcc_license_cisuz.jar db2jcc_license_cu.jar' + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 3 + targetAverageUtilization: 80 + external_connection_timeout: 60s + # Custom liberty XML configurations + custom_xml: + # The secret name which contain custom liberty configurations + custom_secret_name: + # The Business Automation Custom XML configurations + bastudio_custom_xml: + # If you don't want to use walkme script. You can set this one to false + use_walkme: true + max_cached_objects_during_refactoring: 256 + logs: + consoleFormat: 'json' + consoleLogLevel: 'INFO' + consoleSource: 'message,trace,accessLog,ffdc,audit' + traceFormat: 'ENHANCED' + traceSpecification: '*=info' + tls: + tlsTrustList: [] + liveness_probe: + initialDelaySeconds: 300 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + successThreshold: 1 + readiness_probe: + initialDelaySeconds: 240 + periodSeconds: 5 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + resources: + bastudio: + limits: + cpu: '4' + memory: '3Gi' + requests: + cpu: '1' + memory: '1Gi' + init_process: + limits: + cpu: '500m' + memory: '512Mi' + requests: + cpu: '100m' + memory: '128Mi' + csrf_referrer: + whitelist: '' + jms_server: + image: + ## Image name for Java Messaging Service container + repository: cp.icr.io/cp/cp4a/bas/jms + ## Image tag for Java Messaging Service container + tag: 20.0.2 + ## Pull policy for Java Messaging Service container + pull_policy: Always + tls: + ## TLS secret name for Java Message Service (JMS) + tls_secret_name: "{{ meta.name }}-bastudio-jms-tls" + resources: + limits: + ## Memory limit for JMS configuration + memory: "1Gi" + ## CPU limit for JMS configuration + cpu: "1000m" + requests: + ## Requested amount of memory for JMS configuration + memory: "256Mi" + ## Requested amount of CPU for JMS configuration + cpu: "200m" + storage: + ## Whether to enable persistent storage for JMS + persistent: true + ## Size for JMS persistent storage + size: "1Gi" + ## Whether to enable dynamic provisioning for JMS persistent storage + use_dynamic_provisioning: true + ## Access modes for JMS persistent storage + access_modes: + - ReadWriteOnce + ## Storage class name for JMS persistent storage + storage_class: "{{ shared_configuration.storage_configuration.sc_fast_file_storage_classname }}" + #----------------------------------------------------------------------- + # App Engine Playback Server (playback_server) can be only one instance. + # This is different from App Engine + # (where application_engine_configuration is a list and you can deploy multiple instances). + #----------------------------------------------------------------------- + playback_server: + images: + pull_policy: IfNotPresent + db_job: + repository: cp.icr.io/cp/cp4a/aae/solution-server-helmjob-db + tag: 20.0.2 + solution_server: + repository: cp.icr.io/cp/cp4a/aae/solution-server + tag: 20.0.2 + # If you inputed hostname and port here. They will be used always + # If you are using pattern mode (the shared_configuration.sc_deployment_patterns contains value) + # Then you don't need to fill the hostname and port. It will use shared_configuration.sc_deployment_hostname_suffix to generate one + # But if you haven't input suffix. And no hostname port assigned. 
+      # An error will be reported in the operator log during deployment.
+      # For non-pattern mode, you must assign a valid hostname and port here.
+      hostname: "{{ 'ae-pbk.' + shared_configuration.sc_deployment_hostname_suffix }}"
+      port: 443
+      # The admin secret must contain these two fields:
+      #   AE_DATABASE_PWD:
+      #   AE_DATABASE_USER:
+      admin_secret_name:
+      # The default admin user ID for the Application Engine.
+      # The user ID should be the bootstrap admin ID for IBM Business Automation Navigator. It is case sensitive.
+      # The same ID should also be a User Management Service (UMS) admin user.
+      admin_user:
+      external_tls_secret:
+      external_connection_timeout: 90s
+      replica_size: 1
+      ## Optional when the database type is db2; required when it is oracle.
+      user_custom_jdbc_drivers: false
+      service_type: Route
+      autoscaling:
+        enabled: false
+        max_replicas: 5
+        min_replicas: 2
+        target_cpu_utilization_percentage: 80
+      database:
+        # AE database host name or IP when the database is DB2
+        host:
+        # AE database name when the database is DB2
+        name:
+        # AE database port number when the database is DB2
+        port:
+        ## If you set up DB2 HADR and want to use it, you must configure alternative_host and alternative_port; otherwise, leave them blank.
+        alternative_host:
+        alternative_port:
+        ## Only DB2 and Oracle are supported
+        type: db2
+        ## Required only when the type is Oracle, both SSL and non-SSL. The format must be a pure Oracle descriptor like (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=)(PORT=))(CONNECT_DATA=(SERVICE_NAME=)))
+        oracle_url_without_wallet_directory:
+        enable_ssl: false
+        ## Required only when the type is Oracle and enable_ssl is true. The format must be a pure Oracle descriptor. The SSO wallet directory must be specified and fixed to (MY_WALLET_DIRECTORY=/shared/resources/oracle/wallet).
+        oracle_url_with_wallet_directory:
+        ## Required only when enable_ssl is true, for both the db2 and oracle database types
+        db_cert_secret_name:
+        ## Required only when the type is oracle and enable_ssl is true.
+        oracle_sso_wallet_secret_name:
+        ## Optional.
If it is empty, the DBASB is default when db2 and the AE_DATABASE_USER set in the admin_secret_name is default when oracle + current_schema: DBASB + initial_pool_size: 1 + max_pool_size: 10 + uv_thread_pool_size: 4 + max_lru_cache_size: 1000 + max_lru_cache_age: 600000 + dbcompatibility_max_retries: 30 + dbcompatibility_retry_interval: 10 + ## The persistent volume claim for custom JDBC Drivers if using the custom jdbc drivers is enabled + custom_jdbc_pvc: + log_level: + node: info + browser: 2 + content_security_policy: + enable: false + whitelist: + env: + max_size_lru_cache_rr: 1000 + server_env_type: development + purge_stale_apps_interval: 86400000 + apps_threshold: 100 + stale_threshold: 172800000 + max_age: + auth_cookie: "900000" + csrf_cookie: "3600000" + static_asset: "2592000" + hsts_header: "2592000" + probe: + liveness: + failure_threshold: 5 + initial_delay_seconds: 60 + period_seconds: 10 + success_threshold: 1 + timeout_seconds: 180 + readiness: + failure_threshold: 5 + initial_delay_seconds: 10 + period_seconds: 10 + success_threshold: 1 + timeout_seconds: 180 + # Redis settings only when you set session.use_external_store to true + redis: + # Your external redis host/ip + host: localhost + # Your external redis port + port: 6379 + ttl: 1800 + resource_ae: + limits: + cpu: 2000m + memory: 2Gi + requests: + cpu: 300m + memory: 512Mi + resource_init: + limits: + cpu: 500m + memory: 256Mi + requests: + cpu: 100m + memory: 128Mi + session: + check_period: "3600000" + duration: "1800000" + max: "10000" + resave: "false" + rolling: "true" + save_uninitialized: "false" + # By setting this option to true. The AE will use external Redis as session storage + # To support multiple AE pods + use_external_store: "false" + tls: + tls_trust_list: [] + # If you want to make the replicate size more than 1 for this cluster. Then you must enable the shared storage + share_storage: + enabled: false + # If you create the PV manually. Then please provide the PVC name bind here + pvc_name: + auto_provision: + enabled: false + # Required if you enabled the auto provision + storage_class: + size: 20Gi + + ######################################################################## + ######## IBM Business Automation Insights configuration ######## + ######################################################################## + bai_configuration: + imageCredentials: + registry: cp.icr.io/cp/cp4a + + # Set to true to automatically create the OpenShift routes when sc_deployment_platform is set + # to OCP or ROKS. + createRoutes: false + + # Set to true to enable the Flink job for sending events to HDFS. + ingestion: + install: false + + # Set to true to enable the Flink job for Digital Worker. + adw: + install: false + + # Set to true to enable the Flink job for BAW. + bpmn: + install: false + + # Set to true to enable the Flink job for BAWAdv. + bawadv: + install: false + + # Set to true to enable the Flink job for ICM. + icm: + install: false + + # Set to true to enable the Flink job for ODM. + odm: + install: false + + # Set to true to enable the Flink job for Content. 
+  content:
+    install: false
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_ier_content.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_ier_content.yaml
new file mode 100644
index 00000000..a99bd06a
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_ier_content.yaml
@@ -0,0 +1,91 @@
+###############################################################################
+#
+# Licensed Materials - Property of IBM
+#
+# (C) Copyright IBM Corp. 2019. All Rights Reserved.
+#
+# US Government Users Restricted Rights - Use, duplication or
+# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+#
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+  name: ier
+  labels:
+    app.kubernetes.io/instance: ibm-dba
+    app.kubernetes.io/managed-by: ibm-dba
+    app.kubernetes.io/name: ibm-dba
+    release: 20.0.2
+spec:
+  ## TIPS: The names of all variables in the spec field are converted to snake_case by the operator before running ansible.
+  ## For example, serviceAccount in the spec becomes service_account in ansible.
+  ## It is recommended that you perform some type validation in Ansible on the variables to ensure that
+  ## your application is receiving expected input.
+
+  ## Shared configuration among all tribes
+  shared_configuration:
+    sc_image_repository: cp.icr.io
+    root_ca_secret: icp4a-root-ca
+    sc_optional_components: ier
+    sc_deployment_hostname_suffix: "9.30.182.206.nip.io"
+    storage_configuration:
+      sc_medium_file_storage_classname: "managed-nfs-storage"
+    image_pull_secrets:
+    - admin.registrykey
+    ## FileNet Content Manager (FNCM) license; possible values are: user, non-production, and production.
+    ## This value could be different from the rest of the licenses.
+    sc_deployment_fncm_license: ""
+    sc_deployment_type: enterprise
+    sc_deployment_platform: OCP
+
+  ########################################################################
+  ########   IBM Enterprise Records configuration                 ########
+  ########################################################################
+  ier_configuration:
+    arch:
+      amd64: "3 - Most preferred"
+    replica_count: 1
+    image:
+      repository: cp.icr.io/cp/cp4a/ier/ier
+      tag: 20.0.2
+      pull_policy: IfNotPresent
+    # Logging for workloads
+    log:
+      format: json
+    # Resource and autoscaling settings
+    resources:
+      requests:
+        cpu: 500m
+        memory: 512Mi
+      limits:
+        cpu: 1
+        memory: 1536Mi
+
+    # Horizontal Pod Autoscaler
+    auto_scaling:
+      enabled: false
+      max_replicas: 3
+      min_replicas: 1
+      target_cpu_utilization_percentage: 80
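+
+    # Illustrative sketch: turning on the Horizontal Pod Autoscaler declared above, keeping
+    # the same replica bounds and the 80% CPU target.
+    # auto_scaling:
+    #   enabled: true
+    #   max_replicas: 3
+    #   min_replicas: 1
+    #   target_cpu_utilization_percentage: 80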
+    # IER Production settings
+    ier_production_setting:
+      license: accept
+      collectd_enable_plugin_write_graphite: false
+      run_as_user: 50001
+    # Specify the names of existing persistent volume claims to be used by your application.
+    # Specify an empty string if you don't have an existing persistent volume claim.
+    datavolume:
+      existing_pvc_for_ier_instance: "ier-instance"
+
+    probe:
+      readiness:
+        initial_delay_seconds: 120
+        period_seconds: 5
+        timeout_seconds: 10
+        failure_threshold: 6
+      liveness:
+        initial_delay_seconds: 600
+        period_seconds: 5
+        timeout_seconds: 5
+        failure_threshold:
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_workflow-workstreams.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_workflow-workstreams.yaml
new file mode 100644
index 00000000..5a36ee98
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_workflow-workstreams.yaml
@@ -0,0 +1,1759 @@
+###############################################################################
+#
+# Licensed Materials - Property of IBM
+#
+# (C) Copyright IBM Corp. 2020. All Rights Reserved.
+#
+# US Government Users Restricted Rights - Use, duplication or
+# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+#
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+  name: workflow-workstreams
+  labels:
+    app.kubernetes.io/instance: ibm-dba
+    app.kubernetes.io/managed-by: ibm-dba
+    app.kubernetes.io/name: ibm-dba
+    release: 20.0.2
+spec:
+  appVersion: 20.0.2
+  ##########################################################################
+  ## This section contains the shared configuration for all CP4A components #
+  ##########################################################################
+  shared_configuration:
+
+    ## Business Automation Workflow (BAW) license; possible values are: user, non-production, and production.
+    ## This value could be different from the other licenses in the CR.
+    sc_deployment_baw_license: ""
+
+    ## FileNet Content Manager (FNCM) license; possible values are: user, non-production, and production.
+    ## This value could be different from the other licenses in the CR.
+    sc_deployment_fncm_license: ""
+
+    ## Use this parameter to specify the license for the CP4A deployment. The possible values are
+    ## non-production and production; if not set, the license defaults to production.
+    ## This value could be different from the other licenses in the CR.
+    sc_deployment_license: ""
+
+    ## All CP4A components must use/share the image_pull_secrets to pull images
+    image_pull_secrets:
+    - admin.registrykey
+
+    ## All CP4A components must use/share the same docker image repository. For example, if the IBM Entitled Registry is used, then
+    ## it should be "cp.icr.io". Otherwise, it will be a local docker registry.
+    sc_image_repository: cp.icr.io
+
+    ## For non-OCP platforms (e.g., CNCF platforms such as AWS, GKE, etc.), this parameter is required
+    sc_run_as_user:
+
+    images:
+      keytool_job_container:
+        repository: cp.icr.io/cp/cp4a/baw/dba-keytool-jobcontainer
+        tag: 20.0.2
+      dbcompatibility_init_container:
+        repository: cp.icr.io/cp/cp4a/baw/dba-dbcompatibility-initcontainer
+        tag: 20.0.2
+      keytool_init_container:
+        repository: cp.icr.io/cp/cp4a/baw/dba-keytool-initcontainer
+        tag: 20.0.2
+      umsregistration_initjob:
+        repository: cp.icr.io/cp/cp4a/baw/dba-umsregistration-initjob
+        tag: 20.0.2
+
+    ## All CP4A components should use this pull_policy as the default, but it can be overridden by each component
+    pull_policy: IfNotPresent
+
+    ## All CP4A components must use/share the root_ca_secret in order for integration
+    root_ca_secret: icp4a-root-ca
+
+    ## CP4A patterns or capabilities to be deployed. This CR represents the "workflow-workstreams" pattern,
+    ## which includes the following mandatory components: ban (Business Automation Navigator),
+    ## ums (User Management Service), rr (Resource Registry), app_engine (Application Engine);
+    ## and optional components: bai.
+    sc_deployment_patterns: workflow-workstreams
+
+    ## The optional components to be installed if listed here. This is normally populated by the User script based on input from the user.
+    ## The optional components are: bai
+    sc_optional_components: bai
+
+    ## The deployment type as selected by the user. Possible values are: demo, enterprise
+    sc_deployment_type: enterprise
+
+    ## The platform to be deployed, specified by the user. Possible values are: OCP and other. This is normally populated by the User script
+    ## based on input from the user.
+    sc_deployment_platform:
+
+    ## For OCP, this is used to create a route; you should input a valid hostname in the required field.
+    sc_deployment_hostname_suffix: "{{ meta.name }}."
+
+    ## If the root certificate authority (CA) key of the external service is not signed by the operator root CA key, provide the TLS certificate of
+    ## the external service to the component's truststore.
+    trusted_certificate_list: []
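+
+    # Illustrative sketch (the secret names are assumptions): trusting externally signed
+    # services, e.g. an LDAP server and a database whose TLS certificates were stored in
+    # secrets beforehand.
+    # trusted_certificate_list: ["ldap-tls-cert-secret", "db2-tls-cert-secret"]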
This CR represents the "workflow-workstreams" pattern, which includes the following + ## mandatory components: ban(Business Automation Navigator), ums (User Management Service), rr (Resource registry), app_engine( Application Engine) and optional components: bai + sc_deployment_patterns: workflow-workstreams + + ## The optional components to be installed if listed here. This is normally populated by the User script based on input from the user. + ## The optional components are: bai + sc_optional_components: bai + + ## The deployment type as selected by the user. Possible values are: demo, enterprise + sc_deployment_type: enterprise + + ## The platform to be deployed specified by the user. Possible values are: OCP and other. This is normally populated by the User script + ## based on input from the user. + sc_deployment_platform: + + ## For OCP, this is used to create route, you should input a valid hostname in the required field. + sc_deployment_hostname_suffix: "{{ meta.name }}." + + ## If the root certificate authority (CA) key of the external service is not signed by the operator root CA key, provide the TLS certificate of + ## the external service to the component's truststore. + trusted_certificate_list: [] + + ## Shared encryption key secret name that is used for Workstream Services and Process Federation Server integration. + encryption_key_secret: icp4a-shared-encryption-key + + ## Enable/disable ECM (FNCM) / BAN initialization (e.g., creation of P8 domain, creation/configuration of object stores, + ## creation/configuration of CSS servers, and initialization of Navigator (ICN)). If the "initialize_configuration" section + ## is defined in the CR, then that configuration will take precedence overriding this parameter. + sc_content_initialization: false + ## Enable/disable the ECM (FNCM) / BAN verification (e.g., creation of test folder, creation of test document, + ## execution of CBR search, and creation of Navigator demo repository and desktop). If the "verify_configuration" + ## section is defined in the CR, then that configuration will take precedence overriding this parameter. + sc_content_verification: false + + ## On OCP 3.x and 4.x, the User script will populate these three (3) parameters based on your input for "enterprise" deployment. + ## If you manually deploying without using the User script, then you would provide the different storage classes for the slow, medium + ## and fast storage parameters below. If you only have 1 storage class defined, then you can use that 1 storage class for all 3 parameters. + storage_configuration: + sc_slow_file_storage_classname: "" + sc_medium_file_storage_classname: "" + sc_fast_file_storage_classname: "" + + # Kafka client configuration for IBM Business Automation Insights and other ICP4A products. + # + # The customization of the following 4 parameters is "" only if you have + # specificed "bai" as part of the sc_optional_components to specify that Business Automation + # Insights must be installed. + # + # Otherwise, if Business Automation Insights is not being installed, there is no need to configure + # these parameters and they can be kept empty. + ############################################################################################## + kafka_configuration: + # Comma-separated list of hosts:port for connection to the Kafka cluster. + # This field is mandatory for any Kafka configuration. + bootstrap_servers: "" + # Value for the Kafka security.protocol property + # Valid values: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL. 
Default: PLAINTEXT. + security_protocol: + # Value for the Kafka sasl.mechanism property + # Valid values: PLAIN, SCRAM-SHA-512. Default: PLAIN. + sasl_mechanism: + # If the Kafka server requires authentication or uses SSL communications, the value of this field + # must provide the name of a secret that holds the following keys as base64-encoded strings: + # kafka-username: Kafka username; leave empty if no authentication + # kafka-password: Kafka password; leave empty if no authentication + # kafka-server-certificate: server certificate for SSL communications; leave empty if SSL protocol is not used + connection_secret_name: + + ## The beginning section of LDAP configuration for CP4A + ldap_configuration: + ## The possible values are: "IBM Security Directory Server" or "Microsoft Active Directory" + lc_selected_ldap_type: "" + + ## The name of the LDAP server to connect + lc_ldap_server: "" + + ## The port of the LDAP server to connect. Some possible values are: 389, 636, etc. + lc_ldap_port: "" + + ## The LDAP bind secret for LDAP authentication. The secret is expected to have ldapUsername and ldapPassword keys. Refer to Knowledge Center for more info. + lc_bind_secret: ldap-bind-secret + + ## The LDAP base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc + lc_ldap_base_dn: "" + + ## Enable SSL/TLS for LDAP communication. Refer to Knowledge Center for more info. + lc_ldap_ssl_enabled: true + + ## The name of the secret that contains the LDAP SSL/TLS certificate. + lc_ldap_ssl_secret_name: "" + + ## The LDAP user name attribute. One possible value is "*:cn" for TDS and "user:sAMAccountName" for AD. Refer to Knowledge Center for more info. + lc_ldap_user_name_attribute: "" + + ## The LDAP user display name attribute. One possible value is "cn" for TDS and "sAMAccountName" for AD. Refer to Knowledge Center for more info. + lc_ldap_user_display_name_attr: "" + + ## The LDAP group base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc + lc_ldap_group_base_dn: "" + + ## The LDAP group name attribute. One possible value is "*:cn" for TDS and "*:cn" for AD. Refer to Knowledge Center for more info. + lc_ldap_group_name_attribute: "*:cn" + + ## The LDAP group display name attribute. One possible value for both TDS and AD is "cn". Refer to Knowledge Center for more info. + lc_ldap_group_display_name_attr: "cn" + + ## The LDAP group membership search filter string. One possible value is "(&(cn=%v)(|(objectclass=groupOfNames)(objectclass=groupOfUniqueNames)(objectclass=groupOfURLs))" for TDS + ## and "(&(cn=%v)(objectcategory=group))" for AD. + lc_ldap_group_membership_search_filter: "" + + ## The LDAP group membership ID map. One possible value is "groupofnames:member" for TDS and "memberOf:member" for AD. + lc_ldap_group_member_id_map: "" + + ## The User script will uncomment the section needed based on user's input from User script. If you are deploying without the User script, + ## uncomment the necessary section (depending if you are using Active Directory (ad) or Tivoli Directory Service (tds)) accordingly. 
+ # ad: + # lc_ad_gc_host: "" + # lc_ad_gc_port: "" + # lc_user_filter: "(&(samAccountName=%v)(objectClass=user))" + # lc_group_filter: "(&(samAccountName=%v)(objectclass=group))" + # tds: + # lc_user_filter: "(&(cn=%v)(objectclass=person))" + # lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))" + + ## The beginning section of database configuration for CP4A + datasource_configuration: + ## The database configuration for the GCD datasource for CPE + dc_gcd_datasource: + ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle". + dc_database_type: "" + ## The GCD non-XA datasource name. The default value is "FNGCDDS". + dc_common_gcd_datasource_name: "FNGCDDS" + ## The GCD XA datasource name. The default value is "FNGCDDSXA". + dc_common_gcd_xa_datasource_name: "FNGCDDSXA" + ## Provide the database server name or IP address of the database server. + database_servername: "" + ## Provide the name of the database for the GCD for CPE. For example: "GCDDB" + database_name: "" + ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521" + database_port: "" + ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//:1521/orcl" + dc_oracle_gcd_jdbc_url: "" + + ## If the database type is Db2 HADR, then complete the rest of the parameters below. + ## Provide the database server name or IP address of the standby database server. + dc_hadr_standby_servername: "" + ## Provide the standby database server port. For Db2, the default is "50000". + dc_hadr_standby_port: "" + ## Provide the validation timeout. If not preference, keep the default value. + dc_hadr_validation_timeout: 15 + ## Provide the retry internal. If not preference, keep the default value. + dc_hadr_retry_interval_for_client_reroute: 15 + ## Provide the max # of retries. If not preference, keep the default value. + dc_hadr_max_retries_for_client_reroute: 3 + + ## The database configuration for the document object store (DOCS) datasource for CPE + dc_os_datasources: + ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle". This should be the same as the + ## GCD configuration above. + - dc_database_type: "" + ## The DOCS non-XA datasource name. The default value is "FNDSDOCS". + dc_common_os_datasource_name: "FNDSDOCS" + ## The DOCS XA datasource name. The default value is "FNDSDOCSXA". + dc_common_os_xa_datasource_name: "FNDSDOCSXA" + ## Provide the database server name or IP address of the database server. This should be the same as the + ## GCD configuration above. + database_servername: "" + ## Provide the name of the database for the object store 1 for CPE. For example: "OS1DB" + database_name: "" + ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521" + database_port: "" + ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//:1521/orcl" + dc_oracle_os_jdbc_url: "" + ###################################################################################### + ## If the database type is "Db2HADR", then complete the rest of the parameters below. + ## Otherwise, remove or comment out the rest of the parameters below. 
+ ###################################################################################### + dc_hadr_standby_servername: "" + ## Provide the standby database server port. For Db2, the default is "50000". + dc_hadr_standby_port: "" + ## Provide the validation timeout. If not preference, keep the default value. + dc_hadr_validation_timeout: 15 + ## Provide the retry internal. If not preference, keep the default value. + dc_hadr_retry_interval_for_client_reroute: 15 + ## Provide the max # of retries. If not preference, keep the default value. + dc_hadr_max_retries_for_client_reroute: 3 + ## The database configuration for the target object store (TOS) datasource for CPE + - dc_database_type: "" + ## The TOS non-XA datasource name. The default value is "FNDSTOS". + dc_common_os_datasource_name: "FNDSTOS" + ## The TOS XA datasource name. The default value is "FNDSTOSXA". + dc_common_os_xa_datasource_name: "FNDSTOSXA" + ## Provide the database server name or IP address of the database server. This should be the same as the + ## GCD configuration above. + database_servername: "" + ## Provide the name of the database for the object store 1 for CPE. For example: "OS1DB" + database_name: "" + ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521" + database_port: "" + ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//:1521/orcl" + dc_oracle_os_jdbc_url: "" + ###################################################################################### + ## If the database type is "Db2HADR", then complete the rest of the parameters below. + ## Otherwise, remove or comment out the rest of the parameters below. + ###################################################################################### + dc_hadr_standby_servername: "" + ## Provide the standby database server port. For Db2, the default is "50000". + dc_hadr_standby_port: "" + ## Provide the validation timeout. If not preference, keep the default value. + dc_hadr_validation_timeout: 15 + ## Provide the retry internal. If not preference, keep the default value. + dc_hadr_retry_interval_for_client_reroute: 15 + ## Provide the max # of retries. If not preference, keep the default value. + dc_hadr_max_retries_for_client_reroute: 3 + ## The database configuration for the design object store (DOS) datasource for CPE + - dc_database_type: "" + ## The DOS non-XA datasource name. The default value is "FNDSDOS". + dc_common_os_datasource_name: "FNDSDOS" + ## The DOS XA datasource name. The default value is "FNDSDOSXA". + dc_common_os_xa_datasource_name: "FNDSDOSXA" + ## Provide the database server name or IP address of the database server. This should be the same as the + ## GCD configuration above. + database_servername: "" + ## Provide the name of the database for the object store 1 for CPE. For example: "OS1DB" + database_name: "" + ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521" + database_port: "" + ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//:1521/orcl" + dc_oracle_os_jdbc_url: "" + ###################################################################################### + ## If the database type is "Db2HADR", then complete the rest of the parameters below. + ## Otherwise, remove or comment out the rest of the parameters below. 
+        dc_hadr_standby_servername: ""
+        ## Provide the standby database server port. For Db2, the default is "50000".
+        dc_hadr_standby_port: ""
+        ## Provide the validation timeout. If you have no preference, keep the default value.
+        dc_hadr_validation_timeout: 15
+        ## Provide the retry interval. If you have no preference, keep the default value.
+        dc_hadr_retry_interval_for_client_reroute: 15
+        ## Provide the maximum number of retries. If you have no preference, keep the default value.
+        dc_hadr_max_retries_for_client_reroute: 3
+
+    ## The database configuration for ICN (Navigator) - aka BAN (Business Automation Navigator)
+    dc_icn_datasource:
+      ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle". This should be the same as the
+      ## GCD and object store configuration above.
+      dc_database_type: ""
+      ## Provide the ICN datasource name. The default value is "ECMClientDS".
+      dc_common_icn_datasource_name: "ECMClientDS"
+      ## Provide the database server name or IP address of the database server.
+      database_servername: ""
+      ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+      database_port: ""
+      ## Provide the name of the database for ICN (Navigator). For example: "ICNDB"
+      database_name: ""
+      ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//<hostname>:1521/orcl"
+      dc_oracle_icn_jdbc_url: ""
+      ######################################################################################
+      ## If the database type is "Db2HADR", then complete the rest of the parameters below.
+      ## Otherwise, remove or comment out the rest of the parameters below.
+      ######################################################################################
+      dc_hadr_standby_servername: ""
+      ## Provide the standby database server port. For Db2, the default is "50000".
+      dc_hadr_standby_port: ""
+      ## Provide the validation timeout. If you have no preference, keep the default value.
+      dc_hadr_validation_timeout: 15
+      ## Provide the retry interval. If you have no preference, keep the default value.
+      dc_hadr_retry_interval_for_client_reroute: 15
+      ## Provide the maximum number of retries. If you have no preference, keep the default value.
+      dc_hadr_max_retries_for_client_reroute: 3
+
+    ## The database configuration for UMS (User Management Service)
+    dc_ums_datasource:
+      ## Provide the database type from your infrastructure. The possible values are "db2" or "oracle". This should be the same as the
+      ## other datasource configuration above. Db2 with HADR is automatically activated if dc_ums_oauth_alternate_hosts and dc_ums_oauth_alternate_ports
+      ## are set.
+      dc_ums_oauth_type: ""
+      ## Provide the database server name or IP address of the database server.
+      dc_ums_oauth_host: ""
+      ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+      dc_ums_oauth_port: ""
+      ## Provide the name of the database for UMS. For example: "UMSDB"
+      dc_ums_oauth_name: ""
+      dc_ums_oauth_schema: OAuthDBSchema
+      dc_ums_oauth_ssl: true
+      dc_ums_oauth_ssl_secret_name: ""
+      dc_ums_oauth_driverfiles:
+      dc_ums_oauth_alternate_hosts:
+      dc_ums_oauth_alternate_ports:
+
+      ## The database configuration for teamserver
+      ## Provide the database type from your infrastructure. The possible values are "db2" or "oracle". This should be the same as the
+      ## other datasource configuration above. Db2 with HADR is automatically activated if dc_ums_teamserver_alternate_hosts and dc_ums_teamserver_alternate_ports
+      ## are set.
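+      ## For illustration only (hypothetical host and names): a Db2 teamserver
+      ## datasource might look like this:
+      ##   dc_ums_teamserver_type: "db2"
+      ##   dc_ums_teamserver_host: "db2.example.com"
+      ##   dc_ums_teamserver_port: "50000"
+      ##   dc_ums_teamserver_name: "UMSDB"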
+      dc_ums_teamserver_type: ""
+      dc_ums_teamserver_host: ""
+      ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+      dc_ums_teamserver_port: ""
+      ## Provide the name of the database for the UMS teamserver. For example: "UMSDB"
+      dc_ums_teamserver_name: ""
+      dc_ums_teamserver_ssl: true
+      dc_ums_teamserver_ssl_secret_name: ""
+      dc_ums_teamserver_driverfiles:
+      dc_ums_teamserver_alternate_hosts:
+      dc_ums_teamserver_alternate_ports:
+
+
+  ########################################################################
+  ########   IBM Business Automation Workflow configuration      ########
+  ########################################################################
+  baw_configuration:
+    ## The baw_configuration is a list. You can deploy multiple instances of the Workflow server and assign different configurations for each instance.
+    ## For each instance, name and hostname must be assigned different values.
+    - name: instance1
+      ## Whether to configure Process Portal for a federated environment
+      host_federated_portal: true
+      ## Workflow server service type
+      service_type: "Route"
+      ## Workflow server hostname
+      hostname: ""
+      ## Workflow server port
+      port: 443
+      ## Workflow server nodeport
+      nodeport: 30026
+      ## Workflow server environment type. The possible values are "Development", "Test", "Staging", or "Production".
+      env_type: "Production"
+      ## Workflow server capabilities
+      capabilities: "workflow,workstreams"
+      ## Workflow server replica count
+      replicas: 1
+      ## Provide the Workflow server default administrator ID
+      admin_user: ""
+      ## The name of the Workflow server admin secret
+      admin_secret_name: "baw-admin-secret"
+      ## Specify whether to use the built-in monitoring capability
+      monitor_enabled: false
+
+      # For scenarios where you have implemented your own Portal, e.g., https://portal.mycompany.com
+      customized_portal_endpoint: ""
+
+      federated_portal:
+        ## Content security policy additional origins for federated on-premises BAW systems, e.g., ["https://on-prem-baw1","https://on-prem-baw2"]
+        content_security_policy_additional_origins: []
+        external_connection_timeout: ""
+
+      tls:
+        ## Workflow server TLS secret that contains tls.key and tls.crt
+        tls_secret_name: ibm-baw-tls
+        ## Workflow server TLS trust list
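+        ## For example (hypothetical secret names):
+        ##   tls_trust_list:
+        ##     - baw-tls-trust-secret1
+        ##     - baw-tls-trust-secret2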
+        tls_trust_list:
+      image:
+        ## Workflow image repository URL
+        repository: cp.icr.io/cp/cp4a/baw/workflow-server
+        ## Image tag for the Workflow server container
+        tag: 20.0.2
+        ## Pull policy for the Workflow container
+        pullPolicy: IfNotPresent
+      pfs_bpd_database_init_job:
+        ## Database initialization image repository URL for Process Federation Server
+        repository: cp.icr.io/cp/cp4a/baw/pfs-bpd-database-init-prod
+        ## Image tag for database initialization for Process Federation Server
+        tag: 20.0.2
+        ## Pull policy for the Process Federation Server database initialization image
+        pullPolicy: IfNotPresent
+      upgrade_job:
+        ## Workflow server database handling image repository URL
+        repository: cp.icr.io/cp/cp4a/baw/workflow-server-dbhandling
+        ## Image tag for Workflow server database handling
+        tag: 20.0.2
+        ## Pull policy for Workflow server database handling
+        pullPolicy: IfNotPresent
+      bas_auto_import_job:
+        ## BAS toolkit init image repository URL
+        repository: cp.icr.io/cp/cp4a/baw/toolkit-installer
+        ## Image tag for the BAS toolkit init image
+        tag: 20.0.2
+        ## Pull policy for the BAS toolkit init image
+        pullPolicy: IfNotPresent
+      ibm_workplace_job:
+        ## IBM Workplace deployment job image repository URL
+        repository: cp.icr.io/cp/cp4a/baw/iaws-ibm-workplace
+        ## Image tag for the IBM Workplace deployment job image
+        tag: 20.0.2
+        ## Pull policy for the IBM Workplace deployment job image
+        pull_policy: IfNotPresent
+
+      ## The database configuration for the Workflow server
+      database:
+        ## Whether to enable Secure Sockets Layer (SSL) support for the Workflow server database connection
+        ssl: false
+        ## Secret name for storing the database TLS certificate when an SSL connection is enabled
+        sslsecretname: ""
+        ## Workflow server database type
+        type: "DB2"
+        ## Workflow server database server name
+        server_name: ""
+        ## Workflow server database name
+        database_name: ""
+        ## Workflow server database port. For DB2, the default value is "50000"
+        port: ""
+        ## Workflow server database secret name
+        secret_name: ""
+        ## Maximum number of physical connections in the Workflow server database connection pool
+        cm_max_pool_size: 200
+        dbcheck:
+          # The maximum waiting time (seconds) to check the database initialization status
+          wait_time: 900
+          # The interval time (seconds) between checks
+          interval_time: 15
+        hadr:
+          ## Database standby host for high availability disaster recovery (HADR)
+          ## To enable database HADR, configure both standby host and port
+          standbydb_host:
+          ## Database standby port for HADR
+          standbydb_port:
+          ## Retry interval for HADR
+          retryinterval:
+          ## Maximum retries for HADR
+          maxretries:
+
+      ## The configuration for content integration
+      content_integration:
+        init_job_image:
+          ## Image name for the content integration container
+          repository: cp.icr.io/cp/cp4a/baw/iaws-ps-content-integration
+          ## Image tag for the content integration container
+          tag: 20.0.2
+          ## Pull policy for the content integration container
+          pull_policy: IfNotPresent
+        ## Domain name for content integration
+        domain_name: ""
+        ## Object store name for content integration
+        object_store_name: ""
+        ## Admin secret for content integration
+        cpe_admin_secret: ""
+
+      ## The configuration for case
+      case:
+        init_job_image:
+          ## Image name for the CASE init job container
+          repository: cp.icr.io/cp/cp4a/baw/workflow-server-case-initialization
+          ## Image tag for the CASE init job container
+          tag: 20.0.2
+          ## Pull policy for the CASE init job container
+          pull_policy: IfNotPresent
+
+        ## Domain name for CASE
+        domain_name: "P8DOMAIN"
+        ## Design object store name for CASE
+        object_store_name_dos: "DOS"
+        ## Target object store name for CASE
+        object_store_name_tos: "TOS"
+        ## Connection point name for the target object store
+        connection_point_name_tos: "cpe_conn_tos"
+
+        ## PVC name for the CASE network shared directory
+        network_shared_directory_pvc: ""
+        ## The custom package names, if you need to install custom packages. The value format is like "package1.zip, package2.zip"
+        custom_package_names: ""
+        ## The custom extension names, if you need to install custom extensions. The value format is like "extension1.zip, extension2.zip"
+        custom_extension_names: ""
+        ## The event emitter settings, if you want to enable the Case Event Emitter
+        event_emitter:
+          date_sql:
+          logical_unique_id:
+          solution_list:
+
+      ## Workflow Center configuration
+      workflow_center:
+        ## The URL of Workflow Center
+        url: ""
+        # The secret name of Workflow Center that contains the username and password
+        secret_name: ""
+        # The heartbeat interval (seconds) to connect to Workflow Center
+        heartbeat_interval: 30
+
+      ## App Engine configuration. Because application_engine_configuration is an array:
+      ## when only one App Engine instance is deployed along with this CR, the three parameters below are not required;
+      ## when more than one App Engine instance is deployed, the three parameters below are required.
+      appengine:
+        ## App Engine hostname
+        hostname: ""
+        ## App Engine port
+        port: "443"
+        ## App Engine admin secret name
+        admin_secret_name: ""
+
+      ## The configuration for Resource Registry, if you want to use an external Resource Registry
+      resource_registry:
+        ## Resource Registry hostname
+        hostname: ""
+        ## Resource Registry port
+        port: 443
+        ## Resource Registry administrative secret
+        admin_secret_name: ""
+
+      ## The configuration for Java Messaging Service (JMS)
+      jms:
+        image:
+          ## Image name for the Java Messaging Service container
+          repository: cp.icr.io/cp/cp4a/baw/jms
+          ## Image tag for the Java Messaging Service container
+          tag: 20.0.2
+          ## Pull policy for the Java Messaging Service container
+          pull_policy: IfNotPresent
+        tls:
+          ## TLS secret name for Java Message Service (JMS)
+          tls_secret_name: ibm-jms-tls-secret
+        resources:
+          limits:
+            ## Memory limit for the JMS configuration
+            memory: "2Gi"
+            ## CPU limit for the JMS configuration
+            cpu: "1000m"
+          requests:
+            ## Requested amount of memory for the JMS configuration
+            memory: "512Mi"
+            ## Requested amount of CPU for the JMS configuration
+            cpu: "200m"
+        storage:
+          ## Whether to enable persistent storage for JMS
+          persistent: true
+          ## Size for JMS persistent storage
+          size: "1Gi"
+          ## Whether to enable dynamic provisioning for JMS persistent storage
+          use_dynamic_provisioning: true
+          ## Access modes for JMS persistent storage
+          access_modes:
+            - ReadWriteOnce
+          ## Storage class name for JMS persistent storage
+          storage_class: "{{ shared_configuration.storage_configuration.sc_fast_file_storage_classname }}"
+
+      ## Resource configuration
+      resources:
+        limits:
+          ## CPU limit for the Workflow server
+          cpu: 2
+          ## Memory limit for the Workflow server
+          memory: 2096Mi
+        requests:
+          ## Requested amount of CPU for the Workflow server
+          cpu: "500m"
+          ## Requested amount of memory for the Workflow server
+          memory: 1048Mi
+
+      ## Liveness and readiness probe configuration
+      probe:
+        ws:
+          liveness_probe:
+            ## Number of seconds after the Workflow server container starts before the liveness probe is initiated
+            initial_delay_seconds: 300
+          readinessProbe:
+            ## Number of seconds after the Workflow server container starts before the readiness probe is initiated
+            initial_delay_seconds: 240
+
+      ## Log trace configuration
+      logs:
+        ## Format for printing logs on the console
+        console_format: "json"
+        ## Log level for printing logs on the console
+        console_log_level: "INFO"
+        ## Source of the logs for printing on the console
+        console_source: "message,trace,accessLog,ffdc,audit"
+        ## Format for printing message logs on the console
+        message_format: "basic"
+        ## Format for printing trace logs on the console
+        trace_format: "ENHANCED"
+        ## Specification for printing trace logs
+        trace_specification: "*=info"
+
+      ## Storage configuration
+      storage:
+        ## Set to true to use a dynamic storage provisioner. If set to false, you must set existing_pvc_for_logstore and existing_pvc_for_dumpstore.
+        use_dynamic_provisioning: true
+        ## The persistent volume claim for logs
+        existing_pvc_for_logstore: ""
+        ## The minimum size of the persistent volume mounted as the log store
+        size_for_logstore: "10Gi"
+        ## The persistent volume claim for dump files
+        existing_pvc_for_dumpstore: ""
+        ## The minimum size of the persistent volume mounted as the dump store
+        size_for_dumpstore: "10Gi"
+
+      ## JVM options separated with spaces, for example: -Dtest1=test -Dtest2=test2
+      jvm_customize_options:
+
+      ## Workflow server custom plain XML snippet
+      ## liberty_custom_xml: |+
+      ##
+      ##
+      ##
+      liberty_custom_xml:
+
+      ## Workflow server custom XML secret name that contains custom configuration in the Liberty server.xml
+      custom_xml_secret_name:
+
+      ## Workflow server Lombardi custom XML secret name that contains custom configuration in 100Custom.xml
+      lombardi_custom_xml_secret_name:
+
+      ## IBM Business Automation Insights integration configuration
+      business_event:
+        enable: false
+        enable_task_record: true
+        enable_task_api: false
+        subscription:
+          - {'app_name': '*','version': '*','component_type': '*','component_name': '*','element_type': '*','element_name': '*','nature': '*'}
+
+  #####################################################################
+  ##           IBM App Engine production configuration              ##
+  #####################################################################
+  application_engine_configuration:
+    ## The application_engine_configuration is a list. You can deploy multiple instances of App Engine and assign different configurations for each instance.
+    ## For each instance, name and hostname must be assigned different values.
+    - name: instance1
+      # If you provide a hostname and port here, they will always be used.
+      # If you are using pattern mode (shared_configuration.sc_deployment_patterns contains a value),
+      # then you do not need to fill in the hostname and port; shared_configuration.sc_deployment_hostname_suffix is used to generate them.
+      # But if you have not provided a suffix and no hostname and port are assigned, an error is reported in the operator log during deployment.
+      # For non-pattern mode you must assign a valid hostname and port here.
+      hostname:
+      port: 443
+      # The admin secret must contain these two fields:
+      # AE_DATABASE_PWD:
+      # AE_DATABASE_USER:
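+      # For illustration only (hypothetical name and values), such a secret could
+      # be defined as:
+      #   apiVersion: v1
+      #   kind: Secret
+      #   metadata:
+      #     name: icp4a-ae-admin-secret
+      #   stringData:
+      #     AE_DATABASE_USER: "db2inst1"
+      #     AE_DATABASE_PWD: "passw0rd"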
+      admin_secret_name:
+      # The default admin user ID for App Engine.
+      # The user ID should be the bootstrap admin ID for IBM Business Automation Navigator. It is case sensitive.
+      # The same ID should also be a User Management Service (UMS) admin user.
+      admin_user:
+      external_tls_secret:
+      external_connection_timeout: 90s
+      replica_size: 1
+      ## Optional when the database type is Db2; required when it is Oracle
+      user_custom_jdbc_drivers: false
+      service_type: Route
+      autoscaling:
+        enabled: false
+        max_replicas: 5
+        min_replicas: 2
+        target_cpu_utilization_percentage: 80
+      database:
+        # AE database host name or IP when the database is Db2
+        host:
+        # AE database name when the database is Db2
+        name:
+        # AE database port number when the database is Db2
+        port:
+        ## If you set up Db2 HADR and want to use it, configure alternative_host and alternative_port; otherwise, leave them blank.
+        alternative_host:
+        alternative_port:
+        ## Only Db2 and Oracle are supported
+        type: db2
+        ## Required only when the type is Oracle, both SSL and non-SSL. The format must be a pure Oracle descriptor like (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=)(PORT=))(CONNECT_DATA=(SERVICE_NAME=)))
+        oracle_url_without_wallet_directory:
+        enable_ssl: false
+        ## Required only when the type is Oracle and enable_ssl is true. The format must be a pure Oracle descriptor. The SSO wallet directory must be specified and fixed to (MY_WALLET_DIRECTORY=/shared/resources/oracle/wallet).
+        oracle_url_with_wallet_directory:
+        ## Required only when enable_ssl is true, for both Db2 and Oracle database types
+        db_cert_secret_name:
+        ## Required only when the type is Oracle and enable_ssl is true
+        oracle_sso_wallet_secret_name:
+        ## Optional. If it is empty, DBASB is the default for Db2, and the AE_DATABASE_USER set in the admin_secret_name is the default for Oracle.
+        current_schema: DBASB
+        initial_pool_size: 1
+        max_pool_size: 10
+        uv_thread_pool_size: 4
+        max_lru_cache_size: 1000
+        max_lru_cache_age: 600000
+        dbcompatibility_max_retries: 30
+        dbcompatibility_retry_interval: 10
+        ## The persistent volume claim for custom JDBC drivers, if using custom JDBC drivers is enabled
+        custom_jdbc_pvc:
+      log_level:
+        node: info
+        browser: 2
+      content_security_policy:
+        enable: false
+        whitelist:
+      env:
+        max_size_lru_cache_rr: 1000
+        server_env_type: development
+        purge_stale_apps_interval: 86400000
+        apps_threshold: 100
+        stale_threshold: 172800000
+      images:
+        pull_policy: IfNotPresent
+        db_job:
+          repository: cp.icr.io/cp/cp4a/aae/solution-server-helmjob-db
+          tag: 20.0.2
+        solution_server:
+          repository: cp.icr.io/cp/cp4a/aae/solution-server
+          tag: 20.0.2
+      max_age:
+        auth_cookie: "900000"
+        csrf_cookie: "3600000"
+        static_asset: "2592000"
+        hsts_header: "2592000"
+      probe:
+        liveness:
+          failure_threshold: 5
+          initial_delay_seconds: 60
+          period_seconds: 10
+          success_threshold: 1
+          timeout_seconds: 180
+        readiness:
+          failure_threshold: 5
+          initial_delay_seconds: 10
+          period_seconds: 10
+          success_threshold: 1
+          timeout_seconds: 180
+      # Redis settings apply only when you set session.use_external_store to true
+      redis:
+        # Your external Redis host/IP
+        host: localhost
+        # Your external Redis port
+        port: 6379
+        ttl: 1800
+      resource_ae:
+        limits:
+          cpu: 2000m
+          memory: 2Gi
+        requests:
+          cpu: 300m
+          memory: 512Mi
+      resource_init:
+        limits:
+          cpu: 500m
+          memory: 256Mi
+        requests:
+          cpu: 100m
+          memory: 128Mi
+      session:
+        check_period: "3600000"
+        duration: "1800000"
+        max: "10000"
+        resave: "false"
+        rolling: "true"
+        save_uninitialized: "false"
+        # Set this option to "true" to have App Engine use external Redis as session
+        # storage, to support multiple App Engine pods
+        use_external_store: "false"
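+        ## For illustration only (hypothetical endpoint): to store sessions in an
+        ## external Redis, set use_external_store: "true" here and, in the redis
+        ## section above, e.g. host: redis.example.com and port: 6379.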
+      tls:
+        tls_trust_list: []
+      # If you want to set the replica size greater than 1 for this cluster, you must enable shared storage
+      share_storage:
+        enabled: false
+        # If you created the PV manually, provide the name of the PVC bound to it here
+        pvc_name:
+        auto_provision:
+          enabled: false
+          # Required if you enabled auto provisioning
+          storage_class:
+          size: 20Gi
+
+  ########################################################################
+  ########      IBM FileNet Content Manager configuration        ########
+  ########################################################################
+  ecm_configuration:
+
+    ## FNCM secret that contains the GCD DB user name and password, object store DB user name and password,
+    ## LDAP user and password, CPE username and password, keystore password, LTPA password, etc.
+    fncm_secret_name: ibm-fncm-secret
+
+    ####################################
+    ## Start of configuration for CPE ##
+    ####################################
+    cpe:
+      ## The architecture of the cluster. This is the default for Linux on x86 and should not be changed.
+      arch:
+        amd64: "3 - Most preferred"
+
+      ## The number of replicas or pods to be deployed. The default is 1 replica; for high availability in a production environment,
+      ## it is recommended to have 2 or more.
+      replica_count: 1
+
+      ## This is the image repository and tag that correspond to the image registry, which is where the image will be pulled.
+      image:
+        ## The default repository is the IBM Entitled Registry.
+        repository: cp.icr.io/cp/cp4a/fncm/cpe
+        tag: ga-555-p8cpe
+
+        ## This will override the image pull policy in the shared_configuration.
+        pull_policy: IfNotPresent
+
+      ## Logging for workloads. This is the default setting.
+      log:
+        format: json
+
+      ## The initial resource (CPU, memory) requests and limits. If more resources are needed,
+      ## make the changes here to meet your requirements.
+      resources:
+        requests:
+          cpu: 500m
+          memory: 512Mi
+        limits:
+          cpu: 1
+          memory: 3072Mi
+
+      ## By default, autoscaling is enabled with the following settings, with a minimum of 1 replica and a maximum of 3 replicas. Change
+      ## these settings to meet your requirements.
+      auto_scaling:
+        enabled: true
+        max_replicas: 3
+        min_replicas: 1
+        ## This is the default CPU percentage before autoscaling occurs.
+        target_cpu_utilization_percentage: 80
+
+      ## Below are the default CPE production settings. Make the necessary changes as you see fit. Refer to the Knowledge Center documentation for details.
+      cpe_production_setting:
+        time_zone: Etc/UTC
+
+        ## The initial use of available memory.
+        jvm_initial_heap_percentage: 18
+        ## The maximum percentage of available memory to use.
+        jvm_max_heap_percentage: 33
+
+        ## Use this "jvm_customize_options" parameter to specify JVM arguments using comma separation.
+        ## For example, if you want to set the following JVM arguments:
+        ##   -Dmy.test.jvm.arg1=123
+        ##   -Dmy.test.jvm.arg2=abc
+        ##   -XX:+SomeJVMSettings
+        ##   -XshowSettings:vm
+        ## then set the following: jvm_customize_options="-Dmy.test.jvm.arg1=123,-Dmy.test.jvm.arg2=abc,-XX:+SomeJVMSettings,-XshowSettings:vm"
+        jvm_customize_options:
+
+        ## Default JNDI name for the GCD non-XA data source
+        gcd_jndi_name: FNGCDDS
+        ## Default JNDI name for the GCD XA data source
+        gcd_jndixa_name: FNGCDDSXA
+        license_model: FNCM.PVUNonProd
+        license: accept
+
+        ## Enable/disable monitoring, where metrics can be sent to Graphite or scraped by Prometheus
+        monitor_enabled: true
+        ## Enable/disable logging, where logs can be sent to Elasticsearch
+        logging_enabled: true
+
+        ## By default, the plugin for Graphite is enabled to emit container metrics.
+        collectd_enable_plugin_write_graphite: true
+
+      ## Persistent volume claims for CPE. If the storage_configuration in the shared_configuration is configured,
+      ## the operator will create the PVCs using the names below.
+      datavolume:
+        existing_pvc_for_cpe_cfgstore: "cpe-cfgstore"
+        existing_pvc_for_cpe_logstore: "cpe-logstore"
+        existing_pvc_for_cpe_filestore: "cpe-filestore"
+        existing_pvc_for_cpe_icmrulestore: "cpe-icmrulesstore"
+        existing_pvc_for_cpe_textextstore: "cpe-textextstore"
+        existing_pvc_for_cpe_bootstrapstore: "cpe-bootstrapstore"
+        existing_pvc_for_cpe_fnlogstore: "cpe-fnlogstore"
+
+      ## Default values for both readiness and liveness probes. Modify these values to meet your requirements.
+      probe:
+        readiness:
+          initial_delay_seconds: 120
+          period_seconds: 5
+          timeout_seconds: 10
+          failure_threshold: 6
+        liveness:
+          initial_delay_seconds: 600
+          period_seconds: 5
+          timeout_seconds: 5
+          failure_threshold: 6
+
+      ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above.
+      image_pull_secrets:
+        name: "admin.registrykey"
+
+    #####################################
+    ## Start of configuration for CMIS ##
+    #####################################
+    cmis:
+      ## The architecture of the cluster. This is the default for Linux on x86 and should not be changed.
+      arch:
+        amd64: "3 - Most preferred"
+
+      ## The number of replicas or pods to be deployed. The default is 1 replica; for high availability in a production environment,
+      ## it is recommended to have 2 or more.
+      replica_count: 1
+
+      ## This is the image repository and tag that correspond to the image registry, which is where the image will be pulled.
+      image:
+        ## The default repository is the IBM Entitled Registry.
+        repository: cp.icr.io/cp/cp4a/fncm/cmis
+        tag: ga-305-cmis
+
+        ## This will override the image pull policy in the shared_configuration.
+        pull_policy: IfNotPresent
+
+      ## Logging for workloads. This is the default setting.
+      log:
+        format: json
+
+      ## The initial resource (CPU, memory) requests and limits. If more resources are needed,
+      ## make the changes here to meet your requirements.
+      resources:
+        requests:
+          cpu: 500m
+          memory: 256Mi
+        limits:
+          cpu: 1
+          memory: 1536Mi
+
+      ## By default, autoscaling is enabled with the following settings, with a minimum of 1 replica and a maximum of 3 replicas. Change
+      ## these settings to meet your requirements.
+      auto_scaling:
+        enabled: true
+        max_replicas: 3
+        min_replicas: 1
+        ## This is the default CPU percentage before autoscaling occurs.
+        target_cpu_utilization_percentage: 80
+
+      ## Below are the default CMIS production settings. Make the necessary changes as you see fit. Refer to the Knowledge Center documentation for details.
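+      ## (A hypothetical illustration, assuming the heap percentages below are taken
+      ## against the container memory limit: with the 1536Mi limit above, a
+      ## jvm_max_heap_percentage of 66 yields a maximum heap of roughly 1Gi.)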
+      cmis_production_setting:
+        ## By default, this parameter is set by the operator using the CPE service endpoint (e.g., "http://{{ meta.name }}-cpe-svc:9080/wsi/FNCEWS40MTOM")
+        cpe_url:
+
+        time_zone: Etc/UTC
+
+        ## The initial use of available memory.
+        jvm_initial_heap_percentage: 40
+        ## The maximum percentage of available memory to use.
+        jvm_max_heap_percentage: 66
+
+        ## Use this "jvm_customize_options" parameter to specify JVM arguments using comma separation. For example, if you want to set the following JVM arguments:
+        ##   -Dmy.test.jvm.arg1=123
+        ##   -Dmy.test.jvm.arg2=abc
+        ##   -XX:+SomeJVMSettings
+        ##   -XshowSettings:vm
+        ## then set the following: jvm_customize_options="-Dmy.test.jvm.arg1=123,-Dmy.test.jvm.arg2=abc,-XX:+SomeJVMSettings,-XshowSettings:vm"
+        jvm_customize_options:
+
+        checkout_copycontent: true
+        default_maxitems: 25
+        cvl_cache: true
+        secure_metadata_cache: false
+        filter_hidden_properties: true
+        querytime_limit: 180
+        resumable_queries_forrest: true
+        escape_unsafe_string_characters: false
+        max_soap_size: 180
+        print_pull_stacktrace: false
+        folder_first_search: false
+        ignore_root_documents: false
+        supporting_type_mutability: false
+        license: accept
+
+        ## Enable/disable monitoring, where metrics can be sent to Graphite or scraped by Prometheus
+        monitor_enabled: true
+        ## Enable/disable logging, where logs can be sent to Elasticsearch
+        logging_enabled: true
+
+        ## By default, the plugin for Graphite is enabled to emit container metrics.
+        collectd_enable_plugin_write_graphite: true
+
+      ## Persistent volume claims for CMIS. If the storage_configuration in the shared_configuration is configured,
+      ## the operator will create the PVCs using the names below.
+      datavolume:
+        existing_pvc_for_cmis_cfgstore: "cmis-cfgstore"
+        existing_pvc_for_cmis_logstore: "cmis-logstore"
+
+      ## Default values for both readiness and liveness probes. Modify these values to meet your requirements.
+      probe:
+        readiness:
+          initial_delay_seconds: 90
+          period_seconds: 5
+          timeout_seconds: 10
+          failure_threshold: 6
+        liveness:
+          initial_delay_seconds: 180
+          period_seconds: 5
+          timeout_seconds: 5
+          failure_threshold: 6
+      ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above.
+      image_pull_secrets:
+        name: "admin.registrykey"
+
+  ########################################################################
+  ########   IBM Business Automation Navigator configuration     ########
+  ########################################################################
+  navigator_configuration:
+
+    ## Navigator secret that contains user credentials for LDAP and the database
+    ban_secret_name: ibm-ban-secret
+
+    ## The architecture of the cluster. This is the default for Linux and should not be changed.
+    arch:
+      amd64: "3 - Most preferred"
+
+    ## The number of replicas or pods to be deployed. The default is 1 replica; for high availability in a production environment,
+    ## it is recommended to have 2 or more.
+    replica_count: 1
+
+    ## This is the image repository and tag that correspond to the image registry, which is where the image will be pulled.
+    image:
+
+      ## The default repository is the IBM Entitled Registry
+      repository: cp.icr.io/cp/cp4a/ban/navigator-sso
+      tag: ga-308-icn
+
+      ## This will override the image pull policy in the shared_configuration.
+      pull_policy: IfNotPresent
+
+    ## Logging for workloads. This is the default setting.
+    log:
+      format: json
+
+    ## The initial resource (CPU, memory) requests and limits. If more resources are needed,
+    ## make the changes here to meet your requirements.
+    resources:
+      requests:
+        cpu: 500m
+        memory: 512Mi
+      limits:
+        cpu: 1
+        memory: 1536Mi
+
+    ## By default, autoscaling is enabled with the following settings, with a minimum of 1 replica and a maximum of 3 replicas. Change
+    ## these settings to meet your requirements.
+    auto_scaling:
+      enabled: true
+      max_replicas: 3
+      min_replicas: 1
+      ## This is the default CPU percentage before autoscaling occurs.
+      target_cpu_utilization_percentage: 80
+
+    ## Below are the default ICN production settings. Make the necessary changes as you see fit. Refer to the Knowledge Center documentation for details.
+    icn_production_setting:
+      timezone: Etc/UTC
+      jvm_initial_heap_percentage: 40
+      jvm_max_heap_percentage: 66
+      jvm_customize_options:
+      icn_db_type: db2
+      icn_jndids_name: ECMClientDS
+      icn_schema: ICNDB
+      icn_table_space: ICNDB
+      allow_remote_plugins_via_http: false
+
+    ## Default settings for monitoring
+    monitor_enabled: false
+    ## Default settings for logging
+    logging_enabled: false
+
+    ## Persistent volume claims for ICN. If the storage_configuration in the shared_configuration is configured,
+    ## the operator will create the PVCs using the names below.
+    datavolume:
+      existing_pvc_for_icn_cfgstore: "icn-cfgstore"
+      existing_pvc_for_icn_logstore: "icn-logstore"
+      existing_pvc_for_icn_pluginstore: "icn-pluginstore"
+      existing_pvc_for_icnvw_cachestore: "icn-vw-cachestore"
+      existing_pvc_for_icnvw_logstore: "icn-vw-logstore"
+      existing_pvc_for_icn_aspera: "icn-asperastore"
+
+    ## Default values for both readiness and liveness probes. Modify these values to meet your requirements.
+    probe:
+      readiness:
+        initial_delay_seconds: 120
+        period_seconds: 5
+        timeout_seconds: 10
+        failure_threshold: 6
+      liveness:
+        initial_delay_seconds: 600
+        period_seconds: 5
+        timeout_seconds: 5
+        failure_threshold: 6
+
+    ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above.
+    image_pull_secrets:
+      name: "admin.registrykey"
+
+  ########################################################################
+  ######## IBM User and Group Management Service configuration   ########
+  ########################################################################
+  ums_configuration:
+    existing_claim_name:
+    replica_count: 2
+    service_type: Route
+    # Your external UMS hostname; only required if no sc_deployment_hostname_suffix is given
+    hostname:
+    port: 443
+    images:
+      ums:
+        repository: cp.icr.io/cp/cp4a/ums/ums
+        tag: 20.0.2
+    admin_secret_name: ibm-dba-ums-secret
+    ## Optional, for secure communication with UMS
+    external_tls_secret_name:
+    ## Optional, for secure communication with UMS
+    external_tls_ca_secret_name:
+    ## Optional, for secure communication with UMS
+    external_tls_teams_secret_name:
+    ## Optional, for secure communication with UMS
+    external_tls_scim_secret_name:
+    ## Optional, for secure communication with UMS
+    external_tls_sso_secret_name:
+    oauth:
+      ## Optional: full DN of an LDAP group that is authorized to manage OIDC clients, in addition to the primary admin from the admin secret
+      client_manager_group:
+      ## Optional: full DN of an LDAP group that is authorized to manage app_tokens, in addition to the primary admin from the admin secret
+      token_manager_group:
+      ## Optional: lifetime of OAuth access_tokens. The default is 7200s.
+      access_token_lifetime:
+      ## Optional: lifetime of app-tokens. The default is 366d.
+      app_token_lifetime:
+      ## Optional: lifetime of app-passwords. The default is 366d.
+      app_password_lifetime:
+      ## Optional: maximum number of app-tokens or app-passwords per client. The default is 100.
+      app_token_or_password_limit:
+      ## Optional: encoding/encryption when storing client secrets in the OAuth database. The default is xor for compatibility. The recommended value is PBKDF2WithHmacSHA512.
+      client_secret_encoding:
+    resources:
+      limits:
+        cpu: 500m
+        memory: 512Mi
+      requests:
+        cpu: 200m
+        memory: 256Mi
+    ## Horizontal Pod Autoscaler
+    autoscaling:
+      enabled: true
+      min_replicas: 2
+      max_replicas: 5
+      target_average_utilization: 98
+    use_custom_jdbc_drivers: false
+    use_custom_binaries: false
+    custom_secret_name:
+    custom_xml:
+    logs:
+      console_format: json
+      console_log_level: INFO
+      console_source: message,trace,accessLog,ffdc,audit
+      trace_format: ENHANCED
+      trace_specification: "*=info"
+
+  ########################################################################
+  ########           Resource Registry configuration             ########
+  ########################################################################
+  resource_registry_configuration:
+    images:
+      pull_policy: IfNotPresent
+      resource_registry:
+        repository: cp.icr.io/cp/cp4a/baw/dba-etcd
+        tag: 20.0.2
+    admin_secret_name: resource-registry-admin-secret
+    replica_size: 3
+    probe:
+      liveness:
+        initial_delay_seconds: 60
+        period_seconds: 10
+        timeout_seconds: 5
+        success_threshold: 1
+        failure_threshold: 3
+      readiness:
+        initial_delay_seconds: 10
+        period_seconds: 10
+        timeout_seconds: 5
+        success_threshold: 1
+        failure_threshold: 3
+    resource:
+      limits:
+        cpu: "500m"
+        memory: "512Mi"
+      requests:
+        cpu: "100m"
+        memory: "128Mi"
+    auto_backup:
+      enable: true
+      minimal_time_interval: 300
+      pvc_name: "{{ meta.name }}-dba-rr-pvc"
+      dynamic_provision:
+        enable: true
+        size: 3Gi
+        storage_class: "{{ shared_configuration.storage_configuration.sc_fast_file_storage_classname }}"
+
+  ########################################################################
+  ########     IBM Process Federation Server configuration       ########
+  ########################################################################
+  pfs_configuration:
+    ## Process Federation Server hostname
+    hostname: ""
+    ## Process Federation Server port
+    port: 443
+    ## How the HTTPS endpoint service should be published. The possible values are ClusterIP, NodePort, and Route.
+    service_type: Route
+
+    ## If you use an external Elasticsearch server, provide the following configuration
+    external_elasticsearch:
+      ## The endpoint of the external Elasticsearch, such as: https://<hostname>:<port>
+      endpoint: ""
+      ## The external Elasticsearch administrative secret
+      admin_secret_name: ""
+
+    image:
+      ## Process Federation Server image
+      repository: cp.icr.io/cp/cp4a/baw/pfs-prod
+      ## Process Federation Server image tag
+      tag: "20.0.2"
+      ## Process Federation Server image pull policy
+      pull_policy: IfNotPresent
+
+    ## Number of initial Process Federation Server pods
+    replicas: 1
+    ## Service account name for the Process Federation Server pod
+    service_account:
+    ## Whether Kubernetes can (soft) or must not (hard) deploy Process Federation Server pods onto the same node. The possible values are "soft" and "hard".
+    anti_affinity: hard
+
+    ## Whether to enable the default security roles. The possible values are true and false.
+    enable_default_security_roles: true
+    ## Name of the secret containing the Process Federation Server administration passwords, such as ltpaPassword, oidcClientPassword, and sslKeyPassword
+    admin_secret_name: ibm-pfs-admin-secret
+    ## Name of the secret containing the files that will be mounted in the /config/configDropins/overrides folder
+    config_dropins_overrides_secret: ""
+    ## Name of the secret containing the files that will be mounted in the /config/resources/security folder
+    resources_security_secret: ""
+    ## Name of the custom libraries containing the files that will be mounted in the /config/resources/libs folder
+    custom_libs_pvc: ""
+    ## Whether to enable the notification server. The possible values are true and false.
+    enable_notification_server: true
+    ## The secret that contains the Transport Layer Security (TLS) key and certificate for external HTTPS visits. You can enter the secret name here.
+    ## If you do not want to use a customized external TLS certificate, leave it empty.
+    external_tls_secret:
+    ## Certificate authority (CA) used to sign the external TLS secret. It is stored in the secret with the TLS key and certificate. You can enter the secret name here.
+    ## If you don't want to use a customized CA to sign the external TLS certificate, leave it empty.
+    external_tls_ca_secret:
+
+    ## Specify whether to use the built-in monitoring capability
+    monitor_enabled: false
+
+    tls:
+      ## Existing TLS secret containing tls.key and tls.crt
+      tls_secret_name:
+      ## Existing TLS trust secret list
+      tls_trust_list:
+
+    resources:
+      requests:
+        ## Requested amount of CPU for the PFS configuration
+        cpu: 500m
+        ## Requested amount of memory for the PFS configuration
+        memory: 512Mi
+      limits:
+        ## CPU limit for the PFS configuration
+        cpu: 2
+        ## Memory limit for the PFS configuration
+        memory: 4Gi
+
+    liveness_probe:
+      ## Number of seconds after the Process Federation Server container starts before the liveness probe is initiated
+      initial_delay_seconds: 300
+    readiness_probe:
+      ## Number of seconds after the Process Federation Server container starts before the readiness probe is initiated
+      initial_delay_seconds: 240
+
+    saved_searches:
+      ## Name of the Elasticsearch index used to store saved searches
+      index_name: ibmpfssavedsearches
+      ## Number of shards of the Elasticsearch index used to store saved searches
+      index_number_of_shards: 3
+      ## Number of replicas (pods) of the Elasticsearch index used to store saved searches
+      index_number_of_replicas: 1
+      ## Batch size used when retrieving saved searches
+      index_batch_size: 100
+      ## Amount of time before considering an update lock as expired. Valid values are numbers with a trailing 'm' or 's' for minutes or seconds.
+      update_lock_expiration: 5m
+      ## Amount of time before considering a unique constraint as expired. Valid values are numbers with a trailing 'm' or 's' for minutes or seconds.
+      unique_constraint_expiration: 5m
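+    ## (For reference: the properties named in the comments below refer to elements
+    ## of the generated Liberty server.xml; a hypothetical rendering is
+    ## <webAppSecurity ssoCookieName="ltpatoken2"/> and
+    ## <ltpa keysFileName="ltpa.keys" expiration="120m" monitorInterval="60s"/>.)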
+    security:
+      sso:
+        ## The ssoDomainNames property of the corresponding tag in the Liberty server.xml
+        domain_name:
+        ## The ssoCookieName property of the corresponding tag in the Liberty server.xml
+        cookie_name: "ltpatoken2"
+      ltpa:
+        ## The keysFileName property of the corresponding tag in the Liberty server.xml
+        filename: "ltpa.keys"
+        ## The expiration property of the corresponding tag in the Liberty server.xml
+        expiration: "120m"
+        ## The monitorInterval property of the corresponding tag in the Liberty server.xml
+        monitor_interval: "60s"
+      ## The sslProtocol property of the tag used as the default SSL configuration
+      ssl_protocol: SSL
+
+    executor:
+      ## Value of the maxThreads property of the corresponding tag in the Liberty server.xml
+      max_threads: "80"
+      ## Value of the coreThreads property of the corresponding tag in the Liberty server.xml
+      core_threads: "40"
+
+    rest:
+      ## Value of the userGroupCheckInterval property of the corresponding tag in the Liberty server.xml
+      user_group_check_interval: "300s"
+      ## Value of the systemStatusCheckInterval property of the corresponding tag in the Liberty server.xml
+      system_status_check_interval: "60s"
+      ## Value of the bdFieldsCheckInterval property of the corresponding tag in the Liberty server.xml
+      bd_fields_check_interval: "300s"
+
+    custom_env_variables:
+      ## Names of the custom environment variables defined in the secret referenced in pfs.customEnvVariables.secret
+      names:
+      # - name: MY_CUSTOM_ENVIRONMENT_VARIABLE
+      ## Secret holding the custom environment variables
+      secret:
+
+    ## Log trace configuration
+    logs:
+      ## Format for printing logs on the console
+      console_format: "json"
+      ## Log level for printing logs on the console
+      console_log_level: "INFO"
+      ## Source of the logs for printing on the console
+      console_source: "message,trace,accessLog,ffdc,audit"
+      ## Format for printing message logs on the console
+      message_format: "basic"
+      ## Format for printing trace logs on the console
+      trace_format: "ENHANCED"
+      ## Specification for printing trace logs
+      trace_specification: "*=info"
+    storage:
+      ## Use dynamic provisioning for PFS logs data storage
+      use_dynamic_provisioning: true
+      ## The minimum size of the persistent volume mounted as the PFS Liberty server /logs folder
+      size: 5Gi
+      ## Storage class of the persistent volume mounted as the PFS Liberty server /logs folder
+      storage_class: "{{ shared_configuration.storage_configuration.sc_medium_file_storage_classname }}"
+
+    ## When PFS is deployed in an environment that includes the Resource Registry,
+    ## the following additional parameters can be used to configure the integration between PFS and the Resource Registry
+    dba_resource_registry:
+      ## Time to live of the lease that creates the PFS entry in the DBA Resource Registry, in seconds
+      lease_ttl: 120
+      ## The interval at which to check that PFS is running, in seconds
+      pfs_check_interval: 10
+      ## The number of seconds after which PFS will be considered as not running if no connection can be performed
+      pfs_connect_timeout: 10
+      ## The number of seconds after which PFS will be considered as not running if it has not yet responded
+      pfs_response_timeout: 30
+      ## The key under which PFS should be registered in the DBA Service Registry when running
+      pfs_registration_key: /dba/appresources/IBM_PFS/PFS_SYSTEM
+      resources:
+        limits:
+          ## Memory limit for the PFS and RR integration pod
+          memory: '512Mi'
+          ## CPU limit for the PFS and RR integration pod
+          cpu: '500m'
+        requests:
+          ## Requested amount of memory for the PFS and RR integration pod
+          memory: '512Mi'
+          ## Requested amount of CPU for the PFS and RR integration pod
+          cpu: '200m'
+
+  ########################################################################
+  ########        Embedded Elasticsearch configuration           ########
+  ########################################################################
+  elasticsearch_configuration:
+    es_image:
+      ## Elasticsearch image
+      repository: cp.icr.io/cp/cp4a/baw/pfs-elasticsearch-prod
+      ## Elasticsearch image tag
+      tag: "20.0.2"
+      ## Elasticsearch image pull policy
+      pull_policy: IfNotPresent
+    es_init_image:
+      ## The image used by the privileged init container to configure Elasticsearch system settings.
+      ## This value is only relevant if elasticsearch_configuration.privileged is set to true.
+      repository: cp.icr.io/cp/cp4a/baw/pfs-init-prod
+      ## The image tag for the Elasticsearch init container
+      tag: "20.0.2"
+      ## The pull policy for the Elasticsearch init container
+      pull_policy: IfNotPresent
+    es_nginx_image:
+      ## The name of the Nginx docker image to be used by Elasticsearch pods
+      repository: cp.icr.io/cp/cp4a/baw/pfs-nginx-prod
+      ## The image tag of the Nginx docker image to be used by Elasticsearch pods
+      tag: "20.0.2"
+      ## The pull policy for the Nginx docker image to be used by Elasticsearch pods
+      pull_policy: IfNotPresent
+
+    ## Number of initial Elasticsearch pods
+    replicas: 1
+    ## How the HTTPS endpoint service should be published. The possible values are ClusterIP and NodePort.
+    service_type: ClusterIP
+    ## The port to which the Elasticsearch server HTTPS endpoint will be exposed externally.
+    ## This parameter is relevant only if elasticsearch_configuration.service_type is set to NodePort.
+    external_port:
+    ## The Elasticsearch admin secret that contains the username, password, and .htpasswd.
+    ## If not provided, the default admin secret named "{{ meta.name }}-elasticsearch-admin-secret" is used.
+    admin_secret_name:
+    ## Whether Kubernetes "may" (soft) or "must not" (hard) deploy Elasticsearch pods onto the same node.
+    ## The possible values are "soft" and "hard".
+    anti_affinity: hard
+    ## Name of a service account to use.
+    ## If elasticsearch_configuration.privileged is set to true, then this service account must allow running privileged containers.
+    ## If not provided, the default service account named "{{ meta.name }}-elasticsearch-service-account" is used.
+    service_account:
+    ## When set to true, a privileged container will be created to execute the appropriate sysctl commands so that the node running the pods matches the Elasticsearch requirements.
+    privileged: true
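+    ## (For reference: Elasticsearch documents a minimum kernel setting of
+    ## vm.max_map_count=262144; the privileged init container is what applies
+    ## such sysctl settings on the node when privileged is set to true.)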
+    ## Initial delay for the liveness and readiness probes of Elasticsearch pods
+    probe_initial_delay: 90
+    ## The JVM heap size to allocate to each Elasticsearch pod
+    heap_size: "1024m"
+    ## Specify whether to use the built-in monitoring capability
+    monitor_enabled: false
+
+    resources:
+      limits:
+        ## Memory limit for the Elasticsearch configuration
+        memory: "2Gi"
+        ## CPU limit for the Elasticsearch configuration
+        cpu: "1000m"
+      requests:
+        ## Requested amount of memory for the Elasticsearch configuration
+        memory: "1Gi"
+        ## Requested amount of CPU for the Elasticsearch configuration
+        cpu: "100m"
+
+    storage:
+      ## Whether to persist the Elasticsearch data. Set to false for non-production or trial-only deployments.
+      persistent: true
+      ## Set to true to use a dynamic storage provisioner
+      use_dynamic_provisioning: true
+      ## The minimum size of the persistent volume
+      size: 10Gi
+      ## Storage class name for Elasticsearch persistent storage
+      storage_class: "{{ shared_configuration.storage_configuration.sc_fast_file_storage_classname }}"
+
+    snapshot_storage:
+      ## Whether to persist the Elasticsearch snapshot storage. Set to true for production deployments.
+      enabled: false
+      ## Set to true to use a dynamic storage provisioner
+      use_dynamic_provisioning: true
+      ## The minimum size of the persistent volume
+      size: 30Gi
+      ## Storage class name for Elasticsearch persistent snapshot storage
+      storage_class_name: ""
+      ## By default, a new persistent volume claim is created. Specify an existing claim here if one is available.
+      existing_claim_name: ""
+
+  ########################################################################
+  ######## IBM FileNet Content Manager initialize configuration  ########
+  ########################################################################
+  initialize_configuration:
+    ic_domain_creation:
+      ## Provide a name for the domain
+      domain_name: "P8DOMAIN"
+      ## The encryption strength
+      encryption_key: "128"
+    ic_ldap_creation:
+      ## Administrator user
+      ic_ldap_admin_user_name:
+        - ""
+      ## Administrator group
+      ic_ldap_admins_groups_name:
+        - ""
+      ## Name of the LDAP directory
+      ic_ldap_name: "ldap_name"
+    ic_obj_store_creation:
+      object_stores:
+        ## Configuration for the document object store
+        ## Display name for the document object store to create
+        - oc_cpe_obj_store_display_name: "DOCS"
+          ## Symbolic name for the document object store to create
+          oc_cpe_obj_store_symb_name: "DOCS"
+          oc_cpe_obj_store_conn:
+            ## Object store connection name
+            name: "DOCS_connection" # database connection name
+            ## The name of the site
+            site_name: "InitialSite"
+            ## Add the name of the object store database
+            dc_os_datasource_name: "FNDSDOCS"
+            ## The XA datasource
+            dc_os_xa_datasource_name: "FNDSDOCSXA"
+          ## Admin user group
+          oc_cpe_obj_store_admin_user_groups:
+            - ""
+          ## An array of users with access to the object store
+          oc_cpe_obj_store_basic_user_groups:
+          ## Specify whether to enable add-ons
+          oc_cpe_obj_store_addons: true
+          ## Add-ons to enable for Content Platform Engine
+          oc_cpe_obj_store_addons_list:
+            - "{CE460ADD-0000-0000-0000-000000000004}"
+            - "{CE460ADD-0000-0000-0000-000000000001}"
+            - "{CE460ADD-0000-0000-0000-000000000003}"
+            - "{CE460ADD-0000-0000-0000-000000000005}"
+            - "{CE511ADD-0000-0000-0000-000000000006}"
+            - "{CE460ADD-0000-0000-0000-000000000008}"
+            - "{CE460ADD-0000-0000-0000-000000000007}"
+            - "{CE460ADD-0000-0000-0000-000000000009}"
+            - "{CE460ADD-0000-0000-0000-00000000000A}"
+            - "{CE460ADD-0000-0000-0000-00000000000B}"
+            - "{CE460ADD-0000-0000-0000-00000000000D}"
"{CE460ADD-0000-0000-0000-00000000000D}" + - "{CE511ADD-0000-0000-0000-00000000000F}" + ## Provide a name for the Advance Storage Area + oc_cpe_obj_store_asa_name: "demo_storage" + ## Provide a name for the file system storage device + oc_cpe_obj_store_asa_file_systems_storage_device_name: "demo_file_system_storage" + ## The root directory path for the object store storage area + oc_cpe_obj_store_asa_root_dir_path: "/opt/ibm/asa/os01_storagearea" + ## Specify whether to enable workflow for the object store + oc_cpe_obj_store_enable_workflow: false + ## Specify a name for the workflow region + oc_cpe_obj_store_workflow_region_name: "" + ## Specify the number of the workflow region + oc_cpe_obj_store_workflow_region_number: 1 + ## Specify a table space for the workflow data + oc_cpe_obj_store_workflow_data_tbl_space: "VWDATA_TS" + ## Optionally specify a table space for the workflow index + oc_cpe_obj_store_workflow_index_tbl_space: "VWINDEX_TS" + ## Optionally specify a table space for the workflow blob. + oc_cpe_obj_store_workflow_blob_tbl_space: "VWBLOB_TS" + ## Designate an LDAP group for the workflow admin group. + oc_cpe_obj_store_workflow_admin_group: "" + ## Designate an LDAP group for the workflow config group + oc_cpe_obj_store_workflow_config_group: "" + ## Default format for date and time + oc_cpe_obj_store_workflow_date_time_mask: "mm/dd/yy hh:tt am" + ## Locale for the workflow + oc_cpe_obj_store_workflow_locale: "en" + ## Provide a name for the connection point + oc_cpe_obj_store_workflow_pe_conn_point_name: "" + + ## Configuration for the design object store + ## Display name for the design object store to create + - oc_cpe_obj_store_display_name: "DOS" + ## ## Symbolic name for the document object store to create + oc_cpe_obj_store_symb_name: "DOS" + oc_cpe_obj_store_conn: + ## ## Object store connection name + name: "DOS_connection" #database connection name + ## The name of the site + site_name: "InitialSite" + ## Add the name of the object store database + dc_os_datasource_name: "FNDSDOS" + ## The XA datasource + dc_os_xa_datasource_name: "FNDSDOSXA" + ## Admin user group + oc_cpe_obj_store_admin_user_groups: + - "" + ## An array of users with access to the object store + oc_cpe_obj_store_basic_user_groups: + ## Specify whether to enable add-ons + oc_cpe_obj_store_addons: true + ## Add-ons to enable for Content Platform Engine + oc_cpe_obj_store_addons_list: + - "{CE460ADD-0000-0000-0000-000000000004}" + - "{CE460ADD-0000-0000-0000-000000000001}" + - "{CE460ADD-0000-0000-0000-000000000003}" + - "{CE460ADD-0000-0000-0000-000000000005}" + - "{CE511ADD-0000-0000-0000-000000000006}" + - "{CE460ADD-0000-0000-0000-000000000008}" + - "{CE460ADD-0000-0000-0000-000000000007}" + - "{CE460ADD-0000-0000-0000-000000000009}" + - "{CE460ADD-0000-0000-0000-00000000000A}" + - "{CE460ADD-0000-0000-0000-00000000000B}" + - "{CE460ADD-0000-0000-0000-00000000000D}" + - "{CE511ADD-0000-0000-0000-00000000000F}" + ## Provide a name for the Advance Storage Area + oc_cpe_obj_store_asa_name: "demo_storage" + ## Provide a name for the file system storage device + oc_cpe_obj_store_asa_file_systems_storage_device_name: "demo_file_system_storage" + ## The root directory path for the object store storage area + oc_cpe_obj_store_asa_root_dir_path: "/opt/ibm/asa/os02_storagearea" + ## Specify whether to enable workflow for the object store + oc_cpe_obj_store_enable_workflow: false + ## Specify a name for the workflow region + oc_cpe_obj_store_workflow_region_name: "" + ## Specify the number of the workflow 
+          oc_cpe_obj_store_workflow_region_number: 2
+          ## Specify a table space for the workflow data
+          oc_cpe_obj_store_workflow_data_tbl_space: "VWDATA_TS"
+          ## Optionally specify a table space for the workflow index
+          oc_cpe_obj_store_workflow_index_tbl_space: "VWINDEX_TS"
+          ## Optionally specify a table space for the workflow blob
+          oc_cpe_obj_store_workflow_blob_tbl_space: "VWBLOB_TS"
+          ## Designate an LDAP group for the workflow admin group
+          oc_cpe_obj_store_workflow_admin_group: ""
+          ## Designate an LDAP group for the workflow config group
+          oc_cpe_obj_store_workflow_config_group: ""
+          ## Default format for date and time
+          oc_cpe_obj_store_workflow_date_time_mask: "mm/dd/yy hh:tt am"
+          ## Locale for the workflow
+          oc_cpe_obj_store_workflow_locale: "en"
+          ## Provide a name for the connection point
+          oc_cpe_obj_store_workflow_pe_conn_point_name: ""
+
+        ## Configuration for the target object store
+        ## Display name for the target object store to create
+        - oc_cpe_obj_store_display_name: "TOS"
+          ## Symbolic name for the target object store to create
+          oc_cpe_obj_store_symb_name: "TOS"
+          oc_cpe_obj_store_conn:
+            ## Object store connection name
+            name: "TOS_connection" # database connection name
+            ## The name of the site
+            site_name: "InitialSite"
+            ## Add the name of the object store database
+            dc_os_datasource_name: "FNDSTOS"
+            ## The XA datasource
+            dc_os_xa_datasource_name: "FNDSTOSXA"
+          ## Admin user group
+          oc_cpe_obj_store_admin_user_groups:
+            - ""
+          ## An array of users with access to the object store
+          oc_cpe_obj_store_basic_user_groups:
+          ## Specify whether to enable add-ons
+          oc_cpe_obj_store_addons: true
+          ## Add-ons to enable for Content Platform Engine
+          oc_cpe_obj_store_addons_list:
+            - "{CE460ADD-0000-0000-0000-000000000004}"
+            - "{CE460ADD-0000-0000-0000-000000000001}"
+            - "{CE460ADD-0000-0000-0000-000000000003}"
+            - "{CE460ADD-0000-0000-0000-000000000005}"
+            - "{CE511ADD-0000-0000-0000-000000000006}"
+            - "{CE460ADD-0000-0000-0000-000000000008}"
+            - "{CE460ADD-0000-0000-0000-000000000007}"
+            - "{CE460ADD-0000-0000-0000-000000000009}"
+            - "{CE460ADD-0000-0000-0000-00000000000A}"
+            - "{CE460ADD-0000-0000-0000-00000000000B}"
+            - "{CE460ADD-0000-0000-0000-00000000000D}"
+            - "{CE511ADD-0000-0000-0000-00000000000F}"
+          ## Provide a name for the Advanced Storage Area
+          oc_cpe_obj_store_asa_name: "demo_storage"
+          ## Provide a name for the file system storage device
+          oc_cpe_obj_store_asa_file_systems_storage_device_name: "demo_file_system_storage"
+          ## The root directory path for the object store storage area
+          oc_cpe_obj_store_asa_root_dir_path: "/opt/ibm/asa/os03_storagearea"
+          ## Specify whether to enable workflow for the object store
+          oc_cpe_obj_store_enable_workflow: true
+          ## Specify a name for the workflow region
+          oc_cpe_obj_store_workflow_region_name: ""
+          ## Specify the number of the workflow region
+          oc_cpe_obj_store_workflow_region_number: 3
+          ## Specify a table space for the workflow data
+          oc_cpe_obj_store_workflow_data_tbl_space: "VWDATA_TS"
+          ## Optionally specify a table space for the workflow index
+          oc_cpe_obj_store_workflow_index_tbl_space: "VWINDEX_TS"
+          ## Optionally specify a table space for the workflow blob
+          oc_cpe_obj_store_workflow_blob_tbl_space: "VWBLOB_TS"
+          ## Designate an LDAP group for the workflow admin group
+          oc_cpe_obj_store_workflow_admin_group: ""
+          ## Designate an LDAP group for the workflow config group
+          oc_cpe_obj_store_workflow_config_group: ""
+          ## Default format for date and time
+          oc_cpe_obj_store_workflow_date_time_mask: "mm/dd/yy hh:tt am"
+          ## Locale for the workflow
+          oc_cpe_obj_store_workflow_locale: "en"
+          ## Provide a name for the connection point
+          oc_cpe_obj_store_workflow_pe_conn_point_name: "cpe_conn_tos"
+
+
+  ########################################################################
+  ########    IBM Business Automation Insights configuration     ########
+  ########################################################################
+  bai_configuration:
+    imageCredentials:
+      registry: cp.icr.io/cp/cp4a
+
+    # Set to true to automatically create the OpenShift routes when sc_deployment_platform is set
+    # to OCP or ROKS.
+    createRoutes: false
+
+    # Set to true to enable the Flink job for sending events to HDFS.
+    ingestion:
+      install: false
+
+    # Set to true to enable the Flink job for Digital Worker.
+    adw:
+      install: false
+
+    # Set to false to disable the Flink job for BAW.
+    bpmn:
+      install: true
+
+    # Set to true to enable the Flink job for BAWAdv.
+    bawadv:
+      install: false
+
+    # Set to false to disable the Flink job for ICM.
+    icm:
+      install: true
+
+    # Set to true to enable the Flink job for ODM.
+    odm:
+      install: false
+
+    # Set to true to enable the Flink job for Content.
+    content:
+      install: false
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_workflow.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_workflow.yaml
new file mode 100644
index 00000000..45d398d0
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_workflow.yaml
@@ -0,0 +1,1595 @@
+###############################################################################
+#
+# Licensed Materials - Property of IBM
+#
+# (C) Copyright IBM Corp. 2020. All Rights Reserved.
+#
+# US Government Users Restricted Rights - Use, duplication or
+# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+#
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+  name: workflow
+  labels:
+    app.kubernetes.io/instance: ibm-dba
+    app.kubernetes.io/managed-by: ibm-dba
+    app.kubernetes.io/name: ibm-dba
+    release: 20.0.2
+spec:
+  appVersion: 20.0.2
+  ###########################################################################
+  ## This section contains the shared configuration for all CP4A components #
+  ###########################################################################
+  shared_configuration:
+
+    ## Business Automation Workflow (BAW) license. The possible values are: user, non-production, and production.
+    ## This value could be different from the other licenses in the CR.
+    sc_deployment_baw_license: ""
+
+    ## FileNet Content Manager (FNCM) license. The possible values are: user, non-production, and production.
+    ## This value could be different from the other licenses in the CR.
+    sc_deployment_fncm_license: ""
+
+    ## Use this parameter to specify the license for the CP4A deployment.
+    ## The possible values are non-production and production; if not set, the license
+    ## defaults to production. This value could be different from the other licenses in the CR.
+    sc_deployment_license: ""
+
+    ## All CP4A components must use/share the image_pull_secrets to pull images
+    image_pull_secrets:
+      - admin.registrykey
+
+    ## All CP4A components must use/share the same docker image repository.
+ ## it should be "cp.icr.io". Otherwise, it will be a local docker registry.
+ sc_image_repository: cp.icr.io
+
+ ## For non-OCP (e.g., CNCF platforms such as AWS, GKE, etc.), this parameter is required
+ sc_run_as_user:
+
+ images:
+ keytool_job_container:
+ repository: cp.icr.io/cp/cp4a/baw/dba-keytool-jobcontainer
+ tag: 20.0.2
+ dbcompatibility_init_container:
+ repository: cp.icr.io/cp/cp4a/baw/dba-dbcompatibility-initcontainer
+ tag: 20.0.2
+ keytool_init_container:
+ repository: cp.icr.io/cp/cp4a/baw/dba-keytool-initcontainer
+ tag: 20.0.2
+ umsregistration_initjob:
+ repository: cp.icr.io/cp/cp4a/baw/dba-umsregistration-initjob
+ tag: 20.0.2
+
+ ## All CP4A components should use this pull_policy as the default, but it can be overridden by each component
+ pull_policy: IfNotPresent
+
+ ## All CP4A components must use/share the root_ca_secret in order for integration
+ root_ca_secret: icp4a-root-ca
+
+ ## CP4A patterns or capabilities to be deployed. This CR represents the "workflow" pattern, which includes the following
+ ## mandatory components: ban (Business Automation Navigator), ums (User Management Service), rr (Resource Registry), app_engine (Application Engine) and optional components: bai
+ sc_deployment_patterns: workflow
+
+ ## The optional components to be installed if listed here. This is normally populated by the User script based on input from the user.
+ ## The optional components are: bai
+ sc_optional_components: bai
+
+ ## The deployment type as selected by the user. Possible values are: demo, enterprise
+ sc_deployment_type: enterprise
+
+ ## The platform to be deployed, as specified by the user. Possible values are: OCP and other. This is normally populated by the User script
+ ## based on input from the user.
+ sc_deployment_platform:
+
+ ## For OCP, this is used to create routes; you should input a valid hostname in the required field.
+ sc_deployment_hostname_suffix: "{{ meta.name }}."
+
+ ## If the root certificate authority (CA) key of the external service is not signed by the operator root CA key, provide the TLS certificate of
+ ## the external service to the component's truststore.
+ trusted_certificate_list: []
+
+ ## Shared encryption key secret name that is used for Workstream Services and Process Federation Server integration.
+ encryption_key_secret: icp4a-shared-encryption-key
+
+ ## Enable/disable ECM (FNCM) / BAN initialization (e.g., creation of P8 domain, creation/configuration of object stores,
+ ## creation/configuration of CSS servers, and initialization of Navigator (ICN)). If the "initialize_configuration" section
+ ## is defined in the CR, then that configuration will take precedence, overriding this parameter.
+ sc_content_initialization: false
+ ## Enable/disable the ECM (FNCM) / BAN verification (e.g., creation of test folder, creation of test document,
+ ## execution of CBR search, and creation of Navigator demo repository and desktop). If the "verify_configuration"
+ ## section is defined in the CR, then that configuration will take precedence, overriding this parameter.
+ sc_content_verification: false
+
+ ## On OCP 3.x and 4.x, the User script will populate these three (3) parameters based on your input for "enterprise" deployment.
+ ## If you are deploying manually without using the User script, then you must provide the different storage classes for the slow, medium
+ ## and fast storage parameters below. If you only have 1 storage class defined, then you can use that 1 storage class for all 3 parameters.
+ storage_configuration:
+ sc_slow_file_storage_classname: ""
+ sc_medium_file_storage_classname: ""
+ sc_fast_file_storage_classname: ""
+
+ # Kafka client configuration for IBM Business Automation Insights and other ICP4A products.
+ #
+ # The customization of the following 4 parameters is required only if you have
+ # specified "bai" as part of the sc_optional_components to specify that Business Automation
+ # Insights must be installed.
+ #
+ # Otherwise, if Business Automation Insights is not being installed, there is no need to configure
+ # these parameters and they can be kept empty.
+ ##############################################################################################
+ kafka_configuration:
+ # Comma-separated list of hosts:port for connection to the Kafka cluster.
+ # This field is mandatory for any Kafka configuration.
+ bootstrap_servers: ""
+ # Value for the Kafka security.protocol property
+ # Valid values: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL. Default: PLAINTEXT.
+ security_protocol:
+ # Value for the Kafka sasl.mechanism property
+ # Valid values: PLAIN, SCRAM-SHA-512. Default: PLAIN.
+ sasl_mechanism:
+ # If the Kafka server requires authentication or uses SSL communications, the value of this field
+ # must provide the name of a secret that holds the following keys as base64-encoded strings:
+ # kafka-username: Kafka username; leave empty if no authentication
+ # kafka-password: Kafka password; leave empty if no authentication
+ # kafka-server-certificate: server certificate for SSL communications; leave empty if SSL protocol is not used
+ connection_secret_name:
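+ # For illustration only: a sketch of a Secret that connection_secret_name could reference.
+ # The secret name and values below are placeholders, not defaults. With stringData,
+ # Kubernetes stores the values base64-encoded for you; use the data field instead if you
+ # want to supply pre-encoded values.
+ # apiVersion: v1
+ # kind: Secret
+ # metadata:
+ #   name: kafka-connection-secret
+ # type: Opaque
+ # stringData:
+ #   kafka-username: "my-kafka-user"
+ #   kafka-password: "my-kafka-password"
+ #   kafka-server-certificate: ""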
+ ## The beginning section of LDAP configuration for CP4A
+ ldap_configuration:
+ ## The possible values are: "IBM Security Directory Server" or "Microsoft Active Directory"
+ lc_selected_ldap_type: ""
+
+ ## The name of the LDAP server to connect to
+ lc_ldap_server: ""
+
+ ## The port of the LDAP server to connect to. Some possible values are: 389, 636, etc.
+ lc_ldap_port: ""
+
+ ## The LDAP bind secret for LDAP authentication. The secret is expected to have ldapUsername and ldapPassword keys. Refer to Knowledge Center for more info.
+ lc_bind_secret: ldap-bind-secret
+
+ ## The LDAP base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc
+ lc_ldap_base_dn: ""
+
+ ## Enable SSL/TLS for LDAP communication. Refer to Knowledge Center for more info.
+ lc_ldap_ssl_enabled: true
+
+ ## The name of the secret that contains the LDAP SSL/TLS certificate.
+ lc_ldap_ssl_secret_name: ""
+
+ ## The LDAP user name attribute. One possible value is "*:cn" for TDS and "user:sAMAccountName" for AD. Refer to Knowledge Center for more info.
+ lc_ldap_user_name_attribute: ""
+
+ ## The LDAP user display name attribute. One possible value is "cn" for TDS and "sAMAccountName" for AD. Refer to Knowledge Center for more info.
+ lc_ldap_user_display_name_attr: ""
+
+ ## The LDAP group base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc
+ lc_ldap_group_base_dn: ""
+
+ ## The LDAP group name attribute. One possible value is "*:cn" for TDS and "*:cn" for AD. Refer to Knowledge Center for more info.
+ lc_ldap_group_name_attribute: "*:cn"
+
+ ## The LDAP group display name attribute. One possible value for both TDS and AD is "cn". Refer to Knowledge Center for more info.
+ lc_ldap_group_display_name_attr: "cn"
+
+ ## The LDAP group membership search filter string. One possible value is "(&(cn=%v)(|(objectclass=groupOfNames)(objectclass=groupOfUniqueNames)(objectclass=groupOfURLs)))" for TDS
+ ## and "(&(cn=%v)(objectcategory=group))" for AD.
+ lc_ldap_group_membership_search_filter: ""
+
+ ## The LDAP group membership ID map. One possible value is "groupofnames:member" for TDS and "memberOf:member" for AD.
+ lc_ldap_group_member_id_map: ""
+
+ ## The User script will uncomment the section needed based on your input. If you are deploying without the User script,
+ ## uncomment the necessary section (depending if you are using Active Directory (ad) or Tivoli Directory Service (tds)) accordingly.
+ # ad:
+ # lc_ad_gc_host: ""
+ # lc_ad_gc_port: ""
+ # lc_user_filter: "(&(samAccountName=%v)(objectClass=user))"
+ # lc_group_filter: "(&(samAccountName=%v)(objectclass=group))"
+ # tds:
+ # lc_user_filter: "(&(cn=%v)(objectclass=person))"
+ # lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))"
+
+ ## The beginning section of database configuration for CP4A
+ datasource_configuration:
+ ## The database configuration for the GCD datasource for CPE
+ dc_gcd_datasource:
+ ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle".
+ dc_database_type: ""
+ ## The GCD non-XA datasource name. The default value is "FNGCDDS".
+ dc_common_gcd_datasource_name: "FNGCDDS"
+ ## The GCD XA datasource name. The default value is "FNGCDDSXA".
+ dc_common_gcd_xa_datasource_name: "FNGCDDSXA"
+ ## Provide the database server name or IP address of the database server.
+ database_servername: ""
+ ## Provide the name of the database for the GCD for CPE. For example: "GCDDB"
+ database_name: ""
+ ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521"
+ database_port: ""
+ ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//<host>:1521/orcl"
+ dc_oracle_gcd_jdbc_url: ""
+
+ ## If the database type is Db2 HADR, then complete the rest of the parameters below.
+ ## Provide the database server name or IP address of the standby database server.
+ dc_hadr_standby_servername: ""
+ ## Provide the standby database server port. For Db2, the default is "50000".
+ dc_hadr_standby_port: ""
+ ## Provide the validation timeout. If you have no preference, keep the default value.
+ dc_hadr_validation_timeout: 15
+ ## Provide the retry interval. If you have no preference, keep the default value.
+ dc_hadr_retry_interval_for_client_reroute: 15
+ ## Provide the max # of retries. If you have no preference, keep the default value.
+ dc_hadr_max_retries_for_client_reroute: 3
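+ # For illustration only: a sketch of dc_gcd_datasource filled in for a Db2 HADR setup.
+ # The host names are placeholders, not defaults; the other values are the documented defaults.
+ # dc_gcd_datasource:
+ #   dc_database_type: "db2HADR"
+ #   dc_common_gcd_datasource_name: "FNGCDDS"
+ #   dc_common_gcd_xa_datasource_name: "FNGCDDSXA"
+ #   database_servername: "db2-primary.example.com"
+ #   database_name: "GCDDB"
+ #   database_port: "50000"
+ #   dc_hadr_standby_servername: "db2-standby.example.com"
+ #   dc_hadr_standby_port: "50000"
+ #   dc_hadr_validation_timeout: 15
+ #   dc_hadr_retry_interval_for_client_reroute: 15
+ #   dc_hadr_max_retries_for_client_reroute: 3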
For example: "OS1DB" + database_name: "" + ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521" + database_port: "" + ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//:1521/orcl" + dc_oracle_os_jdbc_url: "" + ###################################################################################### + ## If the database type is "Db2HADR", then complete the rest of the parameters below. + ## Otherwise, remove or comment out the rest of the parameters below. + ###################################################################################### + dc_hadr_standby_servername: "" + ## Provide the standby database server port. For Db2, the default is "50000". + dc_hadr_standby_port: "" + ## Provide the validation timeout. If not preference, keep the default value. + dc_hadr_validation_timeout: 15 + ## Provide the retry internal. If not preference, keep the default value. + dc_hadr_retry_interval_for_client_reroute: 15 + ## Provide the max # of retries. If not preference, keep the default value. + dc_hadr_max_retries_for_client_reroute: 3 + ## The database configuration for the target object store (TOS) datasource for CPE + - dc_database_type: "" + ## The TOS non-XA datasource name. The default value is "FNDSTOS". + dc_common_os_datasource_name: "FNDSTOS" + ## The TOS XA datasource name. The default value is "FNDSTOSXA". + dc_common_os_xa_datasource_name: "FNDSTOSXA" + ## Provide the database server name or IP address of the database server. This should be the same as the + ## GCD configuration above. + database_servername: "" + ## Provide the name of the database for the object store 1 for CPE. For example: "OS1DB" + database_name: "" + ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521" + database_port: "" + ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//:1521/orcl" + dc_oracle_os_jdbc_url: "" + ###################################################################################### + ## If the database type is "Db2HADR", then complete the rest of the parameters below. + ## Otherwise, remove or comment out the rest of the parameters below. + ###################################################################################### + dc_hadr_standby_servername: "" + ## Provide the standby database server port. For Db2, the default is "50000". + dc_hadr_standby_port: "" + ## Provide the validation timeout. If not preference, keep the default value. + dc_hadr_validation_timeout: 15 + ## Provide the retry internal. If not preference, keep the default value. + dc_hadr_retry_interval_for_client_reroute: 15 + ## Provide the max # of retries. If not preference, keep the default value. + dc_hadr_max_retries_for_client_reroute: 3 + ## The database configuration for the design object store (DOS) datasource for CPE + - dc_database_type: "" + ## The DOS non-XA datasource name. The default value is "FNDSDOS". + dc_common_os_datasource_name: "FNDSDOS" + ## The DOS XA datasource name. The default value is "FNDSDOSXA". + dc_common_os_xa_datasource_name: "FNDSDOSXA" + ## Provide the database server name or IP address of the database server. This should be the same as the + ## GCD configuration above. + database_servername: "" + ## Provide the name of the database for the object store 1 for CPE. For example: "OS1DB" + database_name: "" + ## Provide the database server port. 
+ ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521"
+ database_port: ""
+ ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//<host>:1521/orcl"
+ dc_oracle_os_jdbc_url: ""
+ ######################################################################################
+ ## If the database type is "Db2HADR", then complete the rest of the parameters below.
+ ## Otherwise, remove or comment out the rest of the parameters below.
+ ######################################################################################
+ dc_hadr_standby_servername: ""
+ ## Provide the standby database server port. For Db2, the default is "50000".
+ dc_hadr_standby_port: ""
+ ## Provide the validation timeout. If you have no preference, keep the default value.
+ dc_hadr_validation_timeout: 15
+ ## Provide the retry interval. If you have no preference, keep the default value.
+ dc_hadr_retry_interval_for_client_reroute: 15
+ ## Provide the max # of retries. If you have no preference, keep the default value.
+ dc_hadr_max_retries_for_client_reroute: 3
+
+ ## The database configuration for ICN (Navigator) - aka BAN (Business Automation Navigator)
+ dc_icn_datasource:
+ ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle". This should be the same as the
+ ## GCD and object store configuration above.
+ dc_database_type: ""
+ ## Provide the ICN datasource name. The default value is "ECMClientDS".
+ dc_common_icn_datasource_name: "ECMClientDS"
+ database_servername: ""
+ ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521"
+ database_port: ""
+ ## Provide the name of the database for ICN (Navigator). For example: "ICNDB"
+ database_name: ""
+ ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//<host>:1521/orcl"
+ dc_oracle_icn_jdbc_url: ""
+ ######################################################################################
+ ## If the database type is "Db2HADR", then complete the rest of the parameters below.
+ ## Otherwise, remove or comment out the rest of the parameters below.
+ ######################################################################################
+ dc_hadr_standby_servername: ""
+ ## Provide the standby database server port. For Db2, the default is "50000".
+ dc_hadr_standby_port: ""
+ ## Provide the validation timeout. If you have no preference, keep the default value.
+ dc_hadr_validation_timeout: 15
+ ## Provide the retry interval. If you have no preference, keep the default value.
+ dc_hadr_retry_interval_for_client_reroute: 15
+ ## Provide the max # of retries. If you have no preference, keep the default value.
+ dc_hadr_max_retries_for_client_reroute: 3
+
+ ## The database configuration for UMS (User Management Service)
+ dc_ums_datasource:
+ ## Provide the database type from your infrastructure. The possible values are "db2" or "oracle". This should be the same as the
+ ## other datasource configuration above. Db2 with HADR is automatically activated if dc_ums_oauth_alternate_hosts and dc_ums_oauth_alternate_ports
+ ## are set.
+ dc_ums_oauth_type: ""
+ ## Provide the database server name or IP address of the database server.
+ dc_ums_oauth_host: ""
+ ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+ dc_ums_oauth_port: ""
For example: "UMSDB" + dc_ums_oauth_name: "" + dc_ums_oauth_schema: OAuthDBSchema + dc_ums_oauth_ssl: true + dc_ums_oauth_ssl_secret_name: "" + dc_ums_oauth_driverfiles: + dc_ums_oauth_alternate_hosts: + dc_ums_oauth_alternate_ports: + + ## The database database configuration for teamserver + ## Provide the database type from your infrastructure. The possible values are "db2" or "oracle". This should be the same as the + ## other datasource configuration above. Db2 with HADR is automatically activated if dc_ums_teamserver_alternate_hosts and dc_ums_teamserver_alternate_ports + ## are set. + dc_ums_teamserver_type: "" + dc_ums_teamserver_host: "" + ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521". + dc_ums_teamserver_port: "" + ## Provide the name of the database for UMS teamserver. For example: "UMSDB" + dc_ums_teamserver_name: "" + dc_ums_teamserver_ssl: true + dc_ums_teamserver_ssl_secret_name: "" + dc_ums_teamserver_driverfiles: + dc_ums_teamserver_alternate_hosts: + dc_ums_teamserver_alternate_ports: + + + ######################################################################## + ######## IBM Business Automation Workflow configuration ######## + ######################################################################## + baw_configuration: + ## The baw_configuration is a list. You can deploy multiple instances of Workflow server and assign different configurations for each instance. + ## For each instance, baw_configuration.name and baw_configuration.name.hostname must have different values. + - name: instance1 + ## If config the Process Portal for a federated environment + host_federated_portal: true + ## Workflow server service type. + service_type: "Route" + ## Workflow server hostname + hostname: "" + ## Workflow server port + port: 443 + ## Workflow server nodeport + nodeport: 30026 + ## Workflow server environment type. The possible value are "Development" or "Test" or "Staging" or "Production" + env_type: "Production" + ## Workflow server capability + capabilities: "workflow" + ## Workflow server replica count + replicas: 1 + ## Provide Workflow server default administrator ID + admin_user: "" + ## The name of Workflow server admin secret + admin_secret_name: "baw-admin-secret" + ## Specify whether to use the built-in monitoring capability + monitor_enabled: false + + # For scenario that customer has implemented their own Portal. E,g https://portal.mycompany.com + customized_portal_endpoint: "" + federated_portal: + ## Content security policy additional origins for federate on premise BAW systems. E.g ["https://on-prem-baw1","https://on-prem-baw2"] + content_security_policy_additional_origins: [] + external_connection_timeout: "" + + tls: + ## Workflow server TLS secret that contains tls.key and tls.crt. + tls_secret_name: ibm-baw-tls + ## Workflow server TLS trust list. 
+ ## Workflow server TLS trust list.
+ tls_trust_list:
+
+ image:
+ ## Workflow image repository URL
+ repository: cp.icr.io/cp/cp4a/baw/workflow-server
+ ## Image tag for Workflow server container
+ tag: 20.0.2
+ ## Pull policy for Workflow container
+ pullPolicy: IfNotPresent
+ pfs_bpd_database_init_job:
+ ## Database initialization image repository URL for Process Federation Server
+ repository: cp.icr.io/cp/cp4a/baw/pfs-bpd-database-init-prod
+ ## Image tag for database initialization for Process Federation Server
+ tag: 20.0.2
+ ## Pull policy for Process Federation Server database initialization image
+ pullPolicy: IfNotPresent
+ upgrade_job:
+ ## Workflow server database handling image repository URL
+ repository: cp.icr.io/cp/cp4a/baw/workflow-server-dbhandling
+ ## Image tag for Workflow server database handling
+ tag: 20.0.2
+ ## Pull policy for Workflow server database handling
+ pullPolicy: IfNotPresent
+ bas_auto_import_job:
+ ## BAS toolkit init image repository URL
+ repository: cp.icr.io/cp/cp4a/baw/toolkit-installer
+ ## Image tag for BAS toolkit init image
+ tag: 20.0.2
+ ## Pull policy for BAS toolkit init image
+ pullPolicy: IfNotPresent
+
+ ## The database configuration for Workflow server
+ database:
+ ## Whether to enable Secure Sockets Layer (SSL) support for the Workflow server database connection
+ ssl: false
+ ## Secret name for storing the database TLS certificate when an SSL connection is enabled
+ sslsecretname: ""
+ ## Workflow server database type
+ type: "DB2"
+ ## Workflow server database server name.
+ server_name: ""
+ ## Workflow server database name
+ database_name: ""
+ ## Workflow server database port. For DB2, the default value is "50000"
+ port: ""
+ ## Workflow server database secret name
+ secret_name: ""
+ ## Workflow server database connection pool maximum number of physical connections
+ cm_max_pool_size: 200
+ dbcheck:
+ # The maximum waiting time (seconds) to check the database initialization status
+ wait_time: 900
+ # The interval time (seconds) to check.
+ interval_time: 15
+ hadr:
+ ## Database standby host for high availability disaster recovery (HADR)
+ ## To enable database HADR, configure both standby host and port
+ standbydb_host:
+ ## Database standby port for HADR
+ standbydb_port:
+ ## Retry interval for HADR
+ retryinterval:
+ ## Maximum retries for HADR
+ maxretries:
+
+ ## The configurations for content integration
+ content_integration:
+ init_job_image:
+ ## Image name for content integration container.
+ repository: cp.icr.io/cp/cp4a/baw/iaws-ps-content-integration
+ ## Image tag for content integration container
+ tag: 20.0.2
+ ## Pull policy for content integration container.
+ pull_policy: IfNotPresent
+ ## Domain name for content integration
+ domain_name: ""
+ ## Object Store name for content integration
+ object_store_name: ""
+ ## Admin secret for content integration
+ cpe_admin_secret: ""
+
+ ## The configuration for case
+ case:
+ init_job_image:
+ ## Image name for CASE init job container.
+ repository: cp.icr.io/cp/cp4a/baw/workflow-server-case-initialization
+ ## Image tag for CASE init job container.
+ tag: 20.0.2
+ ## Pull policy for CASE init job container.
+ pull_policy: IfNotPresent
+
+ ## Domain name for CASE
+ domain_name: "P8DOMAIN"
+ ## Design Object Store name of CASE
+ object_store_name_dos: "DOS"
+ ## Target Object Store name of CASE
+ object_store_name_tos: "TOS"
+ ## Connection point name for Target Object Store
+ connection_point_name_tos: "cpe_conn_tos"
+
+ ## PVC name for CASE network shared directory
+ network_shared_directory_pvc: ""
+ ## The custom package names, if you need to install custom packages; the value format is like "package1.zip, package2.zip"
+ custom_package_names: ""
+ ## The custom extension names, if you need to install custom extensions; the value format is like "extension1.zip, extension2.zip"
+ custom_extension_names: ""
+ ## The event emitter settings if you want to enable Case Event Emitter
+ event_emitter:
+ date_sql:
+ logical_unique_id:
+ solution_list:
+
+ ## Workflow Center configuration
+ workflow_center:
+ ## The URL of Workflow Center
+ url: ""
+ # The secret name of Workflow Center that contains username and password
+ secret_name: ""
+ # The heartbeat interval (seconds) to connect to Workflow Center
+ heartbeat_interval: 30
+
+ ## The configuration for Resource Registry if you want to use an external Resource Registry
+ resource_registry:
+ ## Resource Registry host name
+ hostname: ""
+ ## Resource Registry port
+ port: 443
+ ## Resource Registry administrative secret
+ admin_secret_name: ""
+
+ ## The configuration for Java Messaging Service (JMS)
+ jms:
+ image:
+ ## Image name for Java Messaging Service container
+ repository: cp.icr.io/cp/cp4a/baw/jms
+ ## Image tag for Java Messaging Service container
+ tag: 20.0.2
+ ## Pull policy for Java Messaging Service container
+ pull_policy: IfNotPresent
+ tls:
+ ## TLS secret name for Java Message Service (JMS)
+ tls_secret_name: ibm-jms-tls-secret
+ resources:
+ limits:
+ ## Memory limit for JMS configuration
+ memory: "2Gi"
+ ## CPU limit for JMS configuration
+ cpu: "1000m"
+ requests:
+ ## Requested amount of memory for JMS configuration
+ memory: "512Mi"
+ ## Requested amount of CPU for JMS configuration
+ cpu: "200m"
+ storage:
+ ## Whether to enable persistent storage for JMS
+ persistent: true
+ ## Size for JMS persistent storage
+ size: "1Gi"
+ ## Whether to enable dynamic provisioning for JMS persistent storage
+ use_dynamic_provisioning: true
+ ## Access modes for JMS persistent storage
+ access_modes:
+ - ReadWriteOnce
+ ## Storage class name for JMS persistent storage
+ storage_class: "{{ shared_configuration.storage_configuration.sc_fast_file_storage_classname }}"
+
+ ## Resource configuration
+ resources:
+ limits:
+ ## CPU limit for Workflow server.
+ cpu: 2
+ ## Memory limit for Workflow server
+ memory: 2096Mi
+ requests:
+ ## Requested amount of CPU for Workflow server
+ cpu: "500m"
+ ## Requested amount of memory for Workflow server.
+ memory: 1048Mi
+
+ ## liveness and readiness probes configuration
+ probe:
+ ws:
+ liveness_probe:
+ ## Number of seconds after the Workflow server container starts before the liveness probe is initiated
+ initial_delay_seconds: 300
+ readinessProbe:
+ ## Number of seconds after the Workflow server container starts before the readiness probe is initiated
+ initial_delay_seconds: 240
+
+ ## log trace configuration
+ logs:
+ ## Format for printing logs on the console
+ console_format: "json"
+ ## Log level for printing logs on the console
+ console_log_level: "INFO"
+ ## Source of the logs for printing on the console
+ console_source: "message,trace,accessLog,ffdc,audit"
+ ## Format for printing message logs on the console
+ message_format: "basic"
+ ## Format for printing trace logs on the console
+ trace_format: "ENHANCED"
+ ## Specification for printing trace logs
+ trace_specification: "*=info"
+
+ ## storage configuration
+ storage:
+ ## Set to true to use a dynamic storage provisioner. If set to false, you must set existing_pvc_for_logstore and existing_pvc_for_dumpstore
+ use_dynamic_provisioning: true
+ ## The persistent volume claim for logs
+ existing_pvc_for_logstore: ""
+ ## The minimum size of the persistent volume mounted as the log store
+ size_for_logstore: "10Gi"
+ ## The persistent volume claim for dump files
+ existing_pvc_for_dumpstore: ""
+ ## The minimum size of the persistent volume mounted as the dump store
+ size_for_dumpstore: "10Gi"
+
+ ## JVM options separated with space, for example: -Dtest1=test -Dtest2=test2
+ jvm_customize_options:
+
+ ## Workflow server custom plain XML snippet
+ ## liberty_custom_xml: |+
+ ##
+ ##
+ ##
+ liberty_custom_xml:
+
+ ## Workflow server custom XML secret name that contains custom configuration in Liberty server.xml
+ custom_xml_secret_name:
+
+ ## Workflow server Lombardi custom XML secret name that contains custom configuration in 100Custom.xml
+ lombardi_custom_xml_secret_name:
+
+ ## IBM Business Automation Insights integration configuration
+ business_event:
+ enable: false
+ enable_task_record: true
+ enable_task_api: false
+ subscription:
+ - {'app_name': '*','version': '*','component_type': '*','component_name': '*','element_type': '*','element_name': '*','nature': '*'}
+
+ ########################################################################
+ ########          IBM FileNet Content Manager configuration    ########
+ ########################################################################
+ ecm_configuration:
+
+ ## FNCM secret that contains GCD DB user name and password, Object Store DB user name and password,
+ ## LDAP user and password, CPE username and password, keystore password, and LTPA password, etc.
+ fncm_secret_name: ibm-fncm-secret
+
+ ####################################
+ ## Start of configuration for CPE ##
+ ####################################
+ cpe:
+ ## The architecture of the cluster. This is the default for Linux on x86 and should not be changed.
+ arch:
+ amd64: "3 - Most preferred"
+
+ ## The number of replicas or pods to be deployed. The default is 1 replica; for high availability in a production env,
+ ## it is recommended to have 2 or more.
+ replica_count: 1
+
+ ## This is the image repository and tag that correspond to the image registry, which is where the image will be pulled.
+ image:
+ ## The default repository is the IBM Entitled Registry.
+ repository: cp.icr.io/cp/cp4a/fncm/cpe
+ tag: ga-555-p8cpe
+
+ ## This will override the image pull policy in the shared_configuration.
+ pull_policy: IfNotPresent
+
+ ## Logging for workloads. This is the default setting.
+ log:
+ format: json
+
+ ## The initial resources (CPU, memory) requests and limits. If more resources are needed,
+ ## make the changes here to meet your requirement.
+ resources:
+ requests:
+ cpu: 500m
+ memory: 512Mi
+ limits:
+ cpu: 1
+ memory: 3072Mi
+
+ ## By default "Autoscaling" is enabled with the following settings, with a minimum of 1 replica and a maximum of 3 replicas. Change
+ ## these settings to meet your requirement.
+ auto_scaling:
+ enabled: true
+ max_replicas: 3
+ min_replicas: 1
+ ## This is the default cpu percentage before autoscaling occurs.
+ target_cpu_utilization_percentage: 80
+
+ ## Below are the default CPE Production settings. Make the necessary changes as you see fit. Refer to Knowledge Center documentation for details.
+ cpe_production_setting:
+ time_zone: Etc/UTC
+
+ ## The initial use of available memory.
+ jvm_initial_heap_percentage: 18
+ ## The maximum percentage of available memory to use.
+ jvm_max_heap_percentage: 33
+
+ ## Use this "jvm_customize_options" parameter to specify JVM arguments using comma separation. For example, if you want to set the following JVM arguments:
+ ## -Dmy.test.jvm.arg1=123
+ ## -Dmy.test.jvm.arg2=abc
+ ## -XX:+SomeJVMSettings
+ ## -XshowSettings:vm"
+ ## Then set the following: jvm_customize_options="-Dmy.test.jvm.arg1=123,-Dmy.test.jvm.arg2=abc,-XX:+SomeJVMSettings,-XshowSettings:vm"
+ jvm_customize_options:
+
+ ## Default JNDI name for GCD for non-XA data source
+ gcd_jndi_name: FNGCDDS
+ ## Default JNDI name for GCD for XA data source
+ gcd_jndixa_name: FNGCDDSXA
+ license_model: FNCM.PVUNonProd
+ license: accept
+
+ ## Enable/disable monitoring where metrics can be sent to Graphite or scraped by Prometheus
+ monitor_enabled: true
+ ## Enable/disable logging where logs can be sent to Elasticsearch.
+ logging_enabled: true
+
+ ## By default, the plugin for Graphite is enabled to emit container metrics.
+ collectd_enable_plugin_write_graphite: true
+
+ ## Persistent Volume Claims for CPE. If the storage_configuration in the shared_configuration is configured,
+ ## the Operator will create the PVC using the names below.
+ datavolume:
+ existing_pvc_for_cpe_cfgstore: "cpe-cfgstore"
+ existing_pvc_for_cpe_logstore: "cpe-logstore"
+ existing_pvc_for_cpe_filestore: "cpe-filestore"
+ existing_pvc_for_cpe_icmrulestore: "cpe-icmrulesstore"
+ existing_pvc_for_cpe_textextstore: "cpe-textextstore"
+ existing_pvc_for_cpe_bootstrapstore: "cpe-bootstrapstore"
+ existing_pvc_for_cpe_fnlogstore: "cpe-fnlogstore"
+
+ ## Default values for both readiness and liveness probes. Modify these values to meet your requirements.
+ probe:
+ readiness:
+ initial_delay_seconds: 120
+ period_seconds: 5
+ timeout_seconds: 10
+ failure_threshold: 6
+ liveness:
+ initial_delay_seconds: 600
+ period_seconds: 5
+ timeout_seconds: 5
+ failure_threshold: 6
+
+ ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above.
+ image_pull_secrets:
+ name: "admin.registrykey"
+
+ #####################################
+ ## Start of configuration for CMIS ##
+ #####################################
+ cmis:
+ ## The architecture of the cluster. This is the default for Linux on x86 and should not be changed.
+ arch:
+ amd64: "3 - Most preferred"
+
+ ## The number of replicas or pods to be deployed. The default is 1 replica; for high availability in a production env,
+ ## it is recommended to have 2 or more.
+ replica_count: 1
+
+ ## This is the image repository and tag that correspond to the image registry, which is where the image will be pulled.
+ image:
+ ## The default repository is the IBM Entitled Registry.
+ repository: cp.icr.io/cp/cp4a/fncm/cmis
+ tag: ga-305-cmis
+
+ ## This will override the image pull policy in the shared_configuration.
+ pull_policy: IfNotPresent
+
+ ## Logging for workloads. This is the default setting.
+ log:
+ format: json
+
+ ## The initial resources (CPU, memory) requests and limits. If more resources are needed,
+ ## make the changes here to meet your requirement.
+ resources:
+ requests:
+ cpu: 500m
+ memory: 256Mi
+ limits:
+ cpu: 1
+ memory: 1536Mi
+
+ ## By default "Autoscaling" is enabled with the following settings, with a minimum of 1 replica and a maximum of 3 replicas. Change
+ ## these settings to meet your requirement.
+ auto_scaling:
+ enabled: true
+ max_replicas: 3
+ min_replicas: 1
+ ## This is the default cpu percentage before autoscaling occurs.
+ target_cpu_utilization_percentage: 80
+
+ ## Below are the default CMIS Production settings. Make the necessary changes as you see fit. Refer to Knowledge Center documentation for details.
+ cmis_production_setting:
+ ## By default, this parameter is set by the Operator using the CPE service endpoint (e.g., "http://{{ meta.name }}-cpe-svc:9080/wsi/FNCEWS40MTOM")
+ cpe_url:
+
+ time_zone: Etc/UTC
+
+ ## The initial use of available memory.
+ jvm_initial_heap_percentage: 40
+ ## The maximum percentage of available memory to use.
+ jvm_max_heap_percentage: 66
+
+ ## Use this "jvm_customize_options" parameter to specify JVM arguments using comma separation. For example, if you want to set the following JVM arguments:
+ ## -Dmy.test.jvm.arg1=123
+ ## -Dmy.test.jvm.arg2=abc
+ ## -XX:+SomeJVMSettings
+ ## -XshowSettings:vm"
+ ## Then set the following: jvm_customize_options="-Dmy.test.jvm.arg1=123,-Dmy.test.jvm.arg2=abc,-XX:+SomeJVMSettings,-XshowSettings:vm"
+ jvm_customize_options:
+
+ checkout_copycontent: true
+ default_maxitems: 25
+ cvl_cache: true
+ secure_metadata_cache: false
+ filter_hidden_properties: true
+ querytime_limit: 180
+ resumable_queries_forrest: true
+ escape_unsafe_string_characters: false
+ max_soap_size: 180
+ print_pull_stacktrace: false
+ folder_first_search: false
+ ignore_root_documents: false
+ supporting_type_mutability: false
+ license: accept
+
+ ## Enable/disable monitoring where metrics can be sent to Graphite or scraped by Prometheus
+ monitor_enabled: true
+ ## Enable/disable logging where logs can be sent to Elasticsearch.
+ logging_enabled: true
+
+ ## By default, the plugin for Graphite is enabled to emit container metrics.
+ collectd_enable_plugin_write_graphite: true
+
+ ## Persistent Volume Claims for CMIS. If the storage_configuration in the shared_configuration is configured,
+ ## the Operator will create the PVC using the names below.
+ datavolume:
+ existing_pvc_for_cmis_cfgstore: "cmis-cfgstore"
+ existing_pvc_for_cmis_logstore: "cmis-logstore"
+
+ ## Default values for both readiness and liveness probes. Modify these values to meet your requirements.
+ probe:
+ readiness:
+ initial_delay_seconds: 90
+ period_seconds: 5
+ timeout_seconds: 10
+ failure_threshold: 6
+ liveness:
+ initial_delay_seconds: 180
+ period_seconds: 5
+ timeout_seconds: 5
+ failure_threshold: 6
+ ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above.
+ image_pull_secrets:
+ name: "admin.registrykey"
+
+ ########################################################################
+ ########     IBM Business Automation Navigator configuration   ########
+ ########################################################################
+ navigator_configuration:
+
+ ## Navigator secret that contains user credentials for LDAP and database
+ ban_secret_name: ibm-ban-secret
+
+ ## The architecture of the cluster. This is the default for Linux and should not be changed.
+ arch:
+ amd64: "3 - Most preferred"
+
+ ## The number of replicas or pods to be deployed. The default is 1 replica; for high availability in a production env,
+ ## it is recommended to have 2 or more.
+ replica_count: 1
+
+ ## This is the image repository and tag that correspond to the image registry, which is where the image will be pulled.
+ image:
+
+ ## The default repository is the IBM Entitled Registry
+ repository: cp.icr.io/cp/cp4a/ban/navigator-sso
+ tag: ga-308-icn
+
+ ## This will override the image pull policy in the shared_configuration.
+ pull_policy: IfNotPresent
+
+ ## Logging for workloads. This is the default setting.
+ log:
+ format: json
+
+ ## The initial resources (CPU, memory) requests and limits. If more resources are needed,
+ ## make the changes here to meet your requirement.
+ resources:
+ requests:
+ cpu: 500m
+ memory: 512Mi
+ limits:
+ cpu: 1
+ memory: 1536Mi
+
+ ## By default "Autoscaling" is enabled with the following settings, with a minimum of 1 replica and a maximum of 3 replicas. Change
+ ## these settings to meet your requirement.
+ auto_scaling:
+ enabled: true
+ max_replicas: 3
+ min_replicas: 1
+ ## This is the default cpu percentage before autoscaling occurs.
+ target_cpu_utilization_percentage: 80
+
+ ## Below are the default ICN Production settings. Make the necessary changes as you see fit. Refer to Knowledge Center documentation for details.
+ icn_production_setting:
+ timezone: Etc/UTC
+ jvm_initial_heap_percentage: 40
+ jvm_max_heap_percentage: 66
+ jvm_customize_options:
+ icn_db_type: db2
+ icn_jndids_name: ECMClientDS
+ icn_schema: ICNDB
+ icn_table_space: ICNDB
+ allow_remote_plugins_via_http: false
+
+ ## Default settings for monitoring
+ monitor_enabled: false
+ ## Default settings for logging
+ logging_enabled: false
+
+ ## Persistent Volume Claims for ICN. If the storage_configuration in the shared_configuration is configured,
+ ## the Operator will create the PVC using the names below.
+ datavolume:
+ existing_pvc_for_icn_cfgstore: "icn-cfgstore"
+ existing_pvc_for_icn_logstore: "icn-logstore"
+ existing_pvc_for_icn_pluginstore: "icn-pluginstore"
+ existing_pvc_for_icnvw_cachestore: "icn-vw-cachestore"
+ existing_pvc_for_icnvw_logstore: "icn-vw-logstore"
+ existing_pvc_for_icn_aspera: "icn-asperastore"
+
+ ## Default values for both readiness and liveness probes. Modify these values to meet your requirements.
+ probe:
+ readiness:
+ initial_delay_seconds: 120
+ period_seconds: 5
+ timeout_seconds: 10
+ failure_threshold: 6
+ liveness:
+ initial_delay_seconds: 600
+ period_seconds: 5
+ timeout_seconds: 5
+ failure_threshold: 6
+
+ ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above.
+ image_pull_secrets:
+ name: "admin.registrykey"
+
+ ########################################################################
+ ########   IBM User and Group Management Service configuration ########
+ ########################################################################
+ ums_configuration:
+ existing_claim_name:
+ replica_count: 2
+ service_type: Route
+ # your external UMS host name, only required if there is no sc_deployment_hostname_suffix given
+ hostname:
+ port: 443
+ images:
+ ums:
+ repository: cp.icr.io/cp/cp4a/ums/ums
+ tag: 20.0.2
+ admin_secret_name: ibm-dba-ums-secret
+ ## optional for secure communication with UMS
+ external_tls_secret_name:
+ ## optional for secure communication with UMS
+ external_tls_ca_secret_name:
+ ## optional for secure communication with UMS
+ external_tls_teams_secret_name:
+ ## optional for secure communication with UMS
+ external_tls_scim_secret_name:
+ ## optional for secure communication with UMS
+ external_tls_sso_secret_name:
+ oauth:
+ ## optional: full DN of an LDAP group that is authorized to manage OIDC clients, in addition to primary admin from admin secret
+ client_manager_group:
+ ## optional: full DN of an LDAP group that is authorized to manage app_tokens, in addition to primary admin from admin secret
+ token_manager_group:
+ ## optional: lifetime of OAuth access_tokens. default is 7200s
+ access_token_lifetime:
+ ## optional: lifetime of app-tokens. default is 366d
+ app_token_lifetime:
+ ## optional: lifetime of app-passwords. default is 366d
+ app_password_lifetime:
+ ## optional: maximum number of app-tokens or app-passwords per client. default is 100
+ app_token_or_password_limit:
+ ## optional: encoding / encryption when storing client secrets in OAuth database. Default is xor for compatibility. Recommended value is PBKDF2WithHmacSHA512
+ client_secret_encoding:
+ resources:
+ limits:
+ cpu: 500m
+ memory: 512Mi
+ requests:
+ cpu: 200m
+ memory: 256Mi
+ ## Horizontal Pod Autoscaler
+ autoscaling:
+ enabled: true
+ min_replicas: 2
+ max_replicas: 5
+ target_average_utilization: 98
+ use_custom_jdbc_drivers: false
+ use_custom_binaries: false
+ custom_secret_name:
+ custom_xml:
+ logs:
+ console_format: json
+ console_log_level: INFO
+ console_source: message,trace,accessLog,ffdc,audit
+ trace_format: ENHANCED
+ trace_specification: "*=info"
+
+ ########################################################################
+ ########             Resource Registry configuration           ########
+ ########################################################################
+ resource_registry_configuration:
+ images:
+ pull_policy: IfNotPresent
+ resource_registry:
+ repository: cp.icr.io/cp/cp4a/baw/dba-etcd
+ tag: 20.0.2
+ admin_secret_name: resource-registry-admin-secret
+ replica_size: 3
+ probe:
+ liveness:
+ initial_delay_seconds: 60
+ period_seconds: 10
+ timeout_seconds: 5
+ success_threshold: 1
+ failure_threshold: 3
+ readiness:
+ initial_delay_seconds: 10
+ period_seconds: 10
+ timeout_seconds: 5
+ success_threshold: 1
+ failure_threshold: 3
+ resource:
+ limits:
+ cpu: "500m"
+ memory: "512Mi"
+ requests:
+ cpu: "100m"
+ memory: "128Mi"
+ auto_backup:
+ enable: true
+ minimal_time_interval: 300
+ pvc_name: "{{ meta.name }}-dba-rr-pvc"
+ dynamic_provision:
+ enable: true
+ size: 3Gi
+ storage_class: "{{ shared_configuration.storage_configuration.sc_fast_file_storage_classname }}"
+
+
+ ########################################################################
+ ########       IBM Process Federation Server configuration     ########
+ ########################################################################
+ pfs_configuration:
+ ## Process Federation Server hostname
+ hostname: ""
+ ## Process Federation Server port
+ port: 443
+ ## How the HTTPS endpoint service should be published. Possible values are ClusterIP, NodePort, Route
+ service_type: Route
+
+ ## If you use an external Elasticsearch server, provide the following configuration
+ external_elasticsearch:
+ ## The endpoint of the external Elasticsearch, such as: https://<hostname>:<port>
+ endpoint: ""
+ ## The external Elasticsearch administrative secret
+ admin_secret_name: ""
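+ # For illustration only: a sketch of pointing PFS at an external Elasticsearch server;
+ # the endpoint and secret name below are placeholders, not defaults.
+ # external_elasticsearch:
+ #   endpoint: "https://elasticsearch.example.com:9200"
+ #   admin_secret_name: "my-external-es-admin-secret"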
+
+ image:
+ ## Process Federation Server image
+ repository: cp.icr.io/cp/cp4a/baw/pfs-prod
+ ## Process Federation Server image tag
+ tag: "20.0.2"
+ ## Process Federation Server image pull policy
+ pull_policy: IfNotPresent
+
+ ## Number of initial Process Federation Server pods
+ replicas: 1
+ ## Service account name for Process Federation Server pod
+ service_account:
+ ## Whether Kubernetes can (soft) or must not (hard) deploy Process Federation Server pods onto the same node. Possible values are "soft" and "hard".
+ anti_affinity: hard
+
+ ## Whether to enable default security roles; possible values are: true and false
+ enable_default_security_roles: true
+ ## Name of the secret containing the Process Federation Server administration passwords, such as ltpaPassword, oidcClientPassword, sslKeyPassword
+ admin_secret_name: ibm-pfs-admin-secret
+ ## Name of the secret containing the files that will be mounted in the /config/configDropins/overrides folder
+ config_dropins_overrides_secret: ""
+ ## Name of the secret containing the files that will be mounted in the /config/resources/security folder
+ resources_security_secret: ""
+ ## Name of the custom libraries containing the files that will be mounted in the /config/resources/libs folder
+ custom_libs_pvc: ""
+ ## Whether to enable the notification server; possible values are: true and false
+ enable_notification_server: false
+ ## The secret that contains the Transport Layer Security (TLS) key and certificate for external HTTPS access. You can enter the secret name here.
+ ## If you do not want to use the customized external TLS certificate, leave it empty.
+ external_tls_secret:
+ ## Certificate authority (CA) used to sign the external TLS secret. It is stored in the secret with the TLS key and certificate. You can enter the secret name here.
+ ## If you don't want to use the customized CA to sign the external TLS certificate, leave it empty.
+ external_tls_ca_secret:
+
+ ## Specify whether to use the built-in monitoring capability
+ monitor_enabled: false
+
+ tls:
+ ## Existing TLS secret containing tls.key and tls.crt
+ tls_secret_name:
+ ## Existing TLS trust secret list
+ tls_trust_list:
+
+ resources:
+ requests:
+ ## Requested amount of CPU for PFS configuration
+ cpu: 500m
+ ## Requested amount of memory for PFS configuration
+ memory: 512Mi
+ limits:
+ ## CPU limit for PFS configuration
+ cpu: 2
+ ## Memory limit for PFS configuration
+ memory: 4Gi
+
+ liveness_probe:
+ ## Number of seconds after Process Federation Server container starts before the liveness probe is initiated
+ initial_delay_seconds: 300
+ readiness_probe:
+ ## Number of seconds after Process Federation Server container starts before the readiness probe is initiated
+ initial_delay_seconds: 240
+
+ saved_searches:
+ ## Name of the Elasticsearch index used to store saved searches
+ index_name: ibmpfssavedsearches
+ ## Number of shards of the Elasticsearch index used to store saved searches
+ index_number_of_shards: 3
+ ## Number of replicas (pods) of the Elasticsearch index used to store saved searches
+ index_number_of_replicas: 1
+ ## Batch size used when retrieving saved searches
+ index_batch_size: 100
+ ## Amount of time before considering an update lock as expired. Valid values are numbers with a trailing 'm' or 's' for minutes or seconds
+ update_lock_expiration: 5m
+ ## Amount of time before considering a unique constraint as expired. Valid values are numbers with a trailing 'm' or 's' for minutes or seconds
+ unique_constraint_expiration: 5m
+
+ security:
+ sso:
+ ## The ssoDomainNames property of the <webAppSecurity> tag
+ domain_name:
+ ## The ssoCookieName property of the <webAppSecurity> tag
+ cookie_name: "ltpatoken2"
+ ltpa:
+ ## The keysFileName property of the <ltpa> tag
+ filename: "ltpa.keys"
+ ## The expiration property of the <ltpa> tag
+ expiration: "120m"
+ ## The monitorInterval property of the <ltpa> tag
+ monitor_interval: "60s"
+ ## The sslProtocol property of the <ssl> tag used as default SSL config
+ ssl_protocol: SSL
+
+ executor:
+ ## Value of the maxThreads property of the <executor> tag
+ max_threads: "80"
+ ## Value of the coreThreads property of the <executor> tag
+ core_threads: "40"
+
+ rest:
+ ## Value of the userGroupCheckInterval property of the PFS REST configuration tag
+ user_group_check_interval: "300s"
+ ## Value of the systemStatusCheckInterval property of the PFS REST configuration tag
+ system_status_check_interval: "60s"
+ ## Value of the bdFieldsCheckInterval property of the PFS REST configuration tag
+ bd_fields_check_interval: "300s"
+
+ custom_env_variables:
+ ## Names of the custom environment variables defined in the secret referenced in pfs.customEnvVariables.secret
+ names:
+ # - name: MY_CUSTOM_ENVIRONMENT_VARIABLE
+ ## Secret holding custom environment variables
+ secret:
+
+ ## log trace configuration
+ logs:
+ ## Format for printing logs on the console
+ console_format: "json"
+ ## Log level for printing logs on the console
+ console_log_level: "INFO"
+ ## Source of the logs for printing on the console
+ console_source: "message,trace,accessLog,ffdc,audit"
+ ## Format for printing message logs on the console
+ message_format: "basic"
+ ## Format for printing trace logs on the console
+ trace_format: "ENHANCED"
+ ## Specification for printing trace logs
+ trace_specification: "*=info"
+ storage:
+ ## Use Dynamic Provisioning for PFS Logs Data Storage
+ use_dynamic_provisioning: true
+ ## The minimum size of the persistent volume mounted as the PFS Liberty server /logs folder
+ size: 5Gi
+ ## Storage class of the persistent volume mounted as the PFS Liberty server /logs folder
+ storage_class: "{{ shared_configuration.storage_configuration.sc_medium_file_storage_classname }}"
+
+ ## When PFS is deployed in an environment that includes the Resource Registry,
+ ## the following additional parameters can be used to configure the integration between PFS and the Resource Registry
+ dba_resource_registry:
+ ## Time to live of the lease that creates the PFS entry in the DBA Resource Registry, in seconds.
+ lease_ttl: 120
+ ## The interval at which to check that PFS is running, in seconds.
+ pfs_check_interval: 10
+ ## The number of seconds after which PFS will be considered as not running if no connection can be performed
+ pfs_connect_timeout: 10
+ ## The number of seconds after which PFS will be considered as not running if it has not yet responded
+ pfs_response_timeout: 30
+ ## The key under which PFS should be registered in the DBA Service Registry when running
+ pfs_registration_key: /dba/appresources/IBM_PFS/PFS_SYSTEM
+ resources:
+ limits:
+ ## Memory limit for PFS and RR integration pod
+ memory: '512Mi'
+ ## CPU limit for PFS and RR integration pod
+ cpu: '500m'
+ requests:
+ ## Requested amount of memory for PFS and RR integration pod
+ memory: '512Mi'
+ ## Requested amount of CPU for PFS and RR integration pod
+ cpu: '200m'
+
+ ########################################################################
+ ########          Embedded Elasticsearch configuration         ########
+ ########################################################################
+ elasticsearch_configuration:
+ es_image:
+ ## Elasticsearch image
+ repository: cp.icr.io/cp/cp4a/baw/pfs-elasticsearch-prod
+ ## Elasticsearch image tag
+ tag: "20.0.2"
+ ## Elasticsearch image pull policy
+ pull_policy: IfNotPresent
+ es_init_image:
+ ## The image used by the privileged init container to configure Elasticsearch system settings.
+ ## This value is only relevant if elasticsearch_configuration.privileged is set to true
+ repository: cp.icr.io/cp/cp4a/baw/pfs-init-prod
+ ## The image tag for Elasticsearch init container
+ tag: "20.0.2"
+ ## The pull policy for Elasticsearch init container
+ pull_policy: IfNotPresent
+ es_nginx_image:
+ ## The name of the Nginx docker image to be used by Elasticsearch pods
+ repository: cp.icr.io/cp/cp4a/baw/pfs-nginx-prod
+ ## The image tag of the Nginx docker image to be used by Elasticsearch pods
+ tag: "20.0.2"
+ ## The pull policy for the Nginx docker image to be used by Elasticsearch pods
+ pull_policy: IfNotPresent
+
+ ## Number of initial Elasticsearch pods
+ replicas: 1
+ ## How the HTTPS endpoint service should be published. The possible values are ClusterIP and NodePort
+ service_type: ClusterIP
+ ## The port to which the Elasticsearch server HTTPS endpoint will be exposed externally.
+ ## This parameter is relevant only if elasticsearch_configuration.service_type is set to NodePort
+ external_port:
+ ## The elasticsearch admin secret that contains the username, password and .htpasswd.
+ ## If not provided, the default admin secret named "{{ meta.name }}-elasticsearch-admin-secret" is used.
+ admin_secret_name:
+ ## Whether Kubernetes "may" (soft) or "must not" (hard) deploy Elasticsearch pods onto the same node
+ ## The possible values are "soft" and "hard"
+ anti_affinity: hard
+ ## Name of a service account to use.
+ ## If elasticsearch_configuration.privileged is set to true, then this service account must allow running privileged containers.
+ ## If not provided, the default service account named "{{ meta.name }}-elasticsearch-service-account" is used.
+ service_account:
+ ## When set to true, a privileged container will be created to execute the appropriate sysctl commands so that the node running the pods matches the Elasticsearch requirements.
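+ ## (Illustrative note: in practice this means raising vm.max_map_count to Elasticsearch's
+ ## documented minimum of 262144, i.e., the equivalent of "sysctl -w vm.max_map_count=262144".)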
+ privileged: true
+ ## Initial delay for liveness and readiness probes of Elasticsearch pods
+ probe_initial_delay: 90
+ ## The JVM heap size to allocate to each Elasticsearch pod
+ heap_size: "1024m"
+ ## Specify whether to use the built-in monitoring capability
+ monitor_enabled: false
+
+ resources:
+ limits:
+ ## Memory limit for Elasticsearch configuration
+ memory: "2Gi"
+ ## CPU limit for Elasticsearch configuration
+ cpu: "1000m"
+ requests:
+ ## Requested amount of memory for Elasticsearch configuration
+ memory: "1Gi"
+ ## Requested amount of CPU for Elasticsearch configuration
+ cpu: "100m"
+
+ storage:
+ ## Whether to persist the Elasticsearch data. Set to false for non-production or trial-only deployments.
+ persistent: true
+ ## Set to true to use a dynamic storage provisioner
+ use_dynamic_provisioning: true
+ ## The minimum size of the persistent volume
+ size: 10Gi
+ ## Storage class name for Elasticsearch persistent storage
+ storage_class: "{{ shared_configuration.storage_configuration.sc_fast_file_storage_classname }}"
+
+ snapshot_storage:
+ ## Whether to persist the Elasticsearch snapshot storage. Set to true for production deployments.
+ enabled: false
+ ## Set to true to use a dynamic storage provisioner
+ use_dynamic_provisioning: true
+ ## The minimum size of the persistent volume
+ size: 30Gi
+ ## Storage class name for Elasticsearch persistent snapshot storage
+ storage_class_name: ""
+ ## By default, a new persistent volume claim is created. Specify an existing claim here if one is available.
+ existing_claim_name: ""
+
+
+ ########################################################################
+ ######## IBM FileNet Content Manager initialize configuration  ########
+ ########################################################################
+ initialize_configuration:
+ ic_domain_creation:
+ ## Provide a name for the domain
+ domain_name: "P8DOMAIN"
+ ## The encryption strength
+ encryption_key: "128"
+ ic_ldap_creation:
+ ## Administrator user
+ ic_ldap_admin_user_name:
+ - ""
+ ## Administrator group
+ ic_ldap_admins_groups_name:
+ - ""
+ ## Name of the LDAP directory
+ ic_ldap_name: "ldap_name"
+ ic_obj_store_creation:
+ object_stores:
+ ## Configuration for the document object store
+ ## Display name for the document object store to create
+ - oc_cpe_obj_store_display_name: "DOCS"
+ ## Symbolic name for the document object store to create
+ oc_cpe_obj_store_symb_name: "DOCS"
+ oc_cpe_obj_store_conn:
+ ## Object store connection name
+ name: "DOCS_connection" #database connection name
+ ## The name of the site
+ site_name: "InitialSite"
+ ## Add the name of the object store database
+ dc_os_datasource_name: "FNDSDOCS"
+ ## The XA datasource
+ dc_os_xa_datasource_name: "FNDSDOCSXA"
+ ## Admin user group
+ oc_cpe_obj_store_admin_user_groups:
+ - ""
+ ## An array of users with access to the object store
+ oc_cpe_obj_store_basic_user_groups:
+ ## Specify whether to enable add-ons
+ oc_cpe_obj_store_addons: true
+ ## Add-ons to enable for Content Platform Engine
+ oc_cpe_obj_store_addons_list:
+ - "{CE460ADD-0000-0000-0000-000000000004}"
+ - "{CE460ADD-0000-0000-0000-000000000001}"
+ - "{CE460ADD-0000-0000-0000-000000000003}"
+ - "{CE460ADD-0000-0000-0000-000000000005}"
+ - "{CE511ADD-0000-0000-0000-000000000006}"
+ - "{CE460ADD-0000-0000-0000-000000000008}"
+ - "{CE460ADD-0000-0000-0000-000000000007}"
+ - "{CE460ADD-0000-0000-0000-000000000009}"
+ - "{CE460ADD-0000-0000-0000-00000000000A}"
+ - "{CE460ADD-0000-0000-0000-00000000000B}"
"{CE460ADD-0000-0000-0000-00000000000D}" + - "{CE511ADD-0000-0000-0000-00000000000F}" + ## Provide a name for the Advance Storage Area + oc_cpe_obj_store_asa_name: "demo_storage" + ## Provide a name for the file system storage device + oc_cpe_obj_store_asa_file_systems_storage_device_name: "demo_file_system_storage" + ## The root directory path for the object store storage area + oc_cpe_obj_store_asa_root_dir_path: "/opt/ibm/asa/os01_storagearea" + ## Specify whether to enable workflow for the object store + oc_cpe_obj_store_enable_workflow: false + ## Specify a name for the workflow region + oc_cpe_obj_store_workflow_region_name: "" + ## Specify the number of the workflow region + oc_cpe_obj_store_workflow_region_number: 1 + ## Specify a table space for the workflow data + oc_cpe_obj_store_workflow_data_tbl_space: "VWDATA_TS" + ## Optionally specify a table space for the workflow index + oc_cpe_obj_store_workflow_index_tbl_space: "VWINDEX_TS" + ## Optionally specify a table space for the workflow blob. + oc_cpe_obj_store_workflow_blob_tbl_space: "VWBLOB_TS" + ## Designate an LDAP group for the workflow admin group. + oc_cpe_obj_store_workflow_admin_group: "" + ## Designate an LDAP group for the workflow config group + oc_cpe_obj_store_workflow_config_group: "" + ## Default format for date and time + oc_cpe_obj_store_workflow_date_time_mask: "mm/dd/yy hh:tt am" + ## Locale for the workflow + oc_cpe_obj_store_workflow_locale: "en" + ## Provide a name for the connection point + oc_cpe_obj_store_workflow_pe_conn_point_name: "" + + ## Configuration for the design object store + ## Display name for the design object store to create + - oc_cpe_obj_store_display_name: "DOS" + ## ## Symbolic name for the document object store to create + oc_cpe_obj_store_symb_name: "DOS" + oc_cpe_obj_store_conn: + ## ## Object store connection name + name: "DOS_connection" #database connection name + ## The name of the site + site_name: "InitialSite" + ## Add the name of the object store database + dc_os_datasource_name: "FNDSDOS" + ## The XA datasource + dc_os_xa_datasource_name: "FNDSDOSXA" + ## Admin user group + oc_cpe_obj_store_admin_user_groups: + - "" + ## An array of users with access to the object store + oc_cpe_obj_store_basic_user_groups: + ## Specify whether to enable add-ons + oc_cpe_obj_store_addons: true + ## Add-ons to enable for Content Platform Engine + oc_cpe_obj_store_addons_list: + - "{CE460ADD-0000-0000-0000-000000000004}" + - "{CE460ADD-0000-0000-0000-000000000001}" + - "{CE460ADD-0000-0000-0000-000000000003}" + - "{CE460ADD-0000-0000-0000-000000000005}" + - "{CE511ADD-0000-0000-0000-000000000006}" + - "{CE460ADD-0000-0000-0000-000000000008}" + - "{CE460ADD-0000-0000-0000-000000000007}" + - "{CE460ADD-0000-0000-0000-000000000009}" + - "{CE460ADD-0000-0000-0000-00000000000A}" + - "{CE460ADD-0000-0000-0000-00000000000B}" + - "{CE460ADD-0000-0000-0000-00000000000D}" + - "{CE511ADD-0000-0000-0000-00000000000F}" + ## Provide a name for the Advance Storage Area + oc_cpe_obj_store_asa_name: "demo_storage" + ## Provide a name for the file system storage device + oc_cpe_obj_store_asa_file_systems_storage_device_name: "demo_file_system_storage" + ## The root directory path for the object store storage area + oc_cpe_obj_store_asa_root_dir_path: "/opt/ibm/asa/os02_storagearea" + ## Specify whether to enable workflow for the object store + oc_cpe_obj_store_enable_workflow: false + ## Specify a name for the workflow region + oc_cpe_obj_store_workflow_region_name: "" + ## Specify the number of the workflow 
+          oc_cpe_obj_store_workflow_region_number: 2
+          ## Specify a table space for the workflow data
+          oc_cpe_obj_store_workflow_data_tbl_space: "VWDATA_TS"
+          ## Optionally specify a table space for the workflow index
+          oc_cpe_obj_store_workflow_index_tbl_space: "VWINDEX_TS"
+          ## Optionally specify a table space for the workflow blob.
+          oc_cpe_obj_store_workflow_blob_tbl_space: "VWBLOB_TS"
+          ## Designate an LDAP group for the workflow admin group.
+          oc_cpe_obj_store_workflow_admin_group: ""
+          ## Designate an LDAP group for the workflow config group
+          oc_cpe_obj_store_workflow_config_group: ""
+          ## Default format for date and time
+          oc_cpe_obj_store_workflow_date_time_mask: "mm/dd/yy hh:tt am"
+          ## Locale for the workflow
+          oc_cpe_obj_store_workflow_locale: "en"
+          ## Provide a name for the connection point
+          oc_cpe_obj_store_workflow_pe_conn_point_name: ""
+
+        ## Configuration for the target object store
+        ## Display name for the target object store to create
+        - oc_cpe_obj_store_display_name: "TOS"
+          ## Symbolic name for the target object store to create
+          oc_cpe_obj_store_symb_name: "TOS"
+          oc_cpe_obj_store_conn:
+            ## Object store connection name
+            name: "TOS_connection" #database connection name
+            ## The name of the site
+            site_name: "InitialSite"
+            ## Add the name of the object store database
+            dc_os_datasource_name: "FNDSTOS"
+            ## The XA datasource
+            dc_os_xa_datasource_name: "FNDSTOSXA"
+          ## Admin user group
+          oc_cpe_obj_store_admin_user_groups:
+          - ""
+          ## An array of users with access to the object store
+          oc_cpe_obj_store_basic_user_groups:
+          ## Specify whether to enable add-ons
+          oc_cpe_obj_store_addons: true
+          ## Add-ons to enable for Content Platform Engine
+          oc_cpe_obj_store_addons_list:
+          - "{CE460ADD-0000-0000-0000-000000000004}"
+          - "{CE460ADD-0000-0000-0000-000000000001}"
+          - "{CE460ADD-0000-0000-0000-000000000003}"
+          - "{CE460ADD-0000-0000-0000-000000000005}"
+          - "{CE511ADD-0000-0000-0000-000000000006}"
+          - "{CE460ADD-0000-0000-0000-000000000008}"
+          - "{CE460ADD-0000-0000-0000-000000000007}"
+          - "{CE460ADD-0000-0000-0000-000000000009}"
+          - "{CE460ADD-0000-0000-0000-00000000000A}"
+          - "{CE460ADD-0000-0000-0000-00000000000B}"
+          - "{CE460ADD-0000-0000-0000-00000000000D}"
+          - "{CE511ADD-0000-0000-0000-00000000000F}"
+          ## Provide a name for the Advanced Storage Area
+          oc_cpe_obj_store_asa_name: "demo_storage"
+          ## Provide a name for the file system storage device
+          oc_cpe_obj_store_asa_file_systems_storage_device_name: "demo_file_system_storage"
+          ## The root directory path for the object store storage area
+          oc_cpe_obj_store_asa_root_dir_path: "/opt/ibm/asa/os03_storagearea"
+          ## Specify whether to enable workflow for the object store
+          oc_cpe_obj_store_enable_workflow: true
+          ## Specify a name for the workflow region
+          oc_cpe_obj_store_workflow_region_name: ""
+          ## Specify the number of the workflow region
+          oc_cpe_obj_store_workflow_region_number: 3
+          ## Specify a table space for the workflow data
+          oc_cpe_obj_store_workflow_data_tbl_space: "VWDATA_TS"
+          ## Optionally specify a table space for the workflow index
+          oc_cpe_obj_store_workflow_index_tbl_space: "VWINDEX_TS"
+          ## Optionally specify a table space for the workflow blob.
+          oc_cpe_obj_store_workflow_blob_tbl_space: "VWBLOB_TS"
+          ## Designate an LDAP group for the workflow admin group.
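+          ## (When workflow is enabled for an object store, as it is for this TOS store,
+          ## these empty group values typically need to be set to groups from your
+          ## LDAP directory before initialization can succeed.)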
+          oc_cpe_obj_store_workflow_admin_group: ""
+          ## Designate an LDAP group for the workflow config group
+          oc_cpe_obj_store_workflow_config_group: ""
+          ## Default format for date and time
+          oc_cpe_obj_store_workflow_date_time_mask: "mm/dd/yy hh:tt am"
+          ## Locale for the workflow
+          oc_cpe_obj_store_workflow_locale: "en"
+          ## Provide a name for the connection point
+          oc_cpe_obj_store_workflow_pe_conn_point_name: "cpe_conn_tos"
+
+  ########################################################################
+  ########   IBM Business Automation Insights configuration      ########
+  ########################################################################
+  bai_configuration:
+    imageCredentials:
+      registry: cp.icr.io/cp/cp4a
+
+    # Set to true to automatically create the OpenShift routes when sc_deployment_platform is set
+    # to OCP or ROKS.
+    createRoutes: false
+
+    # Set to true to enable the Flink job for sending events to HDFS.
+    ingestion:
+      install: false
+
+    # Set to true to enable the Flink job for Digital Worker.
+    adw:
+      install: false
+
+    # Set to false to disable the Flink job for BAW.
+    bpmn:
+      install: true
+
+    # Set to true to enable the Flink job for BAWAdv.
+    bawadv:
+      install: false
+
+    # Set to false to disable the Flink job for ICM.
+    icm:
+      install: true
+
+    # Set to true to enable the Flink job for ODM.
+    odm:
+      install: false
+
+    # Set to true to enable the Flink job for Content.
+    content:
+      install: false
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_workstreams.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_workstreams.yaml
new file mode 100644
index 00000000..d3269f62
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_FC_workstreams.yaml
@@ -0,0 +1,1663 @@
+###############################################################################
+#
+# Licensed Materials - Property of IBM
+#
+# (C) Copyright IBM Corp. 2020. All Rights Reserved.
+#
+# US Government Users Restricted Rights - Use, duplication or
+# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+#
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+  name: workstreams
+  labels:
+    app.kubernetes.io/instance: ibm-dba
+    app.kubernetes.io/managed-by: ibm-dba
+    app.kubernetes.io/name: ibm-dba
+    release: 20.0.2
+spec:
+  appVersion: 20.0.2
+  ##########################################################################
+  ## This section contains the shared configuration for all CP4A components #
+  ##########################################################################
+  shared_configuration:
+
+    ## Business Automation Workflow (BAW) license. The possible values are: user, non-production, and production.
+    ## This value could be different from the other licenses in the CR.
+    sc_deployment_baw_license: ""
+
+    ## FileNet Content Manager (FNCM) license. The possible values are: user, non-production, and production.
+    ## This value could be different from the other licenses in the CR.
+    sc_deployment_fncm_license: ""
+
+    ## Use this parameter to specify the license for the CP4A deployment. The possible values are
+    ## non-production and production; if not set, the license defaults to production.
+    ## This value could be different from the other licenses in the CR.
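+    ## For example, for a production deployment you would set:
+    ##   sc_deployment_license: "production"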
+    sc_deployment_license: ""
+
+    ## All CP4A components must use/share the image_pull_secrets to pull images
+    image_pull_secrets:
+    - admin.registrykey
+
+    ## All CP4A components must use/share the same docker image repository. For example, if IBM Entitled Registry is used, then
+    ## it should be "cp.icr.io". Otherwise, it will be a local docker registry.
+    sc_image_repository: cp.icr.io
+
+    ## For non-OCP (e.g., CNCF platforms such as AWS, GKE, etc.), this parameter is required
+    sc_run_as_user:
+
+    images:
+      keytool_job_container:
+        repository: cp.icr.io/cp/cp4a/baw/dba-keytool-jobcontainer
+        tag: 20.0.2
+      dbcompatibility_init_container:
+        repository: cp.icr.io/cp/cp4a/baw/dba-dbcompatibility-initcontainer
+        tag: 20.0.2
+      keytool_init_container:
+        repository: cp.icr.io/cp/cp4a/baw/dba-keytool-initcontainer
+        tag: 20.0.2
+      umsregistration_initjob:
+        repository: cp.icr.io/cp/cp4a/baw/dba-umsregistration-initjob
+        tag: 20.0.2
+
+    ## All CP4A components should use this pull_policy as the default, but it can be overridden by each component
+    pull_policy: IfNotPresent
+
+    ## All CP4A components must use/share the root_ca_secret in order for integration
+    root_ca_secret: icp4a-root-ca
+
+    ## CP4A patterns or capabilities to be deployed. This CR represents the "workstreams" pattern, which includes the following
+    ## mandatory components: ban (Business Automation Navigator), ums (User Management Service), rr (Resource Registry), app_engine (Application Engine)
+    sc_deployment_patterns: workstreams
+
+    ## The optional components to be installed if listed here. This is normally populated by the User script based on input from the user.
+    ## The optional components are: bai
+    sc_optional_components:
+
+    ## The deployment type as selected by the user. Possible values are: demo, enterprise
+    sc_deployment_type: enterprise
+
+    ## The platform to be deployed specified by the user. Possible values are: OCP and other. This is normally populated by the User script
+    ## based on input from the user.
+    sc_deployment_platform:
+
+    ## For OCP, this is used to create a route; you should input a valid hostname in the required field.
+    sc_deployment_hostname_suffix: "{{ meta.name }}."
+
+    ## If the root certificate authority (CA) key of the external service is not signed by the operator root CA key, provide the TLS certificate of
+    ## the external service to the component's truststore.
+    trusted_certificate_list: []
+
+    ## Shared encryption key secret name that is used for Workstream Services and Process Federation Server integration.
+    encryption_key_secret: icp4a-shared-encryption-key
+
+    ## On OCP 3.x and 4.x, the User script will populate these three (3) parameters based on your input for "enterprise" deployment.
+    ## If you are manually deploying without using the User script, then you must provide the different storage classes for the slow, medium
+    ## and fast storage parameters below. If you only have 1 storage class defined, then you can use that 1 storage class for all 3 parameters.
+    storage_configuration:
+      sc_slow_file_storage_classname: ""
+      sc_medium_file_storage_classname: ""
+      sc_fast_file_storage_classname: ""
+
+    ##############################################################################################
+    # Kafka client configuration for IBM Business Automation Insights and other ICP4A products.
+    #
+    # The customization of the following 4 parameters is required only if you have
+    # specified "bai" as part of the sc_optional_components to specify that Business Automation
+    # Insights must be installed.
+    #
+    # Otherwise, if Business Automation Insights is not being installed, there is no need to configure
+    # these parameters and they can be kept empty.
+    ##############################################################################################
+    kafka_configuration:
+      # Comma-separated list of host:port pairs for connection to the Kafka cluster.
+      # This field is mandatory for any Kafka configuration.
+      bootstrap_servers: ""
+      # Value for the Kafka security.protocol property
+      # Valid values: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL. Default: PLAINTEXT.
+      security_protocol:
+      # Value for the Kafka sasl.mechanism property
+      # Valid values: PLAIN, SCRAM-SHA-512. Default: PLAIN.
+      sasl_mechanism:
+      # If the Kafka server requires authentication or uses SSL communications, the value of this field
+      # must provide the name of a secret that holds the following keys as base64-encoded strings:
+      #   kafka-username: Kafka username; leave empty if no authentication
+      #   kafka-password: Kafka password; leave empty if no authentication
+      #   kafka-server-certificate: server certificate for SSL communications; leave empty if SSL protocol is not used
+      connection_secret_name:
+
+    ## The beginning section of LDAP configuration for CP4A
+    ldap_configuration:
+      ## The possible values are: "IBM Security Directory Server" or "Microsoft Active Directory"
+      lc_selected_ldap_type: ""
+
+      ## The name of the LDAP server to connect
+      lc_ldap_server: ""
+
+      ## The port of the LDAP server to connect. Some possible values are: 389, 636, etc.
+      lc_ldap_port: ""
+
+      ## The LDAP bind secret for LDAP authentication. The secret is expected to have ldapUsername and ldapPassword keys. Refer to Knowledge Center for more info.
+      lc_bind_secret: ldap-bind-secret
+
+      ## The LDAP base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc.
+      lc_ldap_base_dn: ""
+
+      ## Enable SSL/TLS for LDAP communication. Refer to Knowledge Center for more info.
+      lc_ldap_ssl_enabled: true
+
+      ## The name of the secret that contains the LDAP SSL/TLS certificate.
+      lc_ldap_ssl_secret_name: ""
+
+      ## The LDAP user name attribute. One possible value is "*:cn" for TDS and "user:sAMAccountName" for AD. Refer to Knowledge Center for more info.
+      lc_ldap_user_name_attribute: ""
+
+      ## The LDAP user display name attribute. One possible value is "cn" for TDS and "sAMAccountName" for AD. Refer to Knowledge Center for more info.
+      lc_ldap_user_display_name_attr: ""
+
+      ## The LDAP group base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc.
+      lc_ldap_group_base_dn: ""
+
+      ## The LDAP group name attribute. One possible value is "*:cn" for TDS and "*:cn" for AD. Refer to Knowledge Center for more info.
+      lc_ldap_group_name_attribute: "*:cn"
+
+      ## The LDAP group display name attribute. One possible value for both TDS and AD is "cn". Refer to Knowledge Center for more info.
+      lc_ldap_group_display_name_attr: "cn"
+
+      ## The LDAP group membership search filter string. One possible value is "(&(cn=%v)(|(objectclass=groupOfNames)(objectclass=groupOfUniqueNames)(objectclass=groupOfURLs))" for TDS
+      ## and "(&(cn=%v)(objectcategory=group))" for AD.
+      lc_ldap_group_membership_search_filter: ""
+
+      ## The LDAP group membership ID map. One possible value is "groupofnames:member" for TDS and "memberOf:member" for AD.
+      lc_ldap_group_member_id_map: ""
+
+      ## The User script will uncomment the section needed based on user's input from the User script. If you are deploying without the User script,
+      ## uncomment the necessary section (depending on whether you are using Active Directory (ad) or Tivoli Directory Service (tds)) accordingly.
+      # ad:
+      #   lc_ad_gc_host: ""
+      #   lc_ad_gc_port: ""
+      #   lc_user_filter: "(&(samAccountName=%v)(objectClass=user))"
+      #   lc_group_filter: "(&(samAccountName=%v)(objectclass=group))"
+      # tds:
+      #   lc_user_filter: "(&(cn=%v)(objectclass=person))"
+      #   lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))"
+
+    ## The beginning section of database configuration for CP4A
+    datasource_configuration:
+      ## The database configuration for the GCD datasource for CPE
+      dc_gcd_datasource:
+        ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle".
+        dc_database_type: ""
+        ## The GCD non-XA datasource name. The default value is "FNGCDDS".
+        dc_common_gcd_datasource_name: "FNGCDDS"
+        ## The GCD XA datasource name. The default value is "FNGCDDSXA".
+        dc_common_gcd_xa_datasource_name: "FNGCDDSXA"
+        ## Provide the database server name or IP address of the database server.
+        database_servername: ""
+        ## Provide the name of the database for the GCD for CPE. For example: "GCDDB"
+        database_name: ""
+        ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521"
+        database_port: ""
+        ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//<hostname>:1521/orcl"
+        dc_oracle_gcd_jdbc_url: ""
+
+        ## If the database type is Db2 HADR, then complete the rest of the parameters below.
+        ## Provide the database server name or IP address of the standby database server.
+        dc_hadr_standby_servername: ""
+        ## Provide the standby database server port. For Db2, the default is "50000".
+        dc_hadr_standby_port: ""
+        ## Provide the validation timeout. If you have no preference, keep the default value.
+        dc_hadr_validation_timeout: 15
+        ## Provide the retry interval. If you have no preference, keep the default value.
+        dc_hadr_retry_interval_for_client_reroute: 15
+        ## Provide the max # of retries. If you have no preference, keep the default value.
+        dc_hadr_max_retries_for_client_reroute: 3
+
+      ## The database configuration for the document object store (DOCS) datasource for CPE
+      dc_os_datasources:
+        ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle". This should be the same as the
+        ## GCD configuration above.
+        - dc_database_type: ""
+          ## The DOCS non-XA datasource name. The default value is "FNDSDOCS".
+          dc_common_os_datasource_name: "FNDSDOCS"
+          ## The DOCS XA datasource name. The default value is "FNDSDOCSXA".
+          dc_common_os_xa_datasource_name: "FNDSDOCSXA"
+          ## Provide the database server name or IP address of the database server. This should be the same as the
+          ## GCD configuration above.
+          database_servername: ""
+          ## Provide the name of the database for the object store 1 for CPE. For example: "OS1DB"
+          database_name: ""
+          ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521"
+          database_port: ""
+          ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//<hostname>:1521/orcl"
+          dc_oracle_os_jdbc_url: ""
+          ######################################################################################
+          ## If the database type is "Db2HADR", then complete the rest of the parameters below.
+          ## Otherwise, remove or comment out the rest of the parameters below.
+          ######################################################################################
+          dc_hadr_standby_servername: ""
+          ## Provide the standby database server port. For Db2, the default is "50000".
+          dc_hadr_standby_port: ""
+          ## Provide the validation timeout. If you have no preference, keep the default value.
+          dc_hadr_validation_timeout: 15
+          ## Provide the retry interval. If you have no preference, keep the default value.
+          dc_hadr_retry_interval_for_client_reroute: 15
+          ## Provide the max # of retries. If you have no preference, keep the default value.
+          dc_hadr_max_retries_for_client_reroute: 3
+        ## The database configuration for the target object store (TOS) datasource for CPE
+        - dc_database_type: ""
+          ## The TOS non-XA datasource name. The default value is "FNDSTOS".
+          dc_common_os_datasource_name: "FNDSTOS"
+          ## The TOS XA datasource name. The default value is "FNDSTOSXA".
+          dc_common_os_xa_datasource_name: "FNDSTOSXA"
+          ## Provide the database server name or IP address of the database server. This should be the same as the
+          ## GCD configuration above.
+          database_servername: ""
+          ## Provide the name of the database for the object store 1 for CPE. For example: "OS1DB"
+          database_name: ""
+          ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521"
+          database_port: ""
+          ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//<hostname>:1521/orcl"
+          dc_oracle_os_jdbc_url: ""
+          ######################################################################################
+          ## If the database type is "Db2HADR", then complete the rest of the parameters below.
+          ## Otherwise, remove or comment out the rest of the parameters below.
+          ######################################################################################
+          dc_hadr_standby_servername: ""
+          ## Provide the standby database server port. For Db2, the default is "50000".
+          dc_hadr_standby_port: ""
+          ## Provide the validation timeout. If you have no preference, keep the default value.
+          dc_hadr_validation_timeout: 15
+          ## Provide the retry interval. If you have no preference, keep the default value.
+          dc_hadr_retry_interval_for_client_reroute: 15
+          ## Provide the max # of retries. If you have no preference, keep the default value.
+          dc_hadr_max_retries_for_client_reroute: 3
+        ## The database configuration for the design object store (DOS) datasource for CPE
+        - dc_database_type: ""
+          ## The DOS non-XA datasource name. The default value is "FNDSDOS".
+          dc_common_os_datasource_name: "FNDSDOS"
+          ## The DOS XA datasource name. The default value is "FNDSDOSXA".
+          dc_common_os_xa_datasource_name: "FNDSDOSXA"
+          ## Provide the database server name or IP address of the database server. This should be the same as the
+          ## GCD configuration above.
+          database_servername: ""
+          ## Provide the name of the database for the object store 1 for CPE. For example: "OS1DB"
+          database_name: ""
+          ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521"
+          database_port: ""
+          ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//<hostname>:1521/orcl"
+          dc_oracle_os_jdbc_url: ""
+          ######################################################################################
+          ## If the database type is "Db2HADR", then complete the rest of the parameters below.
+          ## Otherwise, remove or comment out the rest of the parameters below.
+          ######################################################################################
+          dc_hadr_standby_servername: ""
+          ## Provide the standby database server port. For Db2, the default is "50000".
+          dc_hadr_standby_port: ""
+          ## Provide the validation timeout. If you have no preference, keep the default value.
+          dc_hadr_validation_timeout: 15
+          ## Provide the retry interval. If you have no preference, keep the default value.
+          dc_hadr_retry_interval_for_client_reroute: 15
+          ## Provide the max # of retries. If you have no preference, keep the default value.
+          dc_hadr_max_retries_for_client_reroute: 3
+
+      ## The database configuration for ICN (Navigator) - aka BAN (Business Automation Navigator)
+      dc_icn_datasource:
+        ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle". This should be the same as the
+        ## GCD and object store configuration above.
+        dc_database_type: ""
+        ## Provide the ICN datasource name. The default value is "ECMClientDS".
+        dc_common_icn_datasource_name: "ECMClientDS"
+        database_servername: ""
+        ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521"
+        database_port: ""
+        ## Provide the name of the database for ICN (Navigator). For example: "ICNDB"
+        database_name: ""
+        ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//<hostname>:1521/orcl"
+        dc_oracle_icn_jdbc_url: ""
+        ######################################################################################
+        ## If the database type is "Db2HADR", then complete the rest of the parameters below.
+        ## Otherwise, remove or comment out the rest of the parameters below.
+        ######################################################################################
+        dc_hadr_standby_servername: ""
+        ## Provide the standby database server port. For Db2, the default is "50000".
+        dc_hadr_standby_port: ""
+        ## Provide the validation timeout. If you have no preference, keep the default value.
+        dc_hadr_validation_timeout: 15
+        ## Provide the retry interval. If you have no preference, keep the default value.
+        dc_hadr_retry_interval_for_client_reroute: 15
+        ## Provide the max # of retries. If you have no preference, keep the default value.
+        dc_hadr_max_retries_for_client_reroute: 3
+
+      ## The database configuration for UMS (User Management Service)
+      dc_ums_datasource:
+        ## Provide the database type from your infrastructure. The possible values are "db2" or "oracle". This should be the same as the
+        ## other datasource configuration above. Db2 with HADR is automatically activated if dc_ums_oauth_alternate_hosts and dc_ums_oauth_alternate_ports
+        ## are set.
+        dc_ums_oauth_type: ""
+        ## Provide the database server name or IP address of the database server.
+        dc_ums_oauth_host: ""
+        ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+        dc_ums_oauth_port: ""
+        ## Provide the name of the database for UMS. For example: "UMSDB"
+        dc_ums_oauth_name: ""
+        dc_ums_oauth_schema: OAuthDBSchema
+        dc_ums_oauth_ssl: true
+        dc_ums_oauth_ssl_secret_name: ""
+        dc_ums_oauth_driverfiles:
+        dc_ums_oauth_alternate_hosts:
+        dc_ums_oauth_alternate_ports:
+
+        ## The database configuration for teamserver
+        ## Provide the database type from your infrastructure. The possible values are "db2" or "oracle". This should be the same as the
+        ## other datasource configuration above. Db2 with HADR is automatically activated if dc_ums_teamserver_alternate_hosts and dc_ums_teamserver_alternate_ports
+        ## are set.
+        dc_ums_teamserver_type: ""
+        dc_ums_teamserver_host: ""
+        ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+        dc_ums_teamserver_port: ""
+        ## Provide the name of the database for UMS teamserver. For example: "UMSDB"
+        dc_ums_teamserver_name: ""
+        dc_ums_teamserver_ssl: true
+        dc_ums_teamserver_ssl_secret_name: ""
+        dc_ums_teamserver_driverfiles:
+        dc_ums_teamserver_alternate_hosts:
+        dc_ums_teamserver_alternate_ports:
+
+
+  ########################################################################
+  ########    IBM Business Automation Workflow configuration     ########
+  ########################################################################
+  baw_configuration:
+    ## The baw_configuration is a list. You can deploy multiple instances of Workflow server and assign different configurations for each instance.
+    ## For each instance, baw_configuration.name and baw_configuration.name.hostname must have different values.
+    - name: instance1
+      ## Whether to configure Process Portal for a federated environment
+      host_federated_portal: true
+      ## Workflow server service type.
+      service_type: "Route"
+      ## Workflow server hostname
+      hostname: ""
+      ## Workflow server port
+      port: 443
+      ## Workflow server nodeport
+      nodeport: 30026
+      ## Workflow server environment type. The possible values are "Development", "Test", "Staging", or "Production"
+      env_type: "Production"
+      ## Workflow server capability
+      capabilities: "workstreams"
+      ## Workflow server replica count
+      replicas: 1
+      ## Provide the Workflow server default administrator ID
+      admin_user: ""
+      ## The name of the Workflow server admin secret
+      admin_secret_name: "baw-admin-secret"
+      ## Specify whether to use the built-in monitoring capability
+      monitor_enabled: false
+
+      ## For the scenario where the customer has implemented their own Portal, e.g., https://portal.mycompany.com
+      customized_portal_endpoint: ""
+
+      federated_portal:
+        ## Content security policy additional origins for federated on-premises BAW systems, e.g., ["https://on-prem-baw1","https://on-prem-baw2"]
+        content_security_policy_additional_origins: []
+        external_connection_timeout: ""
+
+      tls:
+        ## Workflow server TLS secret that contains tls.key and tls.crt.
+        tls_secret_name: ibm-baw-tls
+        ## Workflow server TLS trust list.
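+        ## For example (hypothetical secret names), a list of secrets that each hold a certificate to trust:
+        ##   tls_trust_list:
+        ##   - baw-trusted-cert-1-secret
+        ##   - baw-trusted-cert-2-secret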
+        tls_trust_list:
+      image:
+        ## Workflow image repository URL
+        repository: cp.icr.io/cp/cp4a/baw/workflow-server
+        ## Image tag for the Workflow server container
+        tag: 20.0.2
+        ## Pull policy for the Workflow container
+        pullPolicy: IfNotPresent
+      pfs_bpd_database_init_job:
+        ## Database initialization image repository URL for Process Federation Server
+        repository: cp.icr.io/cp/cp4a/baw/pfs-bpd-database-init-prod
+        ## Image tag for database initialization for Process Federation Server
+        tag: 20.0.2
+        ## Pull policy for the Process Federation Server database initialization image
+        pullPolicy: IfNotPresent
+      upgrade_job:
+        ## Workflow server database handling image repository URL
+        repository: cp.icr.io/cp/cp4a/baw/workflow-server-dbhandling
+        ## Image tag for Workflow server database handling
+        tag: 20.0.2
+        ## Pull policy for Workflow server database handling
+        pullPolicy: IfNotPresent
+      bas_auto_import_job:
+        ## BAS toolkit init image repository URL
+        repository: cp.icr.io/cp/cp4a/baw/toolkit-installer
+        ## Image tag for the BAS toolkit init image
+        tag: 20.0.2
+        ## Pull policy for the BAS toolkit init image
+        pullPolicy: IfNotPresent
+      ibm_workplace_job:
+        ## IBM Workplace deployment job image repository URL
+        repository: cp.icr.io/cp/cp4a/baw/iaws-ibm-workplace
+        ## Image tag for the IBM Workplace deployment job image
+        tag: 20.0.2
+        ## Pull policy for the IBM Workplace deployment job image
+        pull_policy: IfNotPresent
+
+      ## The database configuration for Workflow server
+      database:
+        ## Whether to enable Secure Sockets Layer (SSL) support for the Workflow server database connection
+        ssl: false
+        ## Secret name for storing the database TLS certificate when an SSL connection is enabled
+        sslsecretname: ""
+        ## Workflow server database type
+        type: "DB2"
+        ## Workflow server database server name.
+        server_name: ""
+        ## Workflow server database name
+        database_name: ""
+        ## Workflow server database port. For DB2, the default value is "50000"
+        port: ""
+        ## Workflow server database secret name
+        secret_name: ""
+        ## Workflow server database connection pool maximum number of physical connections
+        cm_max_pool_size: 200
+        dbcheck:
+          # The maximum waiting time (seconds) to check the database initialization status
+          wait_time: 900
+          # The interval time (seconds) to check.
+          interval_time: 15
+        hadr:
+          ## Database standby host for high availability disaster recovery (HADR)
+          ## To enable database HADR, configure both standby host and port
+          standbydb_host:
+          ## Database standby port for HADR
+          standbydb_port:
+          ## Retry interval for HADR
+          retryinterval:
+          ## Maximum retries for HADR
+          maxretries:
+
+      ## The configuration for content integration
+      content_integration:
+        init_job_image:
+          ## Image name for the content integration container.
+          repository: cp.icr.io/cp/cp4a/baw/iaws-ps-content-integration
+          ## Image tag for the content integration container
+          tag: 20.0.2
+          ## Pull policy for the content integration container.
+          pull_policy: IfNotPresent
+        ## Domain name for content integration
+        domain_name: ""
+        ## Object Store name for content integration
+        object_store_name: ""
+        ## Admin secret for content integration
+        cpe_admin_secret: ""
+
+      ## Application Engine configuration. Because Application Engine is an array,
+      ## when there is only one Application Engine deployed along with this CR, the three parameters below are not required;
+      ## when there is more than one Application Engine deployed, the three parameters below are required, as in the sketch that follows.
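+      ## A minimal sketch of the multi-instance case, with hypothetical values:
+      ##   appengine:
+      ##     hostname: "ae-instance1.mycompany.com"
+      ##     port: "443"
+      ##     admin_secret_name: "ae-instance1-admin-secret"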
+      appengine:
+        ## App Engine hostname
+        hostname: ""
+        ## App Engine port
+        port: "443"
+        ## App Engine admin secret name
+        admin_secret_name: ""
+
+      ## The configuration for Resource Registry if you want to use an external Resource Registry
+      resource_registry:
+        ## Resource Registry host name
+        hostname: ""
+        ## Resource Registry port
+        port: 443
+        ## Resource Registry administrative secret
+        admin_secret_name: ""
+
+      ## The configuration for Java Messaging Service (JMS)
+      jms:
+        image:
+          ## Image name for the Java Messaging Service container
+          repository: cp.icr.io/cp/cp4a/baw/jms
+          ## Image tag for the Java Messaging Service container
+          tag: 20.0.2
+          ## Pull policy for the Java Messaging Service container
+          pull_policy: IfNotPresent
+        tls:
+          ## TLS secret name for Java Message Service (JMS)
+          tls_secret_name: ibm-jms-tls-secret
+        resources:
+          limits:
+            ## Memory limit for JMS configuration
+            memory: "2Gi"
+            ## CPU limit for JMS configuration
+            cpu: "1000m"
+          requests:
+            ## Requested amount of memory for JMS configuration
+            memory: "512Mi"
+            ## Requested amount of CPU for JMS configuration
+            cpu: "200m"
+        storage:
+          ## Whether to enable persistent storage for JMS
+          persistent: true
+          ## Size for JMS persistent storage
+          size: "1Gi"
+          ## Whether to enable dynamic provisioning for JMS persistent storage
+          use_dynamic_provisioning: true
+          ## Access modes for JMS persistent storage
+          access_modes:
+          - ReadWriteOnce
+          ## Storage class name for JMS persistent storage
+          storage_class: "{{ shared_configuration.storage_configuration.sc_fast_file_storage_classname }}"
+
+      ## Resource configuration
+      resources:
+        limits:
+          ## CPU limit for Workflow server.
+          cpu: 2
+          ## Memory limit for Workflow server
+          memory: 2096Mi
+        requests:
+          ## Requested amount of CPU for Workflow server
+          cpu: "500m"
+          ## Requested amount of memory for Workflow server.
+          memory: 1048Mi
+
+      ## Liveness and readiness probes configuration
+      probe:
+        ws:
+          liveness_probe:
+            ## Number of seconds after the Workflow server container starts before the liveness probe is initiated
+            initial_delay_seconds: 300
+          readinessProbe:
+            ## Number of seconds after the Workflow server container starts before the readiness probe is initiated
+            initial_delay_seconds: 240
+
+      ## Log trace configuration
+      logs:
+        ## Format for printing logs on the console
+        console_format: "json"
+        ## Log level for printing logs on the console
+        console_log_level: "INFO"
+        ## Source of the logs for printing on the console
+        console_source: "message,trace,accessLog,ffdc,audit"
+        ## Format for printing message logs on the console
+        message_format: "basic"
+        ## Format for printing trace logs on the console
+        trace_format: "ENHANCED"
+        ## Specification for printing trace logs
+        trace_specification: "*=info"
+
+      ## Storage configuration
+      storage:
+        ## Set to true to use a dynamic storage provisioner. If set to false, you must set existing_pvc_for_logstore and existing_pvc_for_dumpstore
+        use_dynamic_provisioning: true
+        ## The persistent volume claim for logs
+        existing_pvc_for_logstore: ""
+        ## The minimum size of the persistent volume mounted as the log store
+        size_for_logstore: "10Gi"
+        ## The persistent volume claim for dump files
+        existing_pvc_for_dumpstore: ""
+        ## The minimum size of the persistent volume mounted as the dump store
+        size_for_dumpstore: "10Gi"
+
+      ## JVM options separated with spaces, for example: -Dtest1=test -Dtest2=test2
+      jvm_customize_options:
+
+      ## Workflow server custom plain XML snippet
+      ## liberty_custom_xml: |+
+      ##
+      ##
+      ##
+      liberty_custom_xml:
+
+      ## Workflow server custom XML secret name that contains custom configuration in the Liberty server.xml
+      custom_xml_secret_name:
+
+      ## Workflow server Lombardi custom XML secret name that contains custom configuration in 100Custom.xml
+      lombardi_custom_xml_secret_name:
+
+  #####################################################################
+  ##           IBM App Engine production configuration              ##
+  #####################################################################
+  application_engine_configuration:
+    ## The application_engine_configuration is a list. You can deploy multiple instances of App Engine and assign different configurations for each instance.
+    ## For each instance, application_engine_configuration.name and application_engine_configuration.name.hostname must be assigned different values.
+    - name: instance1
+      # If you input a hostname and port here, they will always be used.
+      # If you are using pattern mode (shared_configuration.sc_deployment_patterns contains a value),
+      # then you don't need to fill in the hostname and port; shared_configuration.sc_deployment_hostname_suffix will be used to generate one.
+      # But if you haven't input a suffix and no hostname and port are assigned, an error will be reported in the operator log during deployment.
+      # For non-pattern mode you must assign a valid hostname and port here.
+      hostname:
+      port: 443
+      # Inside the admin secret, there are two required fields:
+      #   AE_DATABASE_PWD:
+      #   AE_DATABASE_USER:
+      admin_secret_name:
+      # The default admin user ID for Application Engine.
+      # The user ID should be the bootstrap admin ID for IBM Business Automation Navigator. It is case sensitive.
+      # The same ID should also be a User Management Service (UMS) admin user.
+      admin_user:
+      external_tls_secret:
+      external_connection_timeout: 90s
+      replica_size: 1
+      ## Optional when db2; required when oracle
+      user_custom_jdbc_drivers: false
+      service_type: Route
+      autoscaling:
+        enabled: false
+        max_replicas: 5
+        min_replicas: 2
+        target_cpu_utilization_percentage: 80
+      database:
+        # AE database host name or IP when the database is DB2
+        host:
+        # AE database name when the database is DB2
+        name:
+        # AE database port number when the database is DB2
+        port:
+        ## If you set up DB2 HADR and want to use it, you need to configure alternative_host and alternative_port; otherwise, leave them blank.
+        alternative_host:
+        alternative_port:
+        ## Only DB2 and Oracle are supported
+        type: db2
+        ## Required only when type is Oracle, both SSL and non-SSL. The format must be a pure Oracle descriptor like (DESCRIPTION=(ADDRESS=(PROTOCOL=TCP)(HOST=<host>)(PORT=<port>))(CONNECT_DATA=(SERVICE_NAME=<service_name>)))
+        oracle_url_without_wallet_directory:
+        enable_ssl: false
+        ## Required only when type is Oracle and enable_ssl is true. The format must be a pure Oracle descriptor. The SSO wallet directory must be specified and fixed to (MY_WALLET_DIRECTORY=/shared/resources/oracle/wallet).
+        oracle_url_with_wallet_directory:
+        ## Required only when enable_ssl is true, for both db2 and oracle database types
+        db_cert_secret_name:
+        ## Required only when type is oracle and enable_ssl is true.
+        oracle_sso_wallet_secret_name:
+        ## Optional. If it is empty, DBASB is the default for db2 and the AE_DATABASE_USER set in admin_secret_name is the default for oracle
+        current_schema: DBASB
+        initial_pool_size: 1
+        max_pool_size: 10
+        uv_thread_pool_size: 4
+        max_lru_cache_size: 1000
+        max_lru_cache_age: 600000
+        dbcompatibility_max_retries: 30
+        dbcompatibility_retry_interval: 10
+        ## The persistent volume claim for custom JDBC drivers if using custom JDBC drivers is enabled
+        custom_jdbc_pvc:
+      log_level:
+        node: info
+        browser: 2
+      content_security_policy:
+        enable: false
+        whitelist:
+      env:
+        max_size_lru_cache_rr: 1000
+        server_env_type: development
+        purge_stale_apps_interval: 86400000
+        apps_threshold: 100
+        stale_threshold: 172800000
+      images:
+        pull_policy: IfNotPresent
+        db_job:
+          repository: cp.icr.io/cp/cp4a/aae/solution-server-helmjob-db
+          tag: 20.0.2
+        solution_server:
+          repository: cp.icr.io/cp/cp4a/aae/solution-server
+          tag: 20.0.2
+      max_age:
+        auth_cookie: "900000"
+        csrf_cookie: "3600000"
+        static_asset: "2592000"
+        hsts_header: "2592000"
+      probe:
+        liveness:
+          failure_threshold: 5
+          initial_delay_seconds: 60
+          period_seconds: 10
+          success_threshold: 1
+          timeout_seconds: 180
+        readiness:
+          failure_threshold: 5
+          initial_delay_seconds: 10
+          period_seconds: 10
+          success_threshold: 1
+          timeout_seconds: 180
+      # Redis settings apply only when you set session.use_external_store to true
+      redis:
+        # Your external Redis host/IP
+        host: localhost
+        # Your external Redis port
+        port: 6379
+        ttl: 1800
+      resource_ae:
+        limits:
+          cpu: 2000m
+          memory: 2Gi
+        requests:
+          cpu: 300m
+          memory: 512Mi
+      resource_init:
+        limits:
+          cpu: 500m
+          memory: 256Mi
+        requests:
+          cpu: 100m
+          memory: 128Mi
+      session:
+        check_period: "3600000"
+        duration: "1800000"
+        max: "10000"
+        resave: "false"
+        rolling: "true"
+        save_uninitialized: "false"
+        # By setting this option to true, AE will use external Redis as the session storage,
+        # to support multiple AE pods
+        use_external_store: "false"
+      tls:
+        tls_trust_list: []
+      # If you want to make the replica size more than 1 for this cluster, then you must enable shared storage
+      share_storage:
+        enabled: false
+        # If you created the PV manually, then provide the PVC name to bind here
+        pvc_name:
+        auto_provision:
+          enabled: false
+          # Required if you enabled auto provisioning
+          storage_class:
+          size: 20Gi
+
+  ########################################################################
+  ########      IBM FileNet Content Manager configuration        ########
+  ########################################################################
+  ecm_configuration:
+
+    ## FNCM secret that contains the GCD DB user name and password, Object Store DB user name and password,
+    ## LDAP user and password, CPE user name and password, keystore password, LTPA password, etc.
+    fncm_secret_name: ibm-fncm-secret
+
+    ####################################
+    ## Start of configuration for CPE ##
+    ####################################
+    cpe:
+      ## The architecture of the cluster. This is the default for Linux on x86 and should not be changed.
+      arch:
+        amd64: "3 - Most preferred"
+
+      ## The number of replicas or pods to be deployed. The default is 1 replica and for high availability in a production env,
+      ## it is recommended to have 2 or more.
+      replica_count: 1
+
+      ## This is the image repository and tag that correspond to the image registry, which is where the image will be pulled.
+      image:
+        ## The default repository is the IBM Entitled Registry.
+        repository: cp.icr.io/cp/cp4a/fncm/cpe
+        tag: ga-555-p8cpe
+
+        ## This will override the image pull policy in the shared_configuration.
+        pull_policy: IfNotPresent
+
+      ## Logging for workloads. This is the default setting.
+      log:
+        format: json
+
+      ## The initial resources (CPU, memory) requests and limits. If more resources are needed,
+      ## make the changes here to meet your requirements.
+      resources:
+        requests:
+          cpu: 500m
+          memory: 512Mi
+        limits:
+          cpu: 1
+          memory: 3072Mi
+
+      ## By default "Autoscaling" is enabled with the following settings, with a minimum of 1 replica and a maximum of 3 replicas. Change
+      ## these settings to meet your requirements.
+      auto_scaling:
+        enabled: true
+        max_replicas: 3
+        min_replicas: 1
+        ## This is the default CPU percentage before autoscaling occurs.
+        target_cpu_utilization_percentage: 80
+
+      ## Below are the default CPE Production settings. Make the necessary changes as you see fit. Refer to the Knowledge Center documentation for details.
+      cpe_production_setting:
+        time_zone: Etc/UTC
+
+        ## The initial use of available memory.
+        jvm_initial_heap_percentage: 18
+        ## The maximum percentage of available memory to use.
+        jvm_max_heap_percentage: 33
+
+        ## Use this "jvm_customize_options" parameter to specify JVM arguments using comma separation. For example, if you want to set the following JVM arguments:
+        ##   -Dmy.test.jvm.arg1=123
+        ##   -Dmy.test.jvm.arg2=abc
+        ##   -XX:+SomeJVMSettings
+        ##   -XshowSettings:vm
+        ## then set the following: jvm_customize_options="-Dmy.test.jvm.arg1=123,-Dmy.test.jvm.arg2=abc,-XX:+SomeJVMSettings,-XshowSettings:vm"
+        jvm_customize_options:
+
+        ## Default JNDI name for the GCD non-XA data source
+        gcd_jndi_name: FNGCDDS
+        ## Default JNDI name for the GCD XA data source
+        gcd_jndixa_name: FNGCDDSXA
+        license_model: FNCM.PVUNonProd
+        license: accept
+
+      ## Enable/disable monitoring where metrics can be sent to Graphite or scraped by Prometheus
+      monitor_enabled: true
+      ## Enable/disable logging where logs can be sent to Elasticsearch.
+      logging_enabled: true
+
+      ## By default, the plugin for Graphite is enabled to emit container metrics.
+      collectd_enable_plugin_write_graphite: true
+
+      ## Persistent Volume Claims for CPE. If the storage_configuration in the shared_configuration is configured,
+      ## the Operator will create the PVCs using the names below.
+      datavolume:
+        existing_pvc_for_cpe_cfgstore: "cpe-cfgstore"
+        existing_pvc_for_cpe_logstore: "cpe-logstore"
+        existing_pvc_for_cpe_filestore: "cpe-filestore"
+        existing_pvc_for_cpe_icmrulestore: "cpe-icmrulesstore"
+        existing_pvc_for_cpe_textextstore: "cpe-textextstore"
+        existing_pvc_for_cpe_bootstrapstore: "cpe-bootstrapstore"
+        existing_pvc_for_cpe_fnlogstore: "cpe-fnlogstore"
+
+      ## Default values for both readiness and liveness probes. Modify these values to meet your requirements.
+      probe:
+        readiness:
+          initial_delay_seconds: 120
+          period_seconds: 5
+          timeout_seconds: 10
+          failure_threshold: 6
+        liveness:
+          initial_delay_seconds: 600
+          period_seconds: 5
+          timeout_seconds: 5
+          failure_threshold: 6
+
+      ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above.
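+      ## For example, to pull the CPE images with a different, pre-created pull secret (hypothetical name):
+      ##   image_pull_secrets:
+      ##     name: "my-cpe-registrykey"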
+      image_pull_secrets:
+        name: "admin.registrykey"
+
+    #####################################
+    ## Start of configuration for CMIS ##
+    #####################################
+    cmis:
+      ## The architecture of the cluster. This is the default for Linux on x86 and should not be changed.
+      arch:
+        amd64: "3 - Most preferred"
+
+      ## The number of replicas or pods to be deployed. The default is 1 replica and for high availability in a production env,
+      ## it is recommended to have 2 or more.
+      replica_count: 1
+
+      ## This is the image repository and tag that correspond to the image registry, which is where the image will be pulled.
+      image:
+        ## The default repository is the IBM Entitled Registry.
+        repository: cp.icr.io/cp/cp4a/fncm/cmis
+        tag: ga-305-cmis
+
+        ## This will override the image pull policy in the shared_configuration.
+        pull_policy: IfNotPresent
+
+      ## Logging for workloads. This is the default setting.
+      log:
+        format: json
+
+      ## The initial resources (CPU, memory) requests and limits. If more resources are needed,
+      ## make the changes here to meet your requirements.
+      resources:
+        requests:
+          cpu: 500m
+          memory: 256Mi
+        limits:
+          cpu: 1
+          memory: 1536Mi
+
+      ## By default "Autoscaling" is enabled with the following settings, with a minimum of 1 replica and a maximum of 3 replicas. Change
+      ## these settings to meet your requirements.
+      auto_scaling:
+        enabled: true
+        max_replicas: 3
+        min_replicas: 1
+        ## This is the default CPU percentage before autoscaling occurs.
+        target_cpu_utilization_percentage: 80
+
+      ## Below are the default CMIS Production settings. Make the necessary changes as you see fit. Refer to the Knowledge Center documentation for details.
+      cmis_production_setting:
+        ## By default, this parameter is set by the Operator using the CPE service endpoint (e.g., "http://{{ meta.name }}-cpe-svc:9080/wsi/FNCEWS40MTOM")
+        cpe_url:
+
+        time_zone: Etc/UTC
+
+        ## The initial use of available memory.
+        jvm_initial_heap_percentage: 40
+        ## The maximum percentage of available memory to use.
+        jvm_max_heap_percentage: 66
+
+        ## Use this "jvm_customize_options" parameter to specify JVM arguments using comma separation. For example, if you want to set the following JVM arguments:
+        ##   -Dmy.test.jvm.arg1=123
+        ##   -Dmy.test.jvm.arg2=abc
+        ##   -XX:+SomeJVMSettings
+        ##   -XshowSettings:vm
+        ## then set the following: jvm_customize_options="-Dmy.test.jvm.arg1=123,-Dmy.test.jvm.arg2=abc,-XX:+SomeJVMSettings,-XshowSettings:vm"
+        jvm_customize_options:
+
+        checkout_copycontent: true
+        default_maxitems: 25
+        cvl_cache: true
+        secure_metadata_cache: false
+        filter_hidden_properties: true
+        querytime_limit: 180
+        resumable_queries_forrest: true
+        escape_unsafe_string_characters: false
+        max_soap_size: 180
+        print_pull_stacktrace: false
+        folder_first_search: false
+        ignore_root_documents: false
+        supporting_type_mutability: false
+        license: accept
+
+      ## Enable/disable monitoring where metrics can be sent to Graphite or scraped by Prometheus
+      monitor_enabled: true
+      ## Enable/disable logging where logs can be sent to Elasticsearch.
+      logging_enabled: true
+
+      ## By default, the plugin for Graphite is enabled to emit container metrics.
+      collectd_enable_plugin_write_graphite: true
+
+      ## Persistent Volume Claims for CMIS. If the storage_configuration in the shared_configuration is configured,
+      ## the Operator will create the PVCs using the names below.
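+      ## If you manage storage yourself instead, pre-create PVCs with these exact names
+      ## (or change the names below to match your existing claims) before deploying.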
+      datavolume:
+        existing_pvc_for_cmis_cfgstore: "cmis-cfgstore"
+        existing_pvc_for_cmis_logstore: "cmis-logstore"
+
+      ## Default values for both readiness and liveness probes. Modify these values to meet your requirements.
+      probe:
+        readiness:
+          initial_delay_seconds: 90
+          period_seconds: 5
+          timeout_seconds: 10
+          failure_threshold: 6
+        liveness:
+          initial_delay_seconds: 180
+          period_seconds: 5
+          timeout_seconds: 5
+          failure_threshold: 6
+      ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above.
+      image_pull_secrets:
+        name: "admin.registrykey"
+
+  ########################################################################
+  ########   IBM Business Automation Navigator configuration     ########
+  ########################################################################
+  navigator_configuration:
+
+    ## Navigator secret that contains user credentials for LDAP and database
+    ban_secret_name: ibm-ban-secret
+
+    ## The architecture of the cluster. This is the default for Linux and should not be changed.
+    arch:
+      amd64: "3 - Most preferred"
+
+    ## The number of replicas or pods to be deployed. The default is 1 replica and for high availability in a production env,
+    ## it is recommended to have 2 or more.
+    replica_count: 1
+
+    ## This is the image repository and tag that correspond to the image registry, which is where the image will be pulled.
+    image:
+
+      ## The default repository is the IBM Entitled Registry
+      repository: cp.icr.io/cp/cp4a/ban/navigator-sso
+      tag: ga-308-icn
+
+      ## This will override the image pull policy in the shared_configuration.
+      pull_policy: IfNotPresent
+
+    ## Logging for workloads. This is the default setting.
+    log:
+      format: json
+
+    ## The initial resources (CPU, memory) requests and limits. If more resources are needed,
+    ## make the changes here to meet your requirements.
+    resources:
+      requests:
+        cpu: 500m
+        memory: 512Mi
+      limits:
+        cpu: 1
+        memory: 1536Mi
+
+    ## By default "Autoscaling" is enabled with the following settings, with a minimum of 1 replica and a maximum of 3 replicas. Change
+    ## these settings to meet your requirements.
+    auto_scaling:
+      enabled: true
+      max_replicas: 3
+      min_replicas: 1
+      ## This is the default CPU percentage before autoscaling occurs.
+      target_cpu_utilization_percentage: 80
+
+    ## Below are the default ICN Production settings. Make the necessary changes as you see fit. Refer to the Knowledge Center documentation for details.
+    icn_production_setting:
+      timezone: Etc/UTC
+      jvm_initial_heap_percentage: 40
+      jvm_max_heap_percentage: 66
+      jvm_customize_options:
+      icn_db_type: db2
+      icn_jndids_name: ECMClientDS
+      icn_schema: ICNDB
+      icn_table_space: ICNDB
+      allow_remote_plugins_via_http: false
+
+    ## Default settings for monitoring
+    monitor_enabled: false
+    ## Default settings for logging
+    logging_enabled: false
+
+    ## Persistent Volume Claims for ICN. If the storage_configuration in the shared_configuration is configured,
+    ## the Operator will create the PVCs using the names below.
+    datavolume:
+      existing_pvc_for_icn_cfgstore: "icn-cfgstore"
+      existing_pvc_for_icn_logstore: "icn-logstore"
+      existing_pvc_for_icn_pluginstore: "icn-pluginstore"
+      existing_pvc_for_icnvw_cachestore: "icn-vw-cachestore"
+      existing_pvc_for_icnvw_logstore: "icn-vw-logstore"
+      existing_pvc_for_icn_aspera: "icn-asperastore"
+
+    ## Default values for both readiness and liveness probes. Modify these values to meet your requirements.
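+    ## For example, on slower nodes or storage it is usually better to raise
+    ## initial_delay_seconds than failure_threshold, so that a slow startup is not
+    ## reported as a failing container.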
+    probe:
+      readiness:
+        initial_delay_seconds: 120
+        period_seconds: 5
+        timeout_seconds: 10
+        failure_threshold: 6
+      liveness:
+        initial_delay_seconds: 600
+        period_seconds: 5
+        timeout_seconds: 5
+        failure_threshold: 6
+
+    ## Only use this parameter if you want to override the image_pull_secrets setting in the shared_configuration above.
+    image_pull_secrets:
+      name: "admin.registrykey"
+
+  ########################################################################
+  ######## IBM User and Group Management Service configuration   ########
+  ########################################################################
+  ums_configuration:
+    existing_claim_name:
+    replica_count: 2
+    service_type: Route
+    # Your external UMS host name; only required if there is no sc_deployment_hostname_suffix given
+    hostname:
+    port: 443
+    images:
+      ums:
+        repository: cp.icr.io/cp/cp4a/ums/ums
+        tag: 20.0.2
+    admin_secret_name: ibm-dba-ums-secret
+    ## Optional for secure communication with UMS
+    external_tls_secret_name:
+    ## Optional for secure communication with UMS
+    external_tls_ca_secret_name:
+    ## Optional for secure communication with UMS
+    external_tls_teams_secret_name:
+    ## Optional for secure communication with UMS
+    external_tls_scim_secret_name:
+    ## Optional for secure communication with UMS
+    external_tls_sso_secret_name:
+    oauth:
+      ## Optional: full DN of an LDAP group that is authorized to manage OIDC clients, in addition to the primary admin from the admin secret
+      client_manager_group:
+      ## Optional: full DN of an LDAP group that is authorized to manage app_tokens, in addition to the primary admin from the admin secret
+      token_manager_group:
+      ## Optional: lifetime of OAuth access_tokens. Default is 7200s
+      access_token_lifetime:
+      ## Optional: lifetime of app-tokens. Default is 366d
+      app_token_lifetime:
+      ## Optional: lifetime of app-passwords. Default is 366d
+      app_password_lifetime:
+      ## Optional: maximum number of app-tokens or app-passwords per client. Default is 100
+      app_token_or_password_limit:
+      ## Optional: encoding / encryption when storing client secrets in the OAuth database. Default is xor for compatibility. Recommended value is PBKDF2WithHmacSHA512
+      client_secret_encoding:
+    resources:
+      limits:
+        cpu: 500m
+        memory: 512Mi
+      requests:
+        cpu: 200m
+        memory: 256Mi
+    ## Horizontal Pod Autoscaler
+    autoscaling:
+      enabled: true
+      min_replicas: 2
+      max_replicas: 5
+      target_average_utilization: 98
+    use_custom_jdbc_drivers: false
+    use_custom_binaries: false
+    custom_secret_name:
+    custom_xml:
+    logs:
+      console_format: json
+      console_log_level: INFO
+      console_source: message,trace,accessLog,ffdc,audit
+      trace_format: ENHANCED
+      trace_specification: "*=info"
+
+  ########################################################################
+  ########          Resource Registry configuration              ########
+  ########################################################################
+  resource_registry_configuration:
+    images:
+      pull_policy: IfNotPresent
+      resource_registry:
+        repository: cp.icr.io/cp/cp4a/baw/dba-etcd
+        tag: 20.0.2
+    admin_secret_name: resource-registry-admin-secret
+    replica_size: 3
+    probe:
+      liveness:
+        initial_delay_seconds: 60
+        period_seconds: 10
+        timeout_seconds: 5
+        success_threshold: 1
+        failure_threshold: 3
+      readiness:
+        initial_delay_seconds: 10
+        period_seconds: 10
+        timeout_seconds: 5
+        success_threshold: 1
+        failure_threshold: 3
+    resource:
+      limits:
+        cpu: "500m"
+        memory: "512Mi"
+      requests:
+        cpu: "100m"
+        memory: "128Mi"
+    auto_backup:
+      enable: true
+      minimal_time_interval: 300
+      pvc_name: "{{ meta.name }}-dba-rr-pvc"
+      dynamic_provision:
+        enable: true
+        size: 3Gi
+        storage_class: "{{ shared_configuration.storage_configuration.sc_fast_file_storage_classname }}"
+
+  ########################################################################
+  ########     IBM Process Federation Server configuration       ########
+  ########################################################################
+  pfs_configuration:
+    ## Process Federation Server hostname
+    hostname: ""
+    ## Process Federation Server port
+    port: 443
+    ## How the HTTPS endpoint service should be published. Possible values are ClusterIP, NodePort, Route
+    service_type: Route
+
+    ## If you use an external Elasticsearch server, provide the following configuration
+    external_elasticsearch:
+      ## The endpoint of the external Elasticsearch, such as: https://<hostname>:<port>
+      endpoint: ""
+      ## The external Elasticsearch administrative secret
+      admin_secret_name: ""
+
+    image:
+      ## Process Federation Server image
+      repository: cp.icr.io/cp/cp4a/baw/pfs-prod
+      ## Process Federation Server image tag
+      tag: "20.0.2"
+      ## Process Federation Server image pull policy
+      pull_policy: IfNotPresent
+
+    ## Number of initial Process Federation Server pods
+    replicas: 1
+    ## Service account name for Process Federation Server pods
+    service_account:
+    ## Whether Kubernetes can (soft) or must not (hard) deploy Process Federation Server pods onto the same node. Possible values are "soft" and "hard".
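+    ## With "hard" anti-affinity, pods stay pending if they cannot be placed on separate
+    ## nodes; with "soft", Kubernetes prefers separate nodes but still schedules the pods
+    ## when it cannot spread them.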
+
+  ########################################################################
+  ########    IBM Process Federation Server configuration         ########
+  ########################################################################
+  pfs_configuration:
+    ## Process Federation Server hostname
+    hostname: ""
+    ## Process Federation Server port
+    port: 443
+    ## How the HTTPS endpoint service should be published. Possible values are ClusterIP, NodePort, Route
+    service_type: Route
+
+    ## If you use an external Elasticsearch server, provide the following configuration
+    external_elasticsearch:
+      ## The endpoint of the external Elasticsearch, such as: https://<hostname>:<port>
+      endpoint: ""
+      ## The external Elasticsearch administrative secret
+      admin_secret_name: ""
+
+    image:
+      ## Process Federation Server image
+      repository: cp.icr.io/cp/cp4a/baw/pfs-prod
+      ## Process Federation Server image tag
+      tag: "20.0.2"
+      ## Process Federation Server image pull policy
+      pull_policy: IfNotPresent
+
+    ## Number of initial Process Federation Server pods
+    replicas: 1
+    ## Service account name for Process Federation Server pod
+    service_account:
+    ## Whether Kubernetes can (soft) or must not (hard) deploy Process Federation Server pods onto the same node. Possible values are "soft" and "hard".
+    anti_affinity: hard
+
+    ## Whether to enable default security roles. Possible values are: true and false.
+    enable_default_security_roles: true
+    ## Name of the secret containing the Process Federation Server administration passwords, such as ltpaPassword, oidcClientPassword, sslKeyPassword
+    admin_secret_name: ibm-pfs-admin-secret
+    ## Name of the secret containing the files that will be mounted in the /config/configDropins/overrides folder
+    config_dropins_overrides_secret:
+    ## Name of the secret containing the files that will be mounted in the /config/resources/security folder
+    resources_security_secret: ""
+    ## Name of the custom libraries containing the files that will be mounted in the /config/resources/libs folder
+    custom_libs_pvc: ""
+    ## Whether to enable the notification server. Possible values are: true and false.
+    enable_notification_server: true
+    ## The secret that contains the Transport Layer Security (TLS) key and certificate for external HTTPS visits. You can enter the secret name here.
+    ## If you do not want to use a customized external TLS certificate, leave it empty.
+    external_tls_secret:
+    ## Certificate authority (CA) used to sign the external TLS secret. It is stored in the secret with the TLS key and certificate. You can enter the secret name here.
+    ## If you don't want to use a customized CA to sign the external TLS certificate, leave it empty.
+    external_tls_ca_secret:
+
+    ## Specify whether to use the built-in monitoring capability
+    monitor_enabled: false
+
+    tls:
+      ## Existing TLS secret containing tls.key and tls.crt
+      tls_secret_name:
+      ## Existing TLS trust secret list
+      tls_trust_list:
+
+    resources:
+      requests:
+        ## Requested amount of CPU for PFS configuration
+        cpu: 500m
+        ## Requested amount of memory for PFS configuration
+        memory: 512Mi
+      limits:
+        ## CPU limit for PFS configuration
+        cpu: 2
+        ## Memory limit for PFS configuration
+        memory: 4Gi
+
+    liveness_probe:
+      ## Number of seconds after the Process Federation Server container starts before the liveness probe is initiated
+      initial_delay_seconds: 300
+    readiness_probe:
+      ## Number of seconds after the Process Federation Server container starts before the readiness probe is initiated
+      initial_delay_seconds: 240
+
+    saved_searches:
+      ## Name of the Elasticsearch index used to store saved searches
+      index_name: ibmpfssavedsearches
+      ## Number of shards of the Elasticsearch index used to store saved searches
+      index_number_of_shards: 3
+      ## Number of replicas (pods) of the Elasticsearch index used to store saved searches
+      index_number_of_replicas: 1
+      ## Batch size used when retrieving saved searches
+      index_batch_size: 100
+      ## Amount of time before considering an update lock as expired. Valid values are numbers with a trailing 'm' or 's' for minutes or seconds
+      update_lock_expiration: 5m
+      ## Amount of time before considering a unique constraint as expired. Valid values are numbers with a trailing 'm' or 's' for minutes or seconds
+      unique_constraint_expiration: 5m
+
+    security:
+      sso:
+        ## The ssoDomainNames property of the <webAppSecurity> tag
+        domain_name:
+        ## The ssoCookieName property of the <webAppSecurity> tag
+        cookie_name: "ltpatoken2"
+      ltpa:
+        ## The keysFileName property of the <ltpa> tag
+        filename: "ltpa.keys"
+        ## The expiration property of the <ltpa> tag
+        expiration: "120m"
+        ## The monitorInterval property of the <ltpa> tag
+        monitor_interval: "60s"
+      ## The sslProtocol property of the <ssl> tag used as default SSL config
+      ssl_protocol: SSL
+
+    executor:
+      ## Value of the maxThreads property of the <executor> tag
+      max_threads: "80"
+      ## Value of the coreThreads property of the <executor> tag
+      core_threads: "40"
+
+    rest:
+      ## Value of the userGroupCheckInterval property of the PFS REST configuration tag
+      user_group_check_interval: "300s"
+      ## Value of the systemStatusCheckInterval property of the PFS REST configuration tag
+      system_status_check_interval: "60s"
+      ## Value of the bdFieldsCheckInterval property of the PFS REST configuration tag
+      bd_fields_check_interval: "300s"
+
+    custom_env_variables:
+      ## Names of the custom environment variables defined in the secret referenced in pfs.customEnvVariables.secret
+      names:
+      # - name: MY_CUSTOM_ENVIRONMENT_VARIABLE
+      ## Secret holding custom environment variables
+      secret:
+
+    ## Log and trace configuration
+    logs:
+      ## Format for printing logs on the console
+      console_format: "json"
+      ## Log level for printing logs on the console
+      console_log_level: "INFO"
+      ## Source of the logs for printing on the console
+      console_source: "message,trace,accessLog,ffdc,audit"
+      ## Format for printing message logs on the console
+      message_format: "basic"
+      ## Format for printing trace logs on the console
+      trace_format: "ENHANCED"
+      ## Specification for printing trace logs
+      trace_specification: "*=info"
+    storage:
+      ## Use dynamic provisioning for the PFS logs data storage
+      use_dynamic_provisioning: true
+      ## The minimum size of the persistent volume mounted as the PFS Liberty server /logs folder
+      size: 5Gi
+      ## Storage class of the persistent volume mounted as the PFS Liberty server /logs folder
+      storage_class: "{{ shared_configuration.storage_configuration.sc_medium_file_storage_classname }}"
+
+    ## When PFS is deployed in an environment that includes the Resource Registry,
+    ## the following additional parameters can be used to configure the integration between PFS and the Resource Registry
+    dba_resource_registry:
+      ## Time to live of the lease that creates the PFS entry in the DBA Resource Registry, in seconds.
+      lease_ttl: 120
+      ## The interval at which to check that PFS is running, in seconds.
+      pfs_check_interval: 10
+      ## The number of seconds after which PFS will be considered as not running if no connection can be performed
+      pfs_connect_timeout: 10
+      ## The number of seconds after which PFS will be considered as not running if it has not yet responded
+      pfs_response_timeout: 30
+      ## The key under which PFS should be registered in the DBA Service Registry when running
+      pfs_registration_key: /dba/appresources/IBM_PFS/PFS_SYSTEM
+      resources:
+        limits:
+          ## Memory limit for the PFS and RR integration pod
+          memory: '512Mi'
+          ## CPU limit for the PFS and RR integration pod
+          cpu: '500m'
+        requests:
+          ## Requested amount of memory for the PFS and RR integration pod
+          memory: '512Mi'
+          ## Requested amount of CPU for the PFS and RR integration pod
+          cpu: '200m'
+
+  ########################################################################
+  ########      Embedded Elasticsearch configuration              ########
+  ########################################################################
+  elasticsearch_configuration:
+    es_image:
+      ## Elasticsearch image
+      repository: cp.icr.io/cp/cp4a/baw/pfs-elasticsearch-prod
+      ## Elasticsearch image tag
+      tag: "20.0.2"
+      ## Elasticsearch image pull policy
+      pull_policy: IfNotPresent
+    es_init_image:
+      ## The image used by the privileged init container to configure Elasticsearch system settings.
+      ## This value is only relevant if elasticsearch_configuration.privileged is set to true
+      repository: cp.icr.io/cp/cp4a/baw/pfs-init-prod
+      ## The image tag for the Elasticsearch init container
+      tag: "20.0.2"
+      ## The pull policy for the Elasticsearch init container
+      pull_policy: IfNotPresent
+    es_nginx_image:
+      ## The name of the Nginx docker image to be used by Elasticsearch pods
+      repository: cp.icr.io/cp/cp4a/baw/pfs-nginx-prod
+      ## The image tag of the Nginx docker image to be used by Elasticsearch pods
+      tag: "20.0.2"
+      ## The pull policy for the Nginx docker image to be used by Elasticsearch pods
+      pull_policy: IfNotPresent
+
+    ## Number of initial Elasticsearch pods
+    replicas: 1
+    ## How the HTTPS endpoint service should be published. The possible values are ClusterIP and NodePort
+    service_type: ClusterIP
+    ## The port to which the Elasticsearch server HTTPS endpoint will be exposed externally.
+    ## This parameter is relevant only if elasticsearch_configuration.service_type is set to NodePort
+    external_port:
+    ## The Elasticsearch admin secret that contains the username, password and .htpasswd.
+    ## If not provided, the default admin secret named "{{ meta.name }}-elasticsearch-admin-secret" is used.
+    admin_secret_name:
+    ## Whether Kubernetes "may" (soft) or "must not" (hard) deploy Elasticsearch pods onto the same node
+    ## The possible values are "soft" and "hard"
+    anti_affinity: hard
+    ## Name of a service account to use.
+    ## If elasticsearch_configuration.privileged is set to true, then this service account must allow running privileged containers.
+    ## If not provided, the default service account named "{{ meta.name }}-elasticsearch-service-account" is used.
+    service_account:
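+    ## Illustrative note (not from the shipped template): the privileged init container described
+    ## below typically raises the kernel settings that Elasticsearch requires. If you set
+    ## privileged to false, make sure each worker node already meets the standard Elasticsearch
+    ## requirement, for example:
+    # sysctl -w vm.max_map_count=262144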
+    ## When set to true, a privileged container will be created to execute the appropriate sysctl commands so that the node running the pods meets the Elasticsearch requirements.
+    privileged: true
+    ## Initial delay for liveness and readiness probes of Elasticsearch pods
+    probe_initial_delay: 90
+    ## The JVM heap size to allocate to each Elasticsearch pod
+    heap_size: "1024m"
+    ## Specify whether to use the built-in monitoring capability
+    monitor_enabled: false
+
+    resources:
+      limits:
+        ## Memory limit for Elasticsearch configuration
+        memory: "2Gi"
+        ## CPU limit for Elasticsearch configuration
+        cpu: "1000m"
+      requests:
+        ## Requested amount of memory for Elasticsearch configuration
+        memory: "1Gi"
+        ## Requested amount of CPU for Elasticsearch configuration
+        cpu: "100m"
+
+    storage:
+      ## Whether to persist the Elasticsearch data. Set to false for non-production or trial-only deployments.
+      persistent: true
+      ## Set to true to use a dynamic storage provisioner
+      use_dynamic_provisioning: true
+      ## The minimum size of the persistent volume
+      size: 10Gi
+      ## Storage class name for Elasticsearch persistent storage
+      storage_class: "{{ shared_configuration.storage_configuration.sc_fast_file_storage_classname }}"
+
+    snapshot_storage:
+      ## Whether to persist the Elasticsearch snapshot storage. Set to true for production deployments.
+      enabled: false
+      ## Set to true to use a dynamic storage provisioner
+      use_dynamic_provisioning: true
+      ## The minimum size of the persistent volume
+      size: 30Gi
+      ## Storage class name for Elasticsearch persistent snapshot storage
+      storage_class_name: ""
+      ## By default, a new persistent volume claim is created. Specify an existing claim here if one is available.
+      existing_claim_name: ""
+
+  ########################################################################
+  ########  IBM FileNet Content Manager initialize configuration  ########
+  ########################################################################
+  initialize_configuration:
+    ic_domain_creation:
+      ## Provide a name for the domain
+      domain_name: "P8DOMAIN"
+      ## The encryption strength
+      encryption_key: "128"
+    ic_ldap_creation:
+      ## Administrator user
+      ic_ldap_admin_user_name:
+        - ""
+      ## Administrator group
+      ic_ldap_admins_groups_name:
+        - ""
+      ## Name of the LDAP directory
+      ic_ldap_name: "ldap_name"
+    ic_obj_store_creation:
+      object_stores:
+        ## Configuration for the document object store
+        ## Display name for the document object store to create
+        - oc_cpe_obj_store_display_name: "DOCS"
+          ## Symbolic name for the document object store to create
+          oc_cpe_obj_store_symb_name: "DOCS"
+          oc_cpe_obj_store_conn:
+            ## Object store connection name
+            name: "DOCS_connection" #database connection name
+            ## The name of the site
+            site_name: "InitialSite"
+            ## Add the name of the object store database
+            dc_os_datasource_name: "FNDSDOCS"
+            ## The XA datasource
+            dc_os_xa_datasource_name: "FNDSDOCSXA"
+          ## Admin user group
+          oc_cpe_obj_store_admin_user_groups:
+            - ""
+          ## An array of users with access to the object store
+          oc_cpe_obj_store_basic_user_groups:
+          ## Specify whether to enable add-ons
+          oc_cpe_obj_store_addons: true
+          ## Add-ons to enable for Content Platform Engine
+          oc_cpe_obj_store_addons_list:
+            - "{CE460ADD-0000-0000-0000-000000000004}"
+            - "{CE460ADD-0000-0000-0000-000000000001}"
+            - "{CE460ADD-0000-0000-0000-000000000003}"
+            - "{CE460ADD-0000-0000-0000-000000000005}"
+            - "{CE511ADD-0000-0000-0000-000000000006}"
+            - "{CE460ADD-0000-0000-0000-000000000008}"
+            - "{CE460ADD-0000-0000-0000-000000000007}"
+            - "{CE460ADD-0000-0000-0000-000000000009}"
+            - "{CE460ADD-0000-0000-0000-00000000000A}"
+            - "{CE460ADD-0000-0000-0000-00000000000B}"
"{CE460ADD-0000-0000-0000-00000000000D}" + - "{CE511ADD-0000-0000-0000-00000000000F}" + ## Provide a name for the Advance Storage Area + oc_cpe_obj_store_asa_name: "demo_storage" + ## Provide a name for the file system storage device + oc_cpe_obj_store_asa_file_systems_storage_device_name: "demo_file_system_storage" + ## The root directory path for the object store storage area + oc_cpe_obj_store_asa_root_dir_path: "/opt/ibm/asa/os01_storagearea" + ## Specify whether to enable workflow for the object store + oc_cpe_obj_store_enable_workflow: false + ## Specify a name for the workflow region + oc_cpe_obj_store_workflow_region_name: "" + ## Specify the number of the workflow region + oc_cpe_obj_store_workflow_region_number: 1 + ## Specify a table space for the workflow data + oc_cpe_obj_store_workflow_data_tbl_space: "VWDATA_TS" + ## Optionally specify a table space for the workflow index + oc_cpe_obj_store_workflow_index_tbl_space: "VWINDEX_TS" + ## Optionally specify a table space for the workflow blob. + oc_cpe_obj_store_workflow_blob_tbl_space: "VWBLOB_TS" + ## Designate an LDAP group for the workflow admin group. + oc_cpe_obj_store_workflow_admin_group: "" + ## Designate an LDAP group for the workflow config group + oc_cpe_obj_store_workflow_config_group: "" + ## Default format for date and time + oc_cpe_obj_store_workflow_date_time_mask: "mm/dd/yy hh:tt am" + ## Locale for the workflow + oc_cpe_obj_store_workflow_locale: "en" + ## Provide a name for the connection point + oc_cpe_obj_store_workflow_pe_conn_point_name: "" + + ## Configuration for the design object store + ## Display name for the design object store to create + - oc_cpe_obj_store_display_name: "DOS" + ## ## Symbolic name for the document object store to create + oc_cpe_obj_store_symb_name: "DOS" + oc_cpe_obj_store_conn: + ## ## Object store connection name + name: "DOS_connection" #database connection name + ## The name of the site + site_name: "InitialSite" + ## Add the name of the object store database + dc_os_datasource_name: "FNDSDOS" + ## The XA datasource + dc_os_xa_datasource_name: "FNDSDOSXA" + ## Admin user group + oc_cpe_obj_store_admin_user_groups: + - "" + ## An array of users with access to the object store + oc_cpe_obj_store_basic_user_groups: + ## Specify whether to enable add-ons + oc_cpe_obj_store_addons: true + ## Add-ons to enable for Content Platform Engine + oc_cpe_obj_store_addons_list: + - "{CE460ADD-0000-0000-0000-000000000004}" + - "{CE460ADD-0000-0000-0000-000000000001}" + - "{CE460ADD-0000-0000-0000-000000000003}" + - "{CE460ADD-0000-0000-0000-000000000005}" + - "{CE511ADD-0000-0000-0000-000000000006}" + - "{CE460ADD-0000-0000-0000-000000000008}" + - "{CE460ADD-0000-0000-0000-000000000007}" + - "{CE460ADD-0000-0000-0000-000000000009}" + - "{CE460ADD-0000-0000-0000-00000000000A}" + - "{CE460ADD-0000-0000-0000-00000000000B}" + - "{CE460ADD-0000-0000-0000-00000000000D}" + - "{CE511ADD-0000-0000-0000-00000000000F}" + ## Provide a name for the Advance Storage Area + oc_cpe_obj_store_asa_name: "demo_storage" + ## Provide a name for the file system storage device + oc_cpe_obj_store_asa_file_systems_storage_device_name: "demo_file_system_storage" + ## The root directory path for the object store storage area + oc_cpe_obj_store_asa_root_dir_path: "/opt/ibm/asa/os02_storagearea" + ## Specify whether to enable workflow for the object store + oc_cpe_obj_store_enable_workflow: false + ## Specify a name for the workflow region + oc_cpe_obj_store_workflow_region_name: "" + ## Specify the number of the workflow 
+          oc_cpe_obj_store_workflow_region_number: 2
+          ## Specify a table space for the workflow data
+          oc_cpe_obj_store_workflow_data_tbl_space: "VWDATA_TS"
+          ## Optionally specify a table space for the workflow index
+          oc_cpe_obj_store_workflow_index_tbl_space: "VWINDEX_TS"
+          ## Optionally specify a table space for the workflow blob.
+          oc_cpe_obj_store_workflow_blob_tbl_space: "VWBLOB_TS"
+          ## Designate an LDAP group for the workflow admin group.
+          oc_cpe_obj_store_workflow_admin_group: ""
+          ## Designate an LDAP group for the workflow config group
+          oc_cpe_obj_store_workflow_config_group: ""
+          ## Default format for date and time
+          oc_cpe_obj_store_workflow_date_time_mask: "mm/dd/yy hh:tt am"
+          ## Locale for the workflow
+          oc_cpe_obj_store_workflow_locale: "en"
+          ## Provide a name for the connection point
+          oc_cpe_obj_store_workflow_pe_conn_point_name: ""
+
+        ## Configuration for the target object store
+        ## Display name for the target object store to create
+        - oc_cpe_obj_store_display_name: "TOS"
+          ## Symbolic name for the target object store to create
+          oc_cpe_obj_store_symb_name: "TOS"
+          oc_cpe_obj_store_conn:
+            ## Object store connection name
+            name: "TOS_connection" #database connection name
+            ## The name of the site
+            site_name: "InitialSite"
+            ## Add the name of the object store database
+            dc_os_datasource_name: "FNDSTOS"
+            ## The XA datasource
+            dc_os_xa_datasource_name: "FNDSTOSXA"
+          ## Admin user group
+          oc_cpe_obj_store_admin_user_groups:
+            - ""
+          ## An array of users with access to the object store
+          oc_cpe_obj_store_basic_user_groups:
+          ## Specify whether to enable add-ons
+          oc_cpe_obj_store_addons: true
+          ## Add-ons to enable for Content Platform Engine
+          oc_cpe_obj_store_addons_list:
+            - "{CE460ADD-0000-0000-0000-000000000004}"
+            - "{CE460ADD-0000-0000-0000-000000000001}"
+            - "{CE460ADD-0000-0000-0000-000000000003}"
+            - "{CE460ADD-0000-0000-0000-000000000005}"
+            - "{CE511ADD-0000-0000-0000-000000000006}"
+            - "{CE460ADD-0000-0000-0000-000000000008}"
+            - "{CE460ADD-0000-0000-0000-000000000007}"
+            - "{CE460ADD-0000-0000-0000-000000000009}"
+            - "{CE460ADD-0000-0000-0000-00000000000A}"
+            - "{CE460ADD-0000-0000-0000-00000000000B}"
+            - "{CE460ADD-0000-0000-0000-00000000000D}"
+            - "{CE511ADD-0000-0000-0000-00000000000F}"
+          ## Provide a name for the Advanced Storage Area
+          oc_cpe_obj_store_asa_name: "demo_storage"
+          ## Provide a name for the file system storage device
+          oc_cpe_obj_store_asa_file_systems_storage_device_name: "demo_file_system_storage"
+          ## The root directory path for the object store storage area
+          oc_cpe_obj_store_asa_root_dir_path: "/opt/ibm/asa/os03_storagearea"
+          ## Specify whether to enable workflow for the object store
+          oc_cpe_obj_store_enable_workflow: true
+          ## Specify a name for the workflow region
+          oc_cpe_obj_store_workflow_region_name: ""
+          ## Specify the number of the workflow region
+          oc_cpe_obj_store_workflow_region_number: 3
+          ## Specify a table space for the workflow data
+          oc_cpe_obj_store_workflow_data_tbl_space: "VWDATA_TS"
+          ## Optionally specify a table space for the workflow index
+          oc_cpe_obj_store_workflow_index_tbl_space: "VWINDEX_TS"
+          ## Optionally specify a table space for the workflow blob.
+          oc_cpe_obj_store_workflow_blob_tbl_space: "VWBLOB_TS"
+          ## Designate an LDAP group for the workflow admin group.
+          oc_cpe_obj_store_workflow_admin_group: ""
+          ## Designate an LDAP group for the workflow config group
+          oc_cpe_obj_store_workflow_config_group: ""
+          ## Default format for date and time
+          oc_cpe_obj_store_workflow_date_time_mask: "mm/dd/yy hh:tt am"
+          ## Locale for the workflow
+          oc_cpe_obj_store_workflow_locale: "en"
+          ## Provide a name for the connection point
+          oc_cpe_obj_store_workflow_pe_conn_point_name: "cpe_conn_tos"
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_aca.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_aca.yaml
new file mode 100644
index 00000000..d556158c
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_aca.yaml
@@ -0,0 +1,120 @@
+###############################################################################
+#
+# Licensed Materials - Property of IBM
+#
+# (C) Copyright IBM Corp. 2020. All Rights Reserved.
+#
+# US Government Users Restricted Rights - Use, duplication or
+# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+#
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+  name: icp4aca
+  labels:
+    app.kubernetes.io/instance: ibm-dba
+    app.kubernetes.io/managed-by: ibm-dba
+    app.kubernetes.io/name: ibm-dba
+    release: 20.0.2
+spec:
+  appVersion: 20.0.2
+
+  #####################################################################################################################
+  ## The contents of this template CR file reflect only the specific parameters and configuration
+  ## settings applicable to the represented ICP4A capability.
+  ##
+  ## These values/configuration sections are to be used when manually assembling or updating the main
+  ## ICP4A CR that is being applied in order to install an ICP4A environment.
+  ##
+  ## If you are in the process of preparing a new install of an ICP4A environment,
+  ## you should merge the required values and configuration sections from this file into the
+  ## starting point CR template: ibm_cp4a_cr_enterprise_foundation.yaml available in the
+  ## same location as this template.
+  ##
+  ## If you are updating an existing ICP4A environment, you should merge the required values and configuration
+  ## sections from this template into your existing CR file already applied in the environment.
+  ##
+  ######################################################################################################################
+  shared_configuration:
+
+    ## Use this parameter to specify the license for the CP4A deployment.
+    ## The possible values are: non-production and production. If not set, the license
+    ## defaults to production. This value could be different from the other licenses in the CR.
+    sc_deployment_license: ""
+
+    ## All CP4A components must use/share the image_pull_secrets to pull images
+    image_pull_secrets:
+      - admin.registrykey
+
+    ## All CP4A components must use/share the same docker image repository. For example, if IBM Entitlement Registry is used, then
+    ## it should be "cp.icr.io". Otherwise, it will be a local docker registry.
+    sc_image_repository: cp.icr.io
+
+    ## The deployment type as selected by the user. Possible values are: demo, enterprise.
+    sc_deployment_type: enterprise
+
+    ## The platform to be deployed specified by the user. Possible values are: OCP, ROKS, IKS and other. This is normally populated by the User script
+    ## based on input from the user.
+    sc_deployment_platform:
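+    ## Illustrative note (not part of the shipped template): one common way to create the shared
+    ## pull secret named above, assuming the IBM Entitled Registry (user "cp") and your
+    ## entitlement key, is:
+    # kubectl create secret docker-registry admin.registrykey \
+    #   --docker-server=cp.icr.io --docker-username=cp \
+    #   --docker-password=<your-entitlement-key> --docker-email=<your-email>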
+    ## CP4A patterns or capabilities to be deployed. This CR represents the "contentanalyzer" pattern, which includes the following
+    ## mandatory components: ums, rr, icn (BAN/Navigator), bastudio
+    sc_deployment_patterns: contentanalyzer
+
+    ## The optional components to be installed if listed here. This is normally populated by the User script based on input from the user.
+    ## Content Analyzer does not have optional components to customize.
+    sc_optional_components:
+
+    ## All CP4A components must use/share the root_ca_secret in order for integration
+    root_ca_secret: icp4a-root-ca
+
+    ## For OCP, this is used to create a route; you should input a valid hostname in the required field.
+    sc_deployment_hostname_suffix: "{{ meta.name }}."
+
+    ## On OCP 3.x and 4.x, the User script will populate these three (3) parameters based on your input for "enterprise" deployment.
+    ## If you are manually deploying without using the User script, then you should provide the different storage classes for the slow, medium
+    ## and fast storage parameters below. If you only have 1 storage class defined, then you can use that 1 storage class for all 3 parameters.
+    storage_configuration:
+      sc_slow_file_storage_classname: ""
+      sc_medium_file_storage_classname: ""
+      sc_fast_file_storage_classname: ""
+
+  ## The beginning section of database configuration for CP4A
+  datasource_configuration:
+    ## The database configuration for ACA (Content Analyzer)
+    dc_ca_datasource:
+      ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle".
+      dc_database_type: ""
+      ## Provide the primary database server name. If your database server name cannot be resolved by DNS, then provide the corresponding IP address for the `database_ip` parameter below.
+      database_servername: ""
+      ## Provide the name of the BASE database for ACA. For example: "BASECA"
+      database_name: ""
+      ## Provide the names of the TENANT databases for ACA.
+      tenant_databases:
+        - ""
+      ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+      database_port: ""
+      ## Enable SSL/TLS for database communication. Refer to Knowledge Center for more info.
+      dc_database_ssl_enabled: false
+
+      ######################################################################################
+      ## If the database type is "Db2HADR", then complete the rest of the parameters below.
+      ## Otherwise, remove or comment out the rest of the parameters below.
+      ######################################################################################
+
+      ## Provide the standby database server name. If your standby database server name cannot be resolved by DNS, then provide the corresponding IP address for the `dc_hadr_standby_ip` parameter below.
+      dc_hadr_standby_servername: ""
+      ## Provide the standby database server port. For Db2, the default is "50000".
+      dc_hadr_standby_port: ""
+      ## Provide the validation timeout. If you have no preference, keep the default value.
+      dc_hadr_validation_timeout: 15
+      ## Provide the retry interval. If you have no preference, keep the default value.
+      dc_hadr_retry_interval_for_client_reroute: 15
+      ## Provide the max number of retries. If you have no preference, keep the default value.
+      dc_hadr_max_retries_for_client_reroute: 3
+      ## Provide the primary database server IP address if database_servername cannot be resolved by DNS.
+      database_ip: ""
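+      ######################################################################################
+      ## For illustration only (hypothetical values, not part of the shipped template):
+      ## a Db2 HADR configuration for this datasource might be filled in as follows.
+      ######################################################################################
+      # dc_database_type: "db2HADR"
+      # database_servername: "db2-primary.example.com"
+      # database_port: "50000"
+      # dc_hadr_standby_servername: "db2-standby.example.com"
+      # dc_hadr_standby_port: "50000"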
+      ## Provide the standby database server IP address if dc_hadr_standby_servername cannot be resolved by DNS.
+      dc_hadr_standby_ip: ""
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_application.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_application.yaml
new file mode 100644
index 00000000..b699c0aa
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_application.yaml
@@ -0,0 +1,72 @@
+
+###############################################################################
+##
+##Licensed Materials - Property of IBM
+##
+##(C) Copyright IBM Corp. 2020. All Rights Reserved.
+##
+##US Government Users Restricted Rights - Use, duplication or
+##disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+##
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+  name: icp4adeploy
+  labels:
+    app.kubernetes.io/instance: ibm-dba
+    app.kubernetes.io/managed-by: ibm-dba
+    app.kubernetes.io/name: ibm-dba
+    release: 20.0.2
+spec:
+  appVersion: 20.0.2
+
+  #####################################################################################################################
+  ## The contents of this template CR file reflect only the specific parameters and configuration
+  ## settings applicable to the represented ICP4A capability.
+  ##
+  ## These values/configuration sections are to be used when manually assembling or updating the main
+  ## ICP4A CR that is being applied in order to install an ICP4A environment.
+  ##
+  ## If you are in the process of preparing a new install of an ICP4A environment,
+  ## you should merge the required values and configuration sections from this file into the
+  ## starting point CR template: ibm_cp4a_cr_enterprise_foundation.yaml available in the
+  ## same location as this template.
+  ##
+  ## If you are updating an existing ICP4A environment, you should merge the required values and configuration
+  ## sections from this template into the main ICP4A CR file already applied in the environment.
+  ##
+  ######################################################################################################################
+  shared_configuration:
+
+    ## CP4A patterns or capabilities to be deployed. This CR represents the "application" pattern (aka Business Automation Manager), which includes the following
+    ## mandatory components: ban (Business Automation Navigator), ums (User Management Service), app_engine (Application Engine) and optional components: app_designer
+    sc_deployment_patterns: application
+
+    ## The optional components to be installed if listed here. This is normally populated by the User script based on input from the user.
+    ## The optional components are: app_designer
+    sc_optional_components:
+
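+    ## For example (illustrative only; the exact value format is normally produced by the User
+    ## script), to also deploy the Application Designer feature this could be set as:
+    # sc_optional_components: "app_designer"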
+  ###########################################################################
+  ## This section contains the app engine component configurations         #
+  ###########################################################################
+  application_engine_configuration:
+    ## The application_engine_configuration is a list. You can deploy multiple instances of App Engine and assign different configurations for each instance.
+    ## For each instance, application_engine_configuration.name and application_engine_configuration.hostname must have different values.
+    - name: workspace
+      # Adjust this one if you created the secret with a name other than the default
+      admin_secret_name: "{{ meta.name }}-workspace-aae-app-engine-admin-secret"
+      # Provide the application engine default administrator ID
+      admin_user: ""
+      database:
+        # Provide the database server hostname for application engine use
+        host: ""
+        # Provide the database name for application engine use
+        name: ""
+        # Provide the database server port for application engine use
+        port: ""
+        ## If you set up DB2 HADR and want to use it, you must configure alternative_host and alternative_port. Otherwise, leave them blank.
+        alternative_host:
+        alternative_port:
+        type: db2
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_content.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_content.yaml
new file mode 100644
index 00000000..9d49e9fa
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_content.yaml
@@ -0,0 +1,267 @@
+
+###############################################################################
+##
+##Licensed Materials - Property of IBM
+##
+##(C) Copyright IBM Corp. 2020. All Rights Reserved.
+##
+##US Government Users Restricted Rights - Use, duplication or
+##disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+##
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+  name: icp4adeploy
+  labels:
+    app.kubernetes.io/instance: ibm-dba
+    app.kubernetes.io/managed-by: ibm-dba
+    app.kubernetes.io/name: ibm-dba
+    release: 20.0.2
+spec:
+  ##########################################################################
+  ## This section contains the shared configuration for all CP4A components #
+  ##########################################################################
+  appVersion: 20.0.2
+  shared_configuration:
+
+    ## FileNet Content Manager (FNCM) license and possible values are: user, non-production, and production.
+    ## This value could be different from the rest of the licenses.
+    sc_deployment_fncm_license: ""
+
+    ## All CP4A components must use/share the image_pull_secrets to pull images.
+    image_pull_secrets:
+      - admin.registrykey
+
+    ## All CP4A components must use/share the same docker image repository. For example, if IBM Entitled Registry is used, then
+    ## it should be "cp.icr.io". Otherwise, it will be a local docker registry.
+    sc_image_repository: cp.icr.io
+
+    ## All CP4A components must use/share the root_ca_secret in order for integration.
+    root_ca_secret: icp4a-root-ca
+
+    ## CP4A patterns or capabilities to be deployed. This CR represents the "content" pattern (aka FileNet Content Manager), which includes the following
+    ## mandatory components: cpe, icn (BAN/Navigator), graphql and optional components: cmis, es (External Share).
+    sc_deployment_patterns: content
+
+    ## The optional components to be installed if listed here. This is normally populated by the User script based on input from the user.
+    ## The optional components are: cmis, css (Content Search Services), es (External Share).
+    sc_optional_components:
+
+    ## The deployment type as selected by the user. Possible values are: demo, enterprise.
+    sc_deployment_type: enterprise
+
+    ## Specify the RunAsUser for the security context of the pod. This is usually a numeric value that corresponds to a user ID.
+    sc_run_as_user:
+
+    ## The platform to be deployed specified by the user. Possible values are: OCP and other. This is normally populated by the User script
+    ## based on input from the user.
+    sc_deployment_platform:
+
+    ## For OCP, this is used to create a route; you should input a valid hostname in the required field.
+    sc_deployment_hostname_suffix: "{{ meta.name }}."
+
+    ## If the root certificate authority (CA) key of the external service is not signed by the operator root CA key, provide the TLS certificate of
+    ## the external service to the component's truststore.
+    trusted_certificate_list: []
+
+    ## Shared encryption key secret name that is used for Workstream Services and Process Federation Server integration.
+    encryption_key_secret: icp4a-shared-encryption-key
+
+    ## Enable/disable ECM (FNCM) / BAN initialization (e.g., creation of P8 domain, creation/configuration of object stores,
+    ## creation/configuration of CSS servers, and initialization of Navigator (ICN)). If the "initialize_configuration" section
+    ## is defined in the CR, then that configuration will take precedence, overriding this parameter. Note that if you are upgrading or
+    ## migrating, set this parameter to "false" since the env has been previously initialized.
+    sc_content_initialization: false
+    ## Enable/disable the ECM (FNCM) / BAN verification (e.g., creation of test folder, creation of test document,
+    ## execution of CBR search, and creation of Navigator demo repository and desktop). If the "verify_configuration"
+    ## section is defined in the CR, then that configuration will take precedence, overriding this parameter. Note that if you are upgrading or
+    ## migrating, set this parameter to "false" since the env has been previously verified.
+    sc_content_verification: false
+
+    ## On OCP 3.x and 4.x, the User script will populate these three (3) parameters based on your input for "enterprise" deployment.
+    ## If you are manually deploying without using the User script, then you should provide the different storage classes for the slow, medium
+    ## and fast storage parameters below. If you only have 1 storage class defined, then you can use that 1 storage class for all 3 parameters.
+    storage_configuration:
+      sc_slow_file_storage_classname: ""
+      sc_medium_file_storage_classname: ""
+      sc_fast_file_storage_classname: ""
+
+
+  ## The beginning section of LDAP configuration for CP4A
+  ldap_configuration:
+    ## The possible values are: "IBM Security Directory Server" or "Microsoft Active Directory"
+    lc_selected_ldap_type: ""
+
+    ## The name of the LDAP server to connect
+    lc_ldap_server: ""
+
+    ## The port of the LDAP server to connect. Some possible values are: 389, 636, etc.
+    lc_ldap_port: ""
+
+    ## The LDAP bind secret for LDAP authentication. The secret is expected to have ldapUsername and ldapPassword keys. Refer to Knowledge Center for more info.
+    lc_bind_secret: ldap-bind-secret
+
+    ## The LDAP base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc.
+    lc_ldap_base_dn: ""
+
+    ## Enable SSL/TLS for LDAP communication. Refer to Knowledge Center for more info.
+    lc_ldap_ssl_enabled: true
+
+    ## The name of the secret that contains the LDAP SSL/TLS certificate.
+    lc_ldap_ssl_secret_name: ""
+
+    ## The LDAP user name attribute. One possible value is "*:cn" for TDS and "user:sAMAccountName" for AD. Refer to Knowledge Center for more info.
+    lc_ldap_user_name_attribute: ""
+
+    ## The LDAP user display name attribute. One possible value is "cn" for TDS and "sAMAccountName" for AD. Refer to Knowledge Center for more info.
+    lc_ldap_user_display_name_attr: ""
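+    ## For instance (illustrative values drawn from the comments above), a Microsoft Active
+    ## Directory setup would typically use:
+    # lc_selected_ldap_type: "Microsoft Active Directory"
+    # lc_ldap_user_name_attribute: "user:sAMAccountName"
+    # lc_ldap_user_display_name_attr: "sAMAccountName"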
+    ## The LDAP group base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc.
+    lc_ldap_group_base_dn: ""
+
+    ## The LDAP group name attribute. One possible value is "*:cn" for TDS and "*:cn" for AD. Refer to Knowledge Center for more info.
+    lc_ldap_group_name_attribute: "*:cn"
+
+    ## The LDAP group display name attribute. One possible value for both TDS and AD is "cn". Refer to Knowledge Center for more info.
+    lc_ldap_group_display_name_attr: "cn"
+
+    ## The LDAP group membership search filter string. One possible value is "(&(cn=%v)(|(objectclass=groupOfNames)(objectclass=groupOfUniqueNames)(objectclass=groupOfURLs))" for TDS
+    ## and "(&(cn=%v)(objectcategory=group))" for AD.
+    lc_ldap_group_membership_search_filter: ""
+
+    ## The LDAP group membership ID map. One possible value is "groupofnames:member" for TDS and "memberOf:member" for AD.
+    lc_ldap_group_member_id_map: ""
+
+    ## The User script will uncomment the section needed based on the user's input. If you are deploying without the User script,
+    ## uncomment the necessary section (depending on whether you are using Active Directory (ad) or Tivoli Directory Service (tds)) accordingly.
+    # ad:
+    #   lc_ad_gc_host: ""
+    #   lc_ad_gc_port: ""
+    #   lc_user_filter: "(&(samAccountName=%v)(objectClass=user))"
+    #   lc_group_filter: "(&(samAccountName=%v)(objectclass=group))"
+    # tds:
+    #   lc_user_filter: "(&(cn=%v)(objectclass=person))"
+    #   lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))"
+
+  ## The User script should only uncomment this section if External Share is selected as an optional component.
+  ## If you are deploying without the User script, uncomment the necessary section (depending
+  ## on whether you are using Active Directory (ad) or Tivoli Directory Service (tds)) accordingly.
+  # ext_ldap_configuration:
+  #   lc_selected_ldap_type: ""
+  #   lc_ldap_server: ""
+  #   lc_ldap_port: ""
+  #   lc_bind_secret: ldap-bind-secret
+  #   lc_ldap_base_dn: ""
+  #   lc_ldap_ssl_enabled: true
+  #   lc_ldap_ssl_secret_name: ""
+  #   lc_ldap_user_name_attribute: ""
+  #   lc_ldap_user_display_name_attr: ""
+  #   lc_ldap_group_base_dn: ""
+  #   lc_ldap_group_name_attribute: ""
+  #   lc_ldap_group_display_name_attr: "cn"
+  #   lc_ldap_group_membership_search_filter: ""
+  #   lc_ldap_group_member_id_map: ""
+
+  ## The User script will uncomment the section needed based on the user's input.
+  ## If you are deploying without the User script, uncomment the necessary section (depending
+  ## on whether you are using Active Directory (ad) or Tivoli Directory Service (tds)) accordingly.
+  # ad:
+  ## This is the Global Catalog port for the LDAP
+  #   lc_ad_gc_host: ""
+  #   lc_ad_gc_port: ""
+  #   lc_user_filter: "(&(samAccountName=%v)(objectClass=user))"
+  #   lc_group_filter: "(&(samAccountName=%v)(objectclass=group))"
+  # tds:
+  #   lc_user_filter: "(&(cn=%v)(objectclass=person))"
+  #   lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))"
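+  ## Illustrative note (not part of the shipped template): the LDAP bind secret referenced by
+  ## lc_bind_secret above can be created with the two expected keys, for example:
+  # kubectl create secret generic ldap-bind-secret \
+  #   --from-literal=ldapUsername=<bind-user> \
+  #   --from-literal=ldapPassword=<bind-password>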
+  ## The beginning section of database configuration for CP4A
+  datasource_configuration:
+    ## The database configuration for the GCD datasource for CPE
+    dc_gcd_datasource:
+      ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle".
+      dc_database_type: ""
+      ## The GCD non-XA datasource name. The default value is "FNGCDDS".
+      dc_common_gcd_datasource_name: "FNGCDDS"
+      ## The GCD XA datasource name. The default value is "FNGCDDSXA".
+      dc_common_gcd_xa_datasource_name: "FNGCDDSXA"
+      ## Provide the database server name or IP address of the database server.
+      database_servername: ""
+      ## Provide the name of the database for the GCD for CPE. For example: "GCDDB"
+      database_name: ""
+      ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+      database_port: ""
+      ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//<hostname>:1521/orcl"
+      dc_oracle_gcd_jdbc_url: ""
+
+      ## If the database type is Db2 HADR, then complete the rest of the parameters below.
+      ## Provide the database server name or IP address of the standby database server.
+      dc_hadr_standby_servername: ""
+      ## Provide the standby database server port. For Db2, the default is "50000".
+      dc_hadr_standby_port: ""
+      ## Provide the validation timeout. If you have no preference, keep the default value.
+      dc_hadr_validation_timeout: 15
+      ## Provide the retry interval. If you have no preference, keep the default value.
+      dc_hadr_retry_interval_for_client_reroute: 15
+      ## Provide the max number of retries. If you have no preference, keep the default value.
+      dc_hadr_max_retries_for_client_reroute: 3
+
+    ## The database configuration for the object store 1 (OS1) datasource for CPE
+    dc_os_datasources:
+      ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle". This should be the same as the
+      ## GCD configuration above.
+      - dc_database_type: ""
+        ## The OS1 non-XA datasource name. The default value is "FNOS1DS".
+        dc_common_os_datasource_name: "FNOS1DS"
+        ## The OS1 XA datasource name. The default value is "FNOS1DSXA".
+        dc_common_os_xa_datasource_name: "FNOS1DSXA"
+        ## Provide the database server name or IP address of the database server. This should be the same as the
+        ## GCD configuration above.
+        database_servername: ""
+        ## Provide the name of the database for the object store 1 for CPE. For example: "OS1DB"
+        database_name: ""
+        ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+        database_port: ""
+        ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//<hostname>:1521/orcl"
+        dc_oracle_os_jdbc_url: ""
+        ######################################################################################
+        ## If the database type is "Db2HADR", then complete the rest of the parameters below.
+        ## Otherwise, remove or comment out the rest of the parameters below.
+        ######################################################################################
+        dc_hadr_standby_servername: ""
+        ## Provide the standby database server port. For Db2, the default is "50000".
+        dc_hadr_standby_port: ""
+        ## Provide the validation timeout. If you have no preference, keep the default value.
+        dc_hadr_validation_timeout: 15
+        ## Provide the retry interval. If you have no preference, keep the default value.
+        dc_hadr_retry_interval_for_client_reroute: 15
+        ## Provide the max number of retries. If you have no preference, keep the default value.
+        dc_hadr_max_retries_for_client_reroute: 3
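+      ## dc_os_datasources is a list: to configure additional object stores, append more entries.
+      ## A sketch with hypothetical names for a second object store (illustrative only):
+      # - dc_database_type: "db2"
+      #   dc_common_os_datasource_name: "FNOS2DS"
+      #   dc_common_os_xa_datasource_name: "FNOS2DSXA"
+      #   database_servername: "<same-database-server>"
+      #   database_name: "OS2DB"
+      #   database_port: "50000"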
+    ## The database configuration for ICN (Navigator) - aka BAN (Business Automation Navigator)
+    dc_icn_datasource:
+      ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle". This should be the same as the
+      ## GCD and object store configuration above.
+      dc_database_type: ""
+      ## Provide the ICN datasource name. The default value is "ECMClientDS".
+      dc_common_icn_datasource_name: "ECMClientDS"
+      database_servername: ""
+      ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+      database_port: ""
+      ## Provide the name of the database for ICN (Navigator). For example: "ICNDB"
+      database_name: ""
+      ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//<hostname>:1521/orcl"
+      dc_oracle_icn_jdbc_url: ""
+      ######################################################################################
+      ## If the database type is "Db2HADR", then complete the rest of the parameters below.
+      ## Otherwise, remove or comment out the rest of the parameters below.
+      ######################################################################################
+      dc_hadr_standby_servername: ""
+      ## Provide the standby database server port. For Db2, the default is "50000".
+      dc_hadr_standby_port: ""
+      ## Provide the validation timeout. If you have no preference, keep the default value.
+      dc_hadr_validation_timeout: 15
+      ## Provide the retry interval. If you have no preference, keep the default value.
+      dc_hadr_retry_interval_for_client_reroute: 15
+      ## Provide the max number of retries. If you have no preference, keep the default value.
+      dc_hadr_max_retries_for_client_reroute: 3
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_decisions.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_decisions.yaml
new file mode 100644
index 00000000..12600f8d
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_decisions.yaml
@@ -0,0 +1,90 @@
+
+###############################################################################
+##
+##Licensed Materials - Property of IBM
+##
+##(C) Copyright IBM Corp. 2020. All Rights Reserved.
+##
+##US Government Users Restricted Rights - Use, duplication or
+##disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+##
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+  name: icp4adeploy
+  labels:
+    app.kubernetes.io/instance: ibm-dba
+    app.kubernetes.io/managed-by: ibm-dba
+    app.kubernetes.io/name: ibm-dba
+    release: 20.0.2
+spec:
+  appVersion: 20.0.2
+
+  #################################################################################################################
+  ## The contents of this template CR file reflect only the specific parameters and configuration
+  ## settings applicable to the represented ICP4A capability.
+  ##
+  ## These values/configuration sections are to be used when manually assembling or updating the main
+  ## ICP4A CR that is being applied in order to install an ICP4A environment.
+  ##
+  ## If you are in the process of preparing a new install of an ICP4A environment,
+  ## you should merge the required values and configuration sections from this file into the
+  ## starting point CR template: ibm_cp4a_cr_enterprise_foundation.yaml available in the
+  ## same location as this template.
+  ##
+  ## If you are updating an existing ICP4A environment, you should merge the required values and configuration
+  ## sections from this template into the main ICP4A CR file already applied in the environment.
+  ##
+  ######################################################################################################################
+
+  shared_configuration:
+    ## CP4A patterns or capabilities to be deployed. This CR represents the Operational Decision Manager "decisions" pattern,
+    ## which brings Decision Center, Rule Execution Server and Decision Runner, based on the user specification in the
+    ## sc_optional_components specification
+    sc_deployment_patterns: decisions
+
+    ## The optional components to be installed if listed here.
+    ## This is normally populated by the deploy script based on input from the user.
+    ## The user can also manually specify the optional components to be deployed here.
+    ## This pattern has 3 selectable components: decisionCenter, decisionRunner, and decisionServerRuntime, where
+    ## decisionServerRuntime represents the Rule Execution Server.
+    ## If decisionCenter is set, you also have to set the 'odm_configuration.decisionCenter.enabled' flag to true to install it.
+    ## If decisionRunner is set, you also have to set the 'odm_configuration.decisionRunner.enabled' flag to true to install it.
+    ## If decisionServerRuntime is set, you also have to set the 'odm_configuration.decisionServerRuntime.enabled' flag to true to install it.
+    sc_optional_components:
+
+  ## The beginning section of database configuration for CP4A
+  datasource_configuration:
+    dc_odm_datasource:
+      ## Provide the database type from your infrastructure. The possible values are "db2" or "oracle".
+      dc_database_type: "db2"
+      ## Provide the database server name or IP address of the database server.
+      database_servername: ""
+      ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+      dc_common_database_port: ""
+      ## Provide the name of the database for ODM. For example: "ODMDB"
+      dc_common_database_name: ""
+      ## The name of the secret that contains the credentials to connect to the database.
+      dc_common_database_instance_secret: ""
+
+
+  ########################################################################
+  ########   IBM Operational Decision Manager configuration       ########
+  ########################################################################
+  odm_configuration:
+    # To enable the ODM runtime.
+    decisionServerRuntime:
+      enabled: true
+    # To enable the authoring part
+    decisionRunner:
+      enabled: true
+    decisionCenter:
+      enabled: true
+
+
+    # For UMS
+    # customization:
+    ## Customizes user authentication and management by realizing a mapping between UMS roles and LDAP groups.
+    #   authSecretRef: ""
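+  ## For example (illustrative only; the exact value format is normally produced by the deploy
+  ## script), to deploy only Decision Center and Decision Runner you would list them in
+  ## sc_optional_components and enable the matching flags:
+  # sc_optional_components: "decisionCenter,decisionRunner"
+  # odm_configuration:
+  #   decisionCenter:
+  #     enabled: true
+  #   decisionRunner:
+  #     enabled: true
+  #   decisionServerRuntime:
+  #     enabled: false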
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_decisions_ads.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_decisions_ads.yaml
new file mode 100644
index 00000000..db85db5b
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_decisions_ads.yaml
@@ -0,0 +1,67 @@
+###############################################################################
+##
+##Licensed Materials - Property of IBM
+##
+##(C) Copyright IBM Corp. 2020. All Rights Reserved.
+##
+##US Government Users Restricted Rights - Use, duplication or
+##disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+##
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+  name: icp4adeploy
+  labels:
+    app.kubernetes.io/instance: ibm-dba
+    app.kubernetes.io/managed-by: ibm-dba
+    app.kubernetes.io/name: ibm-dba
+    release: 20.0.2
+spec:
+  appVersion: 20.0.2
+
+  #################################################################################################################
+  ## The contents of this template CR file reflect only the specific parameters and configuration
+  ## settings applicable to the represented ICP4A capability.
+  ##
+  ## These values/configuration sections are to be used when manually assembling or updating the main
+  ## ICP4A CR that is being applied in order to install an ICP4A environment.
+  ##
+  ## If you are in the process of preparing a new install of an ICP4A environment,
+  ## you should merge the required values and configuration sections from this file into the
+  ## starting point CR template: ibm_cp4a_cr_enterprise_foundation.yaml available in the
+  ## same location as this template.
+  ##
+  ## If you are updating an existing ICP4A environment, you should merge the required values and configuration
+  ## sections from this template into the main ICP4A CR file already applied in the environment.
+  ##
+  ######################################################################################################################
+
+  shared_configuration:
+
+
+    ## CP4A patterns or capabilities to be deployed. This CR represents the "decisions_ads" pattern that brings ADS Designer and ADS Runtime.
+    sc_deployment_patterns: decisions_ads
+
+    ## The optional components to be installed if listed here.
+    ## This is normally populated by the User script based on input from the user.
+    ## The user can also manually specify the optional components to be deployed here.
+    ## ADS has two optional components: 'ads_designer' and 'ads_runtime'.
+    ## If ads_designer is set, you also have to set the 'ads_configuration.decision_designer.enabled' flag to true to install it.
+    ## If ads_runtime is set, you also have to set the 'ads_configuration.decision_runtime.enabled' flag to true to install it.
+    sc_optional_components:
+
+
+
+  ########################################################################
+  ########   IBM Business Automation Decision Services            ########
+  ########################################################################
+  # You can further customize the ads_configuration section as explained in the knowledge center.
+  # See the ibm_cp4a_cr_enterprise_FC_decisions_ads.yaml file in descriptors/patterns for all parameters and their default values.
+  ads_configuration:
+
+    decision_designer:
+      enabled: false
+
+    decision_runtime:
+      enabled: false
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_digitalworker.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_digitalworker.yaml
new file mode 100644
index 00000000..dea9a7ec
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_digitalworker.yaml
@@ -0,0 +1,82 @@
+###############################################################################
+##
+##Licensed Materials - Property of IBM
+##
+##(C) Copyright IBM Corp. 2020. All Rights Reserved.
+##
+##US Government Users Restricted Rights - Use, duplication or
+##disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+##
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+  name: icp4adeploy
+  labels:
+    app.kubernetes.io/instance: ibm-dba
+    app.kubernetes.io/managed-by: ibm-dba
+    app.kubernetes.io/name: ibm-dba
+    release: 20.0.2
+spec:
+  appVersion: 20.0.2
+
+  #################################################################################################################
+  ## The contents of this template CR file reflect only the specific parameters and configuration
+  ## settings applicable to the represented ICP4A capability.
+  ##
+  ## These values/configuration sections are to be used when manually assembling or updating the main
+  ## ICP4A CR that is being applied in order to install an ICP4A environment.
+  ##
+  ## If you are in the process of preparing a new install of an ICP4A environment,
+  ## you should merge the required values and configuration sections from this file into the
+  ## starting point CR template: ibm_cp4a_cr_enterprise_foundation.yaml available in the
+  ## same location as this template.
+  ##
+  ## If you are updating an existing ICP4A environment, you should merge the required values and configuration
+  ## sections from this template into the main ICP4A CR file already applied in the environment.
+  ##
+  ######################################################################################################################
+  shared_configuration:
+    sc_deployment_patterns: "digitalworker"
+    # ADW only reacts to one optional component: bai
+    sc_optional_components:
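+    ## For example (illustrative only; the exact value format is normally produced by the User
+    ## script), to monitor ADW with Business Automation Insights:
+    # sc_optional_components: "bai"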
+  ########################################################################
+  ########   IBM Business Automation Digital Worker               ########
+  ########################################################################
+  # You can further customize the adw_configuration section as explained in the knowledge center.
+  # See the ibm_cp4a_cr_enterprise_FC_digitalworker.yaml file in descriptors/patterns for all parameters and their default values.
+  adw_configuration:
+
+    designer:
+      hostname: "https://designer.{{ shared_configuration.sc_deployment_hostname_suffix }}"
+      service_type: Route
+
+    runtime:
+      hostname: "https://runtime.{{ shared_configuration.sc_deployment_hostname_suffix }}"
+      service_type: Route
+      persistence:
+        storageClassName: "{{ shared_configuration.storage_configuration.sc_fast_file_storage_classname }}"
+
+    management:
+      hostname: "https://management.{{ shared_configuration.sc_deployment_hostname_suffix }}"
+      service_type: Route
+      persistence:
+        storageClassName: "{{ shared_configuration.storage_configuration.sc_medium_file_storage_classname }}"
+
+    mongo:
+      persistence:
+        storageClassName: "{{ shared_configuration.storage_configuration.sc_fast_file_storage_classname }}"
+
+    npmRegistry:
+      persistence:
+        storageClassName: "{{ shared_configuration.storage_configuration.sc_medium_file_storage_classname }}"
+
+    #baiKafka: If you want to monitor ADW with BAI, uncomment this block and provide the necessary parameters
+      # specify the ingress topic where ADW events should be sent
+      #topic: "{{ meta.name }}-ibm-bai-ingress"
+    #baiElasticsearch:
+      # specify the URL of Elasticsearch
+      #url: ""
+      # specify the URL of Kibana
+      #kibanaUrl: ""
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_foundation.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_foundation.yaml
new file mode 100644
index 00000000..4cbbf19e
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_foundation.yaml
@@ -0,0 +1,352 @@
+
+###############################################################################
+##
+##Licensed Materials - Property of IBM
+##
+##(C) Copyright IBM Corp. 2020. All Rights Reserved.
+##
+##US Government Users Restricted Rights - Use, duplication or
+##disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+##
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+  name: icp4adeploy
+  labels:
+    app.kubernetes.io/instance: ibm-dba
+    app.kubernetes.io/managed-by: ibm-dba
+    app.kubernetes.io/name: ibm-dba
+    release: 20.0.2
+spec:
+  ## CP4A application version
+  appVersion: 20.0.2
+
+  #####################################################################################################################
+  ## This template CR file reflects the ICP4A platform foundation components shared by most of the other ICP4A capabilities,
+  ## as well as shared configurations for settings and services applicable to the ICP4A installation environment such
+  ## as LDAP, data sources, images information, storage classes, and other parameters you can find in the
+  ## shared_configuration section below.
+  ##
+  ## The label, or "pattern name", for these foundation components is "foundation". You see this label set in the
+  ## sc_deployment_patterns parameter in the shared_configuration section of this file.
+  ##
+  ## The ICP4A operator will automatically install the required foundation components, based on the description below, when
+  ## it detects the "foundation" pattern name amongst the sc_deployment_patterns CR parameter of the CR being applied.
+  ##
+  ## The foundation components that are automatically installed, by default, by the ICP4A operator are:
+  ##   - Business Automation Navigator (BAN)
+  ##   - Resource Registry (RR)
+  ##
+  ## These other foundation components are installed based on their need, but their related minimal configuration
+  ## is included in this CR template.
+  ## is included in this CR template. Those additional platform components are:
+  ##
+  ##   - Business Automation Studio (BAS)
+  ##   - User Management Services (UMS)
+  ##   - Business Automation Insights (BAI)
+  ##
+  ## Note that from the above foundation components, only BAI (with label "bai") will need to be explicitly specified
+  ## in the sc_optional_components parameter (in the shared_configuration) to inform the operator to install it, as
+  ## this installation of BAI reflects a user choice since BAI is not a pre-requisite component for any ICP4A capability.
+  ##
+  ## This template CR contains the minimal configuration required for foundation components, since the ICP4A operator
+  ## will take care of defaults for a typical configuration.
+  ##
+  ## For BAN, RR, UMS and BAI you will not find dedicated configuration sections in this template since
+  ## default configurations are being used by the operator.
+  ##
+  ## For BAN, UMS and BAI, you will only find configuration parameters in the shared_configuration section that need
+  ## to be configured.
+  ##
+  ## For BAS, you will find a minimal bastudio_configuration section in the CR, which will need to be present
+  ## and configured by you if you are planning to install any of the following ICP4A capabilities and features:
+  ##   - Automation Content Analyzer (contentanalyzer)
+  ##   - Automation Digital Worker (digitalworker)
+  ##   - the Applications Designer (app_designer) feature with Business Automation Application (application)
+  ##   - the Decision Designer feature (ads_designer) with Automation Decision Services (decisions_ads)
+  ##
+  ## Note: In case none of these is to be installed, the bastudio_configuration section should be commented out or deleted
+  ## from the final CR that will be applied.
+  ##
+  ## How to use this template:
+  ##
+  ## Make a copy of this CR foundation template as the starting point for the ICP4A CR YAML you are manually assembling.
+  ## Then paste into that CR the information and configuration from the respective
+  ## ibm_cp4a_cr_enterprise_.yaml CR template files available in the same location.
+  ##
+  ######################################################################################################################
+  shared_configuration:
+
+    ## FileNet Content Manager (FNCM) license and possible values are: user, non-production, and production.
+    ## This value could be different from the other licenses in the CR.
+    sc_deployment_fncm_license: ""
+
+    ## Business Automation Workflow (BAW) license and possible values are: user, non-production, and production.
+    ## This value could be different from the other licenses in the CR.
+    sc_deployment_baw_license: ""
+
+    ## Use this parameter to specify the license for the CP4A deployment.
+    ## The possible values are: non-production and production; if not set, the license
+    ## defaults to production. This value could be different from the other licenses in the CR.
+    sc_deployment_license: ""
+
+    ## All CP4A components must use/share the image_pull_secrets to pull images
+    image_pull_secrets:
+    - admin.registrykey
+
+    ## All CP4A components must use/share the same docker image repository. For example, if IBM Entitled Registry is used, then
+    ## it should be "cp.icr.io". Otherwise, it will be a local docker registry.
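+    ## As a hypothetical illustration only (the registry name below is an assumption,
+    ## not a default of this template), a local registry value might look like
+    ##   sc_image_repository: docker-registry.default.svc:5000
+    ## whereas the IBM Entitled Registry value is the one shown below.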
+    sc_image_repository: cp.icr.io
+
+    images:
+      keytool_job_container:
+        repository: cp.icr.io/cp/cp4a/ums/dba-keytool-jobcontainer
+        tag: 20.0.2
+      dbcompatibility_init_container:
+        repository: cp.icr.io/cp/cp4a/aae/dba-dbcompatibility-initcontainer
+        tag: 20.0.2
+      keytool_init_container:
+        repository: cp.icr.io/cp/cp4a/ums/dba-keytool-initcontainer
+        tag: 20.0.2
+      umsregistration_initjob:
+        repository: cp.icr.io/cp/cp4a/aae/dba-umsregistration-initjob
+        tag: 20.0.2
+
+    ## All CP4A components should use this pull_policy as the default, but it can be overridden by each component
+    pull_policy: IfNotPresent
+
+    ## All CP4A components must use/share the root_ca_secret in order for integration
+    root_ca_secret: icp4a-root-ca
+
+    ## CP4A patterns or capabilities to be deployed. This CR represents the "foundation" pattern, which includes the following
+    ## mandatory components: icn (BAN/Navigator) and rr (Resource Registry), and the optional components: ums, bas, and bai
+    sc_deployment_patterns: foundation
+
+    ## The optional components are installed if listed here. This is normally populated by the User script based on input from the user. The user can
+    ## also manually specify the optional components to be deployed here. For this foundation CR, the optional components are: ums, bas, and bai
+    sc_optional_components:
+
+    ## The deployment type as selected by the user. Possible values are: demo, enterprise
+    sc_deployment_type: enterprise
+
+    ## The platform to be deployed specified by the user. Possible values are: OCP and other. This is normally populated by the User script
+    ## based on input from the user.
+    sc_deployment_platform:
+
+    ## For OCP, this is used to create a route; you should input a valid hostname in the required field.
+    sc_deployment_hostname_suffix: "{{ meta.name }}."
+
+    ## If the root certificate authority (CA) key of the external service is not signed by the operator root CA key, provide the TLS certificate of
+    ## the external service to the component's truststore.
+    trusted_certificate_list: []
+
+    ## On OCP 3.x and 4.x, the User script will populate these three (3) parameters based on your input for "enterprise" deployment.
+    ## If you are manually deploying without using the User script, then you would provide the different storage classes for the slow, medium
+    ## and fast storage parameters below. If you only have 1 storage class defined, then you can use that 1 storage class for all 3 parameters.
+    storage_configuration:
+      sc_slow_file_storage_classname: ""
+      sc_medium_file_storage_classname: ""
+      sc_fast_file_storage_classname: ""
+
+    ##############################################################################################
+    # Kafka client configuration for IBM Business Automation Insights and other ICP4A products.
+    #
+    # The customization of the following 4 parameters is required only if you have
+    # specified "bai" as part of the sc_optional_components to specify that Business Automation
+    # Insights must be installed.
+    #
+    # Otherwise, if Business Automation Insights is not being installed, there is no need to configure
+    # these parameters and they can be kept empty.
+    ##############################################################################################
+    kafka_configuration:
+      # Comma-separated list of hosts:port for connection to the Kafka cluster.
+      # This field is mandatory for any Kafka configuration.
+      bootstrap_servers: ""
+      # Value for the Kafka security.protocol property
+      # Valid values: PLAINTEXT, SSL, SASL_PLAINTEXT, SASL_SSL. Default: PLAINTEXT.
+      security_protocol:
+      # Value for the Kafka sasl.mechanism property
+      # Valid values: PLAIN, SCRAM-SHA-512. Default: PLAIN.
+      sasl_mechanism:
+      # If the Kafka server requires authentication or uses SSL communications, the value of this field
+      # must provide the name of a secret that holds the following keys as base64-encoded strings:
+      #   kafka-username: Kafka username; leave empty if no authentication
+      #   kafka-password: Kafka password; leave empty if no authentication
+      #   kafka-server-certificate: server certificate for SSL communications; leave empty if SSL protocol is not used
+      connection_secret_name:
+
+    ## The beginning section of LDAP configuration for CP4A
+    ldap_configuration:
+      ## The possible values are: "IBM Security Directory Server" or "Microsoft Active Directory"
+      lc_selected_ldap_type: ""
+
+      ## The name of the LDAP server to connect
+      lc_ldap_server: ""
+
+      ## The port of the LDAP server to connect. Some possible values are: 389, 636, etc.
+      lc_ldap_port: ""
+
+      ## The LDAP bind secret for LDAP authentication. The secret is expected to have ldapUsername and ldapPassword keys. Refer to Knowledge Center for more info.
+      lc_bind_secret: ldap-bind-secret
+
+      ## The LDAP base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc.
+      lc_ldap_base_dn: ""
+
+      ## Enable SSL/TLS for LDAP communication. Refer to Knowledge Center for more info.
+      lc_ldap_ssl_enabled: true
+
+      ## The name of the secret that contains the LDAP SSL/TLS certificate.
+      lc_ldap_ssl_secret_name: ""
+
+      ## The LDAP user name attribute. One possible value is "*:cn" for TDS and "user:sAMAccountName" for AD. Refer to Knowledge Center for more info.
+      lc_ldap_user_name_attribute: ""
+
+      ## The LDAP user display name attribute. One possible value is "cn" for TDS and "sAMAccountName" for AD. Refer to Knowledge Center for more info.
+      lc_ldap_user_display_name_attr: ""
+
+      ## The LDAP group base DN. For example, "dc=example,dc=com", "dc=abc,dc=com", etc.
+      lc_ldap_group_base_dn: ""
+
+      ## The LDAP group name attribute. One possible value is "*:cn" for TDS and "*:cn" for AD. Refer to Knowledge Center for more info.
+      lc_ldap_group_name_attribute: "*:cn"
+
+      ## The LDAP group display name attribute. One possible value for both TDS and AD is "cn". Refer to Knowledge Center for more info.
+      lc_ldap_group_display_name_attr: "cn"
+
+      ## The LDAP group membership search filter string. One possible value is
+      ## "(&(cn=%v)(|(objectclass=groupOfNames)(objectclass=groupOfUniqueNames)(objectclass=groupOfURLs)))" for TDS
+      ## and "(&(cn=%v)(objectcategory=group))" for AD.
+      lc_ldap_group_membership_search_filter: ""
+
+      ## The LDAP group membership ID map. One possible value is "groupofnames:member" for TDS and "memberOf:member" for AD.
+      lc_ldap_group_member_id_map: ""
+
+      ## The User script will uncomment the section needed based on the user's input. If you are deploying without the User script,
+      ## uncomment the necessary section (depending on whether you are using Active Directory (ad) or Tivoli Directory Service (tds)) accordingly.
+      # ad:
+      #   lc_ad_gc_host: ""
+      #   lc_ad_gc_port: ""
+      #   lc_user_filter: "(&(samAccountName=%v)(objectClass=user))"
+      #   lc_group_filter: "(&(samAccountName=%v)(objectclass=group))"
+      # tds:
+      #   lc_user_filter: "(&(cn=%v)(objectclass=person))"
+      #   lc_group_filter: "(&(cn=%v)(|(objectclass=groupofnames)(objectclass=groupofuniquenames)(objectclass=groupofurls)))"
+
+    ## The beginning section of database configuration for CP4A
+    datasource_configuration:
+      ## The database configuration for ICN (Navigator) - aka BAN (Business Automation Navigator)
+      dc_icn_datasource:
+        ## Provide the database type from your infrastructure. The possible values are "db2" or "db2HADR" or "oracle". This should be the same as the
+        ## other datasource configurations in this CR.
+        dc_database_type: ""
+        ## Provide the ICN datasource name. The default value is "ECMClientDS".
+        dc_common_icn_datasource_name: "ECMClientDS"
+        database_servername: ""
+        ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+        database_port: ""
+        ## Provide the name of the database for ICN (Navigator). For example: "ICNDB"
+        database_name: ""
+        ## If the database type is Oracle, provide the Oracle DB connection string. For example, "jdbc:oracle:thin:@//:1521/orcl"
+        dc_oracle_icn_jdbc_url: ""
+        ######################################################################################
+        ## If the database type is "Db2HADR", then complete the rest of the parameters below.
+        ## Otherwise, remove or comment out the rest of the parameters below.
+        ######################################################################################
+        dc_hadr_standby_servername: ""
+        ## Provide the standby database server port. For Db2, the default is "50000".
+        dc_hadr_standby_port: ""
+        ## Provide the validation timeout. If you have no preference, keep the default value.
+        dc_hadr_validation_timeout: 15
+        ## Provide the retry interval. If you have no preference, keep the default value.
+        dc_hadr_retry_interval_for_client_reroute: 15
+        ## Provide the maximum number of retries. If you have no preference, keep the default value.
+        dc_hadr_max_retries_for_client_reroute: 3
+
+      ## The database configuration for UMS (User Management Service)
+      dc_ums_datasource:
+        ## Provide the database type from your infrastructure. The possible values are "db2" or "oracle". This should be the same as the
+        ## other datasource configurations above. Db2 with HADR is automatically activated if dc_ums_oauth_alternate_hosts and dc_ums_oauth_alternate_ports
+        ## are set.
+        dc_ums_oauth_type: ""
+        ## Provide the database server name or IP address of the database server.
+        dc_ums_oauth_host: ""
+        ## Provide the database server port. For Db2, the default is "50000". For Oracle, the default is "1521".
+        dc_ums_oauth_port: ""
+        ## Provide the name of the database for UMS. For example: "UMSDB"
+        dc_ums_oauth_name: ""
+        dc_ums_oauth_schema: OAuthDBSchema
+        dc_ums_oauth_ssl: true
+        dc_ums_oauth_ssl_secret_name:
+        dc_ums_oauth_driverfiles:
+        dc_ums_oauth_alternate_hosts:
+        dc_ums_oauth_alternate_ports:
+
+        ## The database configuration for teamserver
+        ## Provide the database type from your infrastructure. The possible values are "db2" or "oracle". This should be the same as the
+        ## other datasource configurations above. Db2 with HADR is automatically activated if dc_ums_teamserver_alternate_hosts and dc_ums_teamserver_alternate_ports
+        ## are set.
+        dc_ums_teamserver_type: ""
+        dc_ums_teamserver_host: ""
+        ## Provide the database server port. For Db2, the default is "50000".
For Oracle, the default is "1521".
+        dc_ums_teamserver_port: ""
+        ## Provide the name of the database for UMS teamserver. For example: "UMSDB"
+        dc_ums_teamserver_name: ""
+        dc_ums_teamserver_ssl: true
+        dc_ums_teamserver_ssl_secret_name:
+        dc_ums_teamserver_driverfiles:
+        dc_ums_teamserver_alternate_hosts:
+        dc_ums_teamserver_alternate_ports:
+
+  #-----------------------------------------------------------------------
+  # Configuration for IBM Business Automation Studio required for some ICP4A capabilities.
+  #-----------------------------------------------------------------------
+  bastudio_configuration:
+    # Adjust this one if you created the secret with a name other than the default
+    admin_secret_name: "{{ meta.name }}-bas-admin-secret"
+    # Provide the BAStudio default administrator ID
+    admin_user: ""
+    database:
+      # Provide the database server hostname for BAStudio use
+      host: ""
+      # Provide the database name for BAStudio use
+      # The database provided should be created by the BAStudio SQL script template.
+      name: ""
+      # Provide the database server port for BAStudio use
+      port: ""
+      # If you want to enable database automatic client reroute (ACR) for HADR, you must configure alternative_host and alternative_port. Otherwise, leave them blank.
+      alternative_host:
+      alternative_port:
+      type: db2
+    #-----------------------------------------------------------------------
+    # The App Engine Playback Server (playback_server) can have only one instance. This is different from App Engine (where application_engine_configuration is a list and you can deploy multiple instances).
+    #-----------------------------------------------------------------------
+    playback_server:
+      # Adjust this one if you created the secret with a name other than the default
+      admin_secret_name: playback-server-admin-secret
+      # Provide the playback application engine default administrator ID
+      admin_user: ""
+      database:
+        # Provide the database server hostname for playback application engine use
+        host: ""
+        # Provide the database name for playback application engine use
+        name: ""
+        # Provide the database server port for playback application engine use
+        port: ""
+        ## If you set up DB2 HADR and want to use it, you must configure alternative_host and alternative_port. Otherwise, leave them blank.
+        alternative_host:
+        alternative_port:
+        type: db2
+
+  ########################################################################
+  ########   IBM User and Group Management Service configuration  ########
+  ########################################################################
+  ums_configuration:
+    images:
+      ums:
+        repository: cp.icr.io/cp/cp4a/ums/ums
+        tag: 20.0.2
+
+  ########################################################################
+  ########   IBM Business Automation Insights configuration       ########
+  ########################################################################
+  bai_configuration:
\ No newline at end of file
diff --git a/descriptors/patterns/ibm_cp4a_cr_enterprise_ier_content.yaml b/descriptors/patterns/ibm_cp4a_cr_enterprise_ier_content.yaml
new file mode 100644
index 00000000..016905fa
--- /dev/null
+++ b/descriptors/patterns/ibm_cp4a_cr_enterprise_ier_content.yaml
@@ -0,0 +1,72 @@
+
+###############################################################################
+##
+##Licensed Materials - Property of IBM
+##
+##(C) Copyright IBM Corp. 2020. All Rights Reserved.
+##
+##US Government Users Restricted Rights - Use, duplication or
+##disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+##
+###############################################################################
+apiVersion: icp4a.ibm.com/v1
+kind: ICP4ACluster
+metadata:
+  name: icp4adeploy
+  labels:
+    app.kubernetes.io/instance: ibm-dba
+    app.kubernetes.io/managed-by: ibm-dba
+    app.kubernetes.io/name: ibm-dba
+    release: 20.0.2
+spec:
+  ##########################################################################
+  ## This section contains the shared configuration for all CP4A components #
+  ##########################################################################
+  appVersion: 20.0.2
+  shared_configuration:
+
+    ## FileNet Content Manager (FNCM) license and possible values are: user, non-production, and production.
+    ## This value could be different from the rest of the licenses.
+    sc_deployment_fncm_license: ""
+
+    ## All CP4A components must use/share the image_pull_secrets to pull images
+    image_pull_secrets:
+    - admin.registrykey
+
+    ## All CP4A components must use/share the same docker image repository. For example, if IBM Entitled Registry is used, then
+    ## it should be "cp.icr.io". Otherwise, it will be a local docker registry.
+    sc_image_repository: cp.icr.io
+
+    ## All CP4A components must use/share the root_ca_secret in order for integration
+    root_ca_secret: icp4a-root-ca
+
+    ## The optional components to be installed are listed here. This is normally populated by the User script based on input from the user.
+    ## The optional components are: cmis, es (External Share), tm (Task Manager) and ier (IBM Enterprise Records)
+    sc_optional_components: ier
+
+    ## The deployment type as selected by the user. Possible values are: demo, enterprise
+    sc_deployment_type: enterprise
+
+    ## The platform to be deployed specified by the user. Possible values are: OCP and other. This is normally populated by the User script
+    ## based on input from the user.
+    sc_deployment_platform:
+
+    ## For OCP, this is used to create a route; you should input a valid hostname in the required field.
+    sc_deployment_hostname_suffix: "{{ meta.name }}."
+
+    ## If the root certificate authority (CA) key of the external service is not signed by the operator root CA key, provide the TLS certificate of
+    ## the external service to the component's truststore.
+    trusted_certificate_list: []
+
+    ## Shared encryption key secret name that is used for Workstream Services and Process Federation Server integration.
+    encryption_key_secret: icp4a-shared-encryption-key
+
+    ## On OCP 3.x and 4.x, the User script will populate these three (3) parameters based on your input for "enterprise" deployment.
+    ## If you are manually deploying without using the User script, then you would provide the different storage classes for the slow, medium
+    ## and fast storage parameters below. If you only have 1 storage class defined, then you can use that 1 storage class for all 3 parameters.
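+    ## As a hypothetical illustration only (the class name below is an assumption, not
+    ## a default): a cluster with a single NFS-backed storage class could set all three
+    ## parameters to the same value, e.g. sc_slow_file_storage_classname: "managed-nfs-storage".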
+ storage_configuration: + sc_slow_file_storage_classname: "" + sc_medium_file_storage_classname: "" + sc_fast_file_storage_classname: "" diff --git a/descriptors/role.yaml b/descriptors/role.yaml index 3c467546..54966c20 100644 --- a/descriptors/role.yaml +++ b/descriptors/role.yaml @@ -17,7 +17,7 @@ metadata: app.kubernetes.io/instance: ibm-dba app.kubernetes.io/managed-by: ibm-dba app.kubernetes.io/name: ibm-dba - release: 20.0.1 + release: 20.0.2 rules: - apiGroups: - "" @@ -124,6 +124,7 @@ rules: - "ingresses" - "jobs" - "deployments" + - "networkpolicies" - "replicasets" verbs: - "*" diff --git a/descriptors/role_binding.yaml b/descriptors/role_binding.yaml index c7ac3974..3dccccff 100644 --- a/descriptors/role_binding.yaml +++ b/descriptors/role_binding.yaml @@ -16,7 +16,7 @@ metadata: app.kubernetes.io/instance: ibm-dba app.kubernetes.io/managed-by: ibm-dba app.kubernetes.io/name: ibm-dba - release: 20.0.1 + release: 20.0.2 subjects: - kind: ServiceAccount name: ibm-cp4a-operator diff --git a/descriptors/scc-fncm.yaml b/descriptors/scc-fncm.yaml deleted file mode 100755 index 96feb774..00000000 --- a/descriptors/scc-fncm.yaml +++ /dev/null @@ -1,38 +0,0 @@ -allowHostDirVolumePlugin: false -allowHostIPC: false -allowHostNetwork: false -allowHostPID: false -allowHostPorts: false -allowPrivilegeEscalation: true -allowPrivilegedContainer: false -allowedCapabilities: [] -apiVersion: security.openshift.io/v1 -defaultAddCapabilities: [] -fsGroup: - type: RunAsAny -groups: -- system:authenticated -kind: SecurityContextConstraints -metadata: - name: ibm-fncm-operator -priority: 0 -readOnlyRootFilesystem: false -requiredDropCapabilities: -- KILL -- MKNOD -- SETUID -- SETGID -runAsUser: - type: MustRunAsRange -seLinuxContext: - type: MustRunAs -supplementalGroups: - type: RunAsAny -users: [] -volumes: -- configMap -- downwardAPI -- emptyDir -- persistentVolumeClaim -- projected -- secret diff --git a/descriptors/service_account.yaml b/descriptors/service_account.yaml index 8a3813bd..0fb3ea82 100644 --- a/descriptors/service_account.yaml +++ b/descriptors/service_account.yaml @@ -16,6 +16,4 @@ metadata: app.kubernetes.io/instance: ibm-dba app.kubernetes.io/managed-by: ibm-dba app.kubernetes.io/name: ibm-dba - release: 20.0.1 -imagePullSecrets: -- name: hyc-icpcontent-docker-local.artifactory.swg-devops.com + release: 20.0.2 diff --git a/platform/k8s/README.md b/platform/k8s/README.md deleted file mode 100644 index 3b485697..00000000 --- a/platform/k8s/README.md +++ /dev/null @@ -1,14 +0,0 @@ -# IBM Cloud Pak for Automation 20.0.1 on Certified Kubernetes - -Any platform that includes a Certified Kubernetes version 1.11+ is supported by Cloud Pak for Automation 20.0.1. Use the following link to determine whether the vendor and/or platform is certified by Cloud Native Computing Foundation (CNCF) https://landscape.cncf.io/category=platform. - -The instructions provided apply generically for a Certified Kubernetes installation. Details might vary depending on which platform you are using. 
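Before choosing a use case below, it can help to confirm that the cluster actually reports a supported Kubernetes version. A minimal check, assuming `kubectl` is already configured against the target cluster:

```bash
# Print client and server versions; the server minor version should be 1.11 or higher
$ kubectl version --short
```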
-
-Choose which use case you need, and then follow the links below to find the right instructions:
-
-- [Install Cloud Pak for Automation 20.0.1 on Certified Kubernetes](install.md)
-- [Uninstall Cloud Pak for Automation 20.0.1 on Certified Kubernetes](uninstall.md)
-- [Upgrade Cloud Pak for Automation 19.0.3 to 20.0.1 on Certified Kubernetes](upgrade.md)
-- [Migrate 19.0.x persisted data to 20.0.1 on Certified Kubernetes](migrate.md)
-- [Update Cloud Pak for Automation 20.0.1 on Certified Kubernetes](update.md)
-
diff --git a/platform/k8s/install.md b/platform/k8s/install.md
deleted file mode 100644
index ccc5cc40..00000000
--- a/platform/k8s/install.md
+++ /dev/null
@@ -1,306 +0,0 @@
-# Installing Cloud Pak for Automation 20.0.1 on Certified Kubernetes
-
-- [Step 1: Get access to the container images](install.md#step-1-get-access-to-the-container-images)
-- [Step 2: Prepare your environment for automation software](install.md#step-2-prepare-your-environment-for-automation-software)
-- [Step 3: Create a shared PV and add the JDBC drivers](install.md#step-3-create-a-shared-pv-and-add-the-jdbc-drivers)
-- [Step 4: Deploy the operator manifest files to your cluster](install.md#step-4-deploy-the-operator-manifest-files-to-your-cluster)
-- [Step 5: Configure the software that you want to install](install.md#step-5-configure-the-software-that-you-want-to-install)
-- [Step 6: Apply the custom resources](install.md#step-6-apply-the-custom-resources)
-- [Step 7: Verify that the automation containers are running](install.md#step-7-verify-that-the-automation-containers-are-running)
-- [Step 8: Complete some post-installation steps](install.md#step-8-complete-some-post-installation-steps)
-
-## Step 1: Get access to the container images
-
-You can access the container images in the IBM Docker registry with your IBMid (Option 1), or you can use the downloaded archives from IBM Passport Advantage (PPA) (Option 2).
-
-1. Log in to your Kubernetes cluster.
-2. Create a namespace in which you want to install the operator.
-   ```bash
-   $ kubectl create namespace 
-   ```
-3. Change the context to the namespace you created.
-   ```bash
-   $ kubectl config set-context --current --namespace=
-   ```
-   > **Note**: You need a privileged account to run the policy commands. The namespace must have pull request privileges to the registry where the images are loaded. The namespace must also have pull request privileges to push the images into another namespace.
-4. Download or clone the repository on your local machine and go to the `cert-kubernetes` directory.
-   ```bash
-   $ git clone git@github.com:icp4a/cert-kubernetes.git
-   $ cd cert-kubernetes
-   ```
-   There you will find the scripts and Kubernetes descriptors that are necessary to install Cloud Pak for Automation.
-
-Before you go to Step 2, make sure that your entitled container images are available and accessible by following Option 1 or Option 2.
-
-### Option 1: Create a pull secret for the IBM Cloud Entitled Registry
-
-1. Log in to [MyIBM Container Software Library](https://myibm.ibm.com/products-services/containerlibrary) with the IBMid and password that are associated with the entitled software.
-
-2. In the **Container software library** tile, verify your entitlement on the **View library** page, and then go to **Get entitlement key** to retrieve the key.
-
-3. Create a pull secret by running a `kubectl create secret` command.
-   ```bash
-   $ kubectl create secret docker-registry admin.registrykey --docker-server=cp.icr.io --docker-username=iamapikey --docker-password="" --docker-email=user@foo.com
-   ```
-
-   > **Note**: The `cp.icr.io` value for the **docker-server** parameter is the only registry domain name that contains the images.
-
-   > **Note**: Use “cp” for the docker-username. The docker-email has to be a valid email address (associated to your IBM ID). Make sure you are copying the Entitlement Key in the docker-password field within double-quotes.
-
-4. Take a note of the secret and the server values so that you can set them to the **pullSecrets** and **repository** parameters when you run the operator for your containers.
-
-### Option 2: Download the packages from PPA and load the images
-
-[IBM Passport Advantage (PPA)](https://www-01.ibm.com/software/passportadvantage/pao_customer.html) provides archives (.tgz) for the software. To view the list of Passport Advantage eAssembly installation images, refer to the [20.0.1 download document](https://www.ibm.com/support/pages/ibm-cloud-pak-automation-v2001-download-document).
-
-1. Download one or more PPA packages to a server that is connected to your Docker registry.
-2. Check that you can run a docker command.
-   ```bash
-   $ docker ps
-   ```
-3. Log in to a Docker registry with your credentials.
-   ```bash
-   $ docker login -u 
-   ```
-4. Run a `kubectl` command to make sure that you have access to Kubernetes.
-   ```bash
-   $ kubectl cluster-info
-   ```
-5. Run the [`scripts/loadimages.sh`](../../scripts/loadimages.sh) script to load the images into your Docker registry. Specify the two mandatory parameters in the command line.
-
-   ```
-   -p  PPA archive files location or archive filename
-   -r  Target Docker registry and namespace
-   -l  Optional: Target a local registry
-   ```
-
-   The following example shows the input values in the command line.
-
-   ```
-   # scripts/loadimages.sh -p .tgz -r /namespace
-   ```
-
-   > **Note**: You must have pull request privileges to the registry where the images are loaded.
-
-6. Check that the images are pushed correctly to the registry.
-7. If you want to use an external Docker registry, create a Docker registry secret.
-
-   ```bash
-   $ kubectl create secret docker-registry admin.registrykey --docker-server= --docker-username= --docker-password= --docker-email=
-   ```
-
-   Take a note of the secret and the server values so that you can set them to the **pullSecrets** and **repository** parameters when you run the operator for your containers.
-
-## Step 2: Prepare your environment for automation software
-
-Before you install any of the containerized software:
-
-1. Go to the prerequisites page in the [IBM Cloud Pak for Automation 20.0.x](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_prepare_env_k8s.html) Knowledge Center.
-2. Follow the instructions on preparing your environment for the software components that you want to install.
-
-   How much preparation you need to do depends on what you want to install and how familiar you are with your environment.
-
-## Step 3: Create a shared PV and add the JDBC drivers
-
-1. Create a persistent volume (PV) for the operator. This PV is needed for the JDBC drivers. The following example YAML defines a PV, but PVs depend on your cluster configuration.
-   ```yaml
-   apiVersion: v1
-   kind: PersistentVolume
-   metadata:
-     labels:
-       type: local
-     name: operator-shared-pv
-   spec:
-     capacity:
-       storage: 1Gi
-     accessModes:
-     - ReadWriteMany
-     hostPath:
-       path: "/root/operator"
-     persistentVolumeReclaimPolicy: Delete
-   ```
-
-2. Deploy the PV.
-   ```bash
-   $ kubectl create -f operator-shared-pv.yaml
-   ```
-
-3. Create a claim for the PV, or check that the PV is bound dynamically, using [descriptors/operator-shared-pvc.yaml](../../descriptors/operator-shared-pvc.yaml?raw=true).
-
-   > Replace the storage class if you do not want to create the relevant persistent volume.
-
-   ```yaml
-   apiVersion: v1
-   kind: PersistentVolumeClaim
-   metadata:
-     name: operator-shared-pvc
-     namespace: my-project
-   spec:
-     accessModes:
-     - ReadWriteMany
-     storageClassName: ""
-     resources:
-       requests:
-         storage: 1Gi
-     volumeName: operator-shared-pv
-   ```
-
-4. Deploy the PVC.
-   ```bash
-   $ kubectl create -f descriptors/operator-shared-pvc.yaml
-   ```
-
-5. Copy all of the JDBC drivers that are needed by the components you intend to install to the persistent volume. Depending on your storage configuration you might not need these drivers.
-
-   > **Note**: File names for JDBC drivers cannot include additional version information.
-
-   DB2:
-   - db2jcc4.jar
-   - db2jcc_license_cu.jar
-
-   Oracle:
-   - ojdbc8.jar
-
-   The following structure shows an example remote file system.
-
-   ```
-   pv-root-dir
-
-   └── jdbc
-
-      ├── db2
-
-      │   ├── db2jcc4.jar
-
-      │   └── db2jcc_license_cu.jar
-
-      ├── oracle
-
-      │   └── ojdbc8.jar
-
-   ```
-
-## Step 4: Deploy the operator manifest files to your cluster
-
-The Cloud Pak operator has a number of descriptors that must be applied.
-
- - [descriptors/ibm_icp4a_crd.yaml](../../descriptors/ibm_icp4a_crd.yaml?raw=true) contains the description of the Custom Resource Definition.
- - [descriptors/operator.yaml](../../descriptors/operator.yaml?raw=true) defines the deployment of the operator code.
- - [descriptors/role.yaml](../../descriptors/role.yaml?raw=true) defines the access of the operator.
- - [descriptors/role_binding.yaml](../../descriptors/role_binding.yaml?raw=true) defines the access of the operator.
- - [descriptors/service_account.yaml](../../descriptors/service_account.yaml?raw=true) defines the identity for processes that run inside the pods of the operator.
-
-1. Deploy the icp4a-operator on your cluster.
-
-   Use the script [scripts/deployOperator.sh](../../scripts/deployOperator.sh) to deploy these descriptors.
-   ```bash
-   $ ./scripts/deployOperator.sh -i /icp4a-operator:20.0.1 -p '' -a accept
-   ```
-
-   Where *registry_url* is the value for your internal docker registry or `cp.icr.io/cp/cp4a` for the IBM Cloud Entitled Registry, *my_secret_name* is the secret created to access the registry, and *accept* means that you accept the [license](../../LICENSE).
-
-2. Monitor the pod until it shows a STATUS of *Running*:
-   ```bash
-   $ kubectl get pods -w
-   ```
-   > **Note**: When started, you can monitor the operator logs with the following command:
-   ```bash
-   $ kubectl logs -f deployment/ibm-cp4a-operator -c operator
-   ```
-
-## Step 5: Configure the software that you want to install
-
-A custom resource (CR) YAML file is a configuration file that describes an ICP4ACluster instance and includes the parameters to install some or all of the components.
-
-1. Make a copy of the template custom resource YAML file [descriptors/ibm_cp4a_cr_template.yaml](../../descriptors/ibm_cp4a_cr_template.yaml?raw=true) and name it appropriately for your deployment (for example descriptors/my_icp4a_cr.yaml).
-
-   > **Important:** Because the maximum length of labels in Kubernetes is 63 characters, be careful with the lengths of your CR name and instance names. Some components can configure multiple instances; each instance must have a different name. The total length of the CR name and an instance name must not exceed 24 characters, otherwise some component deployments fail.
-
-   You must use a single custom resource file to include all of the components that you want to deploy with an operator instance. Each time that you need to make an update or modification you must use this same file to apply the changes to your deployments. When you apply a new custom resource to an operator you must make sure that all previously deployed resources are included if you do not want the operator to delete them.
-
-2. Change the default name of your instance in descriptors/my_icp4a_cr.yaml.
-
-   ```yaml
-   metadata:
-     name: 
-   ```
-
-3. If you plan to install UMS and/or AAE and you use the IBM entitled registry, uncomment the lines for the `image_pull_secrets` and `images` parameters in the `shared_configuration` section.
-
-   If you use an internal registry, enter your values for these parameters.
-
-   ```yaml
-   shared_configuration:
-     image_pull_secrets:
-     - 
-     images:
-       keytool_job_container:
-         repository: :5000//dba-keytool-jobcontainer
-         tag: 20.0.1
-       dbcompatibility_init_container:
-         repository: :5000//dba-dbcompatibility-initcontainer
-         tag: 20.0.1
-       keytool_init_container:
-         repository: :5000//dba-keytool-initcontainer
-         tag: 20.0.1
-       umsregistration_initjob:
-         repository: :5000//dba-umsregistration-initjob
-         tag: 20.0.1
-     pull_policy: IfNotPresent
-   ```
-
-   | Parameter                        | Description                                   |
-   | -------------------------------- | --------------------------------------------- |
-   | `keytool_job_container`          | Repository from where to pull the UMS keytool_job_container and the corresponding tag |
-   | `dbcompatibility_init_container` | Repository from where to pull the AAE init_container and the corresponding tag |
-   | `keytool_init_container`         | Repository from where to pull the UMS keytool_init_container and the corresponding tag |
-   | `umsregistration_initjob`        | Repository from where to pull the AAE umsregistration_initjob and the corresponding tag |
-   | `image_pull_secrets`             | Secrets in your target namespace to pull images from the specified repository |
-
-   > **Note:** If you do not plan to install UMS or AAE, you can leave these lines commented in your copy of the custom resource template file.
-
-4. Use the following links to configure the software that you want to install.
- - - [Configure IBM Automation Digital Worker](../../ADW/README_config.md) - - [Configure IBM Automation Workstream Services](../../IAWS/README_config.md) - - [Configure IBM Business Automation Application Engine](../../AAE/README_config.md) - - [Configure IBM Business Automation Content Analyzer](../../ACA/README_config.md) - - [Configure IBM Business Automation Insights](../../BAI/README_config.md) - - [Configure IBM Business Automation Navigator](../../BAN/README_config.md) - - [Configure IBM Business Automation Studio](../../BAS/README_config.md) - - [Configure IBM FileNet Content Manager](../../FNCM//README_config.md) - - [Configure IBM Operational Decision Manager](../../ODM/README_config.md) - - [Configure the User Management Service](../../UMS/README_config.md) - -## Step 6: Apply the custom resources - -1. Check that all the components you want to install are configured. - - ```bash - $ cat descriptors/my_icp4a_cr.yaml - ``` - -2. Deploy the configured components by applying the custom resource. - - ```bash - $ kubectl apply -f descriptors/my_icp4a_cr.yaml - ``` - -## Step 7: Verify that the automation containers are running - -The operator reconciliation loop might take several minutes. - -Monitor the status of your pods with: -```bash -$ kubectl get pods -w -``` - -When all of the pods are *Running*, you can access the status of your services with the following commands. -```bash -$ kubectl cluster-info -$ kubectl get services -``` -You can now expose the services to your users. - -Refer to the [Troubleshooting section](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_trbleshoot_operators.html) to access the operator logs. - -## Step 8: Complete some post-installation steps - -Go to [IBM Knowledge Center](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_deploy_postdeployk8s.html) to follow the post-installation steps. diff --git a/platform/k8s/migrate.md b/platform/k8s/migrate.md deleted file mode 100644 index 2e0bbad0..00000000 --- a/platform/k8s/migrate.md +++ /dev/null @@ -1,20 +0,0 @@ -# Migrating Cloud Pak for Automation data on Certified Kubernetes - -To migrate your 19.0.x data to 20.0.1, uninstall your current deployment and follow the migration instructions for each component to point to the existing persistent stores. - -## Step 1: Prepare your environment and take note of your existing storage settings - -Use the following links to help you find the relevant software storage settings that you want to migrate. - -- [Configure IBM Business Automation Application Engine](../../AAE/README_migrate.md) -- [Configure IBM Business Automation Content Analyzer](../../ACA/README_migrate.md) -- [Configure IBM Business Automation Insights](../../BAI/README_migrate.md) -- [Configure IBM Business Automation Navigator](../../BAN/README_migrate.md) -- [Configure IBM Business Automation Studio](../../BAS/README_migrate.md) -- [Configure IBM FileNet Content Manager](../../FNCM//README_migrate.md) -- [Configure IBM Operational Decision Manager](../../ODM/README_migrate.md) -- [Configure the User Management Service](../../UMS/README_migrate.md) - -## Step 2: Install your chosen components with the operator - - When you have completed all of the preparation steps for each of the components that you want to migrate, follow the instructions in the [installation](install.md) readme. 
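For the existing persistent stores to survive the uninstall, the persistent volumes that back them must not be reclaimed when their claims are deleted. A minimal sketch of such a PV (the name and the NFS backing are assumptions; adapt them to your own storage settings):

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: existing-data-pv                  # hypothetical name
spec:
  capacity:
    storage: 1Gi
  accessModes:
  - ReadWriteMany
  persistentVolumeReclaimPolicy: Retain   # keeps the underlying data when the claim is deleted
  nfs:                                    # hypothetical NFS backing
    server: nfs.example.com
    path: /exports/icp4a-data
```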
diff --git a/platform/k8s/uninstall.md b/platform/k8s/uninstall.md
deleted file mode 100644
index 188025d2..00000000
--- a/platform/k8s/uninstall.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Uninstalling Cloud Pak for Automation 20.0.1 on Certified Kubernetes
-
-## Delete your automation instances
-
-You can delete your custom resource (CR) deployments by deleting the CR YAML file or the CR instance. The name of the instance is taken from the value of the `name` parameter in the CR YAML file. The following command is used to delete an instance.
-
-```bash
-  $ kubectl delete ICP4ACluster 
-```
-
-If you want to uninstall IBM Automation Digital Worker and unsubscribe from the IBM Business Automation Studio, you will have to scale up the IBM Automation Digital Worker unsubscribe ReplicaSet before deleting your automation instances. Regardless of the pod status, look in the pod's log for a `STATUS=success` statement as proof that the unsubscribe succeeded.
-
-```bash
- $ kubectl scale replicasets -adw-registry-unsubscribe --replicas=1
-```
-
-> **Note**: You can get the names of the ICP4ACluster instances with the following command:
- ```bash
- $ kubectl get ICP4ACluster
- ```
-
-## Delete the operator instance and all associated automation instances
-
-Use the [`scripts/deleteOperator.sh`](../../scripts/deleteOperator.sh) script to delete all the resources that are linked to the operator.
-
-```bash
- $ ./scripts/deleteOperator.sh
-```
-
-Verify that all the pods created with the operator are terminated and deleted.
diff --git a/platform/k8s/update.md b/platform/k8s/update.md
deleted file mode 100644
index ccd5fc77..00000000
--- a/platform/k8s/update.md
+++ /dev/null
@@ -1,53 +0,0 @@
-# Updating Cloud Pak for Automation 20.0.1 on Certified Kubernetes
-
-- [Step 1: Modify the software that is installed](update.md#step-1-modify-the-software-that-is-installed)
-- [Step 2: Apply the updated custom resources](update.md#step-2-apply-the-updated-custom-resources)
-- [Step 3: Verify the updated automation containers](update.md#step-3-verify-the-updated-automation-containers)
-
-## Step 1: Modify the software that is installed
-
-An update to the custom resource (CR) overwrites the deployed resources during the operator control loop (observe, analyze, act) that occurs as a result of constantly watching the state of the Kubernetes resources.
-
-Use the following links to configure the software that is already installed. You can modify the installed software, remove it, or add new components. Use the same CR YAML file that you deployed with the operator to make the updates (for example descriptors/my_icp4a_cr.yaml).
-
-- [Configure IBM Automation Digital Worker](../../ADW/README_config.md)
-- [Configure IBM Automation Workstream Services](../../IAWS/README_config.md)
-- [Configure IBM Business Automation Application Engine](../../AAE/README_config.md)
-- [Configure IBM Business Automation Content Analyzer](../../ACA/README_config.md)
-- [Configure IBM Business Automation Insights](../../BAI/README_config.md)
-- [Configure IBM Business Automation Navigator](../../BAN/README_config.md)
-- [Configure IBM Business Automation Studio](../../BAS/README_config.md)
-- [Configure IBM FileNet Content Manager](../../FNCM//README_config.md)
-- [Configure IBM Operational Decision Manager](../../ODM/README_config.md)
-- [Configure the User Management Service](../../UMS/README_config.md)
-
-## Step 2: Apply the updated custom resources
-
-1. Review your CR YAML file to make sure it contains all of your intended modifications.
-
-   ```bash
-   $ cat descriptors/my_icp4a_cr.yaml
-   ```
-
-2. Run the following command to apply the updates to the operator:
-
-   ```bash
-   $ kubectl apply -f descriptors/my_icp4a_cr.yaml --overwrite=true
-   ```
-
-## Step 3: Verify the updated automation containers
-
-The operator reconciliation loop might take several minutes.
-
-Monitor the status of your pods with:
-```bash
-$ kubectl get pods -w
-```
-
-When all of the pods are *Running*, you can access the status of your services with the following commands.
-```bash
-$ kubectl cluster-info
-$ kubectl get services
-```
-
-Refer to the [Troubleshooting section](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_trbleshoot_operators.html) to access the operator logs.
diff --git a/platform/k8s/upgrade.md b/platform/k8s/upgrade.md
deleted file mode 100644
index 3a7896bc..00000000
--- a/platform/k8s/upgrade.md
+++ /dev/null
@@ -1,69 +0,0 @@
-# Upgrading from IBM Cloud Pak for Automation 19.0.3 to 20.0.1 on Certified Kubernetes
-
-If you installed any of the Cloud Pak for Automation 19.0.3 components on a Kubernetes cluster and you want to continue to use them in 20.0.1, you can upgrade them.
-
-> **Note:** If you want to upgrade Automation Digital Worker (ADW) 19.0.3 to 20.0.1, you must contact [IBM Support](https://www.ibm.com/mysupport/s/) and open a support case. After your case is submitted, IBM Support contacts you.
-
-## Step 1: Get access to the new container images
-
-Follow the instructions in step 1 of [Installing Cloud Pak for Automation 20.0.1 on certified Kubernetes](install.md#step-1-create-a-namespace-and-get-access-to-the-container-images) to clone the 20.0.1 GitHub repository and to get access to the new docker images.
-
-## Step 2: Update the operator version number to 20.0.1
-
-1. Log in to the target cluster.
-2. Go to the downloaded `cert-kubernetes.git` repository for 20.0.1, and change directory to `cert-kubernetes`.
-   ```bash
-   $ cd cert-kubernetes
-   ```
-3. Upgrade the icp4a-operator on your cluster.
-
-   Use the 20.0.1 [scripts/upgradeOperator.sh](../../scripts/upgradeOperator.sh) script to deploy the operator manifest descriptors.
-   ```bash
-   $ ./scripts/upgradeOperator.sh -i /icp4a-operator:20.0.1 -p '' -a accept
-   ```
-
-   Where *registry_url* is the value for your internal docker registry or `cp.icr.io/cp/cp4a` for the IBM Cloud Entitled Registry, *my_secret_name* is the secret created to access the registry, and *accept* means that you accept the [license](../../LICENSE).
-
-   > **Note**: If you plan to use a non-admin user to install the operator, you must add the user to the `ibm-cp4a-operator` role.
For example:
-   ```bash
-   $ oc adm policy add-role-to-user ibm-cp4a-operator 
-   ```
-
-## Step 3: Update the image versions in the custom resource YAML file for your deployment
-
-Get the custom resource YAML file that you deployed and edit it by following the instructions for each component:
-
-- [Configure IBM Automation Workstreams Services](../../IAWS/README_upgrade.md)
-- [Configure IBM Business Automation Application Engine](../../AAE/README_upgrade.md)
-- [Configure IBM Business Automation Content Analyzer](../../ACA/README_upgrade.md)
-- [Configure IBM Business Automation Insights](../../BAI/README_upgrade.md)
-- [Configure IBM Business Automation Navigator](../../BAN/README_upgrade.md)
-- [Configure IBM Business Automation Studio](../../BAS/README_upgrade.md)
-- [Configure IBM FileNet Content Manager](../../FNCM//README_upgrade.md)
-- [Configure IBM Operational Decision Manager](../../ODM/README_upgrade.md)
-- [Configure the User Management Service](../../UMS/README_upgrade.md)
-
-## Step 4: Apply the updated custom resource to upgrade from 19.0.3 to 20.0.1
-
-1. Check that all the components that you want to upgrade are configured.
-
-   ```bash
-   $ cat descriptors/my_icp4a_cr.yaml
-   ```
-
-2. Update the configured components by applying the custom resource.
-
-   ```bash
-   $ kubectl apply -f descriptors/my_icp4a_cr.yaml
-   ```
-
-## Step 5: Verify the applications
-
-The operator reconciliation loop might take several minutes.
-
-Monitor the status of your pods with:
-```bash
-$ kubectl get pods -w
-```
-
-Log in to the web applications in your deployment and verify that they are ready and can be accessed.
diff --git a/platform/ocp/README.md b/platform/ocp/README.md
deleted file mode 100644
index 2df53e20..00000000
--- a/platform/ocp/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-# IBM Cloud Pak for Automation 20.0.1 on Red Hat OpenShift
-
-Red Hat OpenShift Container Platform 3.11, 4.2, or 4.3 is the target platform for Cloud Pak for Automation 20.0.1.
-
-The podman (Pod Manager) command in OCP 4.x can be used to run containers outside of Kubernetes and the OpenShift command line interface. The podman tool acts as a replacement for docker with even more container management features. The two command-line interfaces are so similar that you might want to define `alias docker='podman'`.
-
-Choose which use case you need, and then follow the links below to find the right instructions:
-
-- [Install Cloud Pak for Automation 20.0.1 on Red Hat OpenShift](install.md)
-- [Uninstall Cloud Pak for Automation 20.0.1 on Red Hat OpenShift](uninstall.md)
-- [Upgrade Cloud Pak for Automation 19.0.3 to 20.0.1 on Red Hat OpenShift](upgrade.md)
-- [Migrate 19.0.1 or 19.0.2 persisted data to 20.0.1 on Red Hat OpenShift](migrate.md)
-- [Update Cloud Pak for Automation 20.0.1 on Red Hat OpenShift](update.md)
-
-> **Note:** If you installed a previous version of Cloud Pak for Automation on OpenShift Container Platform (OCP) 3.11 and you want to upgrade OCP to 4.x, you must install a new instance of the ICP4ACluster. You can then follow the instructions in [How to migrate to 20.0.1](migrate.md) to point your new instance to your persisted data.
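If you follow docker-based instructions on OCP 4.x, the alias mentioned above keeps them usable as-is; a minimal sketch:

```bash
# Make docker commands resolve to podman for the current shell session
alias docker='podman'
docker ps   # actually runs: podman ps
```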
diff --git a/platform/ocp/install.md b/platform/ocp/install.md
deleted file mode 100644
index b98742f7..00000000
--- a/platform/ocp/install.md
+++ /dev/null
@@ -1,332 +0,0 @@
-# Installing Cloud Pak for Automation 20.0.1 on Red Hat OpenShift
-
-- [Step 1: Create a namespace and get access to the container images](install.md#step-1-create-a-namespace-and-get-access-to-the-container-images)
-- [Step 2: Prepare your environment for automation software](install.md#step-2-prepare-your-environment-for-automation-software)
-- [Step 3: Create a shared PV and add the JDBC drivers](install.md#step-3-create-a-shared-pv-and-add-the-jdbc-drivers)
-- [Step 4: Deploy the operator manifest files to your cluster](install.md#step-4-deploy-the-operator-manifest-files-to-your-cluster)
-- [Step 5: Configure the software that you want to install](install.md#step-5-configure-the-software-that-you-want-to-install)
-- [Step 6: Apply the custom resource](install.md#step-6-apply-the-custom-resource)
-- [Step 7: Verify that the automation containers are running](install.md#step-7-verify-that-the-automation-containers-are-running)
-- [Step 8: Complete some post-installation steps](install.md#step-8-complete-some-post-installation-steps)
-
-## Step 1: Create a namespace and get access to the container images
-
-From your local machine, you can access the container images in the IBM Docker registry with your IBMid (Option 1), or you can use the downloaded archives from IBM Passport Advantage (PPA) (Option 2).
-
-1. Log in to your cluster.
-   ```bash
-   $ oc login https://:8443 -u 
-   ```
-2. Create an OpenShift project (namespace) in which you want to install the operator.
-   ```bash
-   $ oc new-project my-project
-   ```
-3. Add privileges to the project.
-   ```bash
-   $ oc adm policy add-scc-to-user privileged -z my-project
-   ```
-4. Download or clone the repository on your local machine and change to the `cert-kubernetes` directory.
-   ```bash
-   $ git clone git@github.com:icp4a/cert-kubernetes.git
-   $ cd cert-kubernetes
-   ```
-   There you will find the scripts and Kubernetes descriptors that are necessary to install Cloud Pak for Automation.
-
-### Option 1: Create a pull secret for the IBM Cloud Entitled Registry
-
-1. Log in to [MyIBM Container Software Library](https://myibm.ibm.com/products-services/containerlibrary) with the IBMid and password that are associated with the entitled software.
-
-2. In the **Container software library** tile, verify your entitlement on the **View library** page, and then go to **Get entitlement key** to retrieve the key.
-
-3. Create a pull secret by running a `kubectl create secret` command.
-   ```bash
-   $ kubectl create secret docker-registry admin.registrykey --docker-server=cp.icr.io --docker-username=cp --docker-password="" --docker-email=
-   ```
-
-   > **Note**: The `cp.icr.io` value for the **docker-server** parameter is the only registry domain name that contains the images.
-
-   > **Note**: Use “cp” for the docker-username. The docker-email has to be a valid email address (associated to your IBM ID). Make sure you are copying the Entitlement Key in the docker-password field within double-quotes.
-
-4. Take a note of the secret and the server values so that you can set them to the **pullSecrets** and **repository** parameters when you run the operator for your containers.
-
-### Option 2: Download the packages from PPA and load the images
-
-[IBM Passport Advantage (PPA)](https://www-01.ibm.com/software/passportadvantage/pao_customer.html) provides archives (.tgz) for the software.
To view the list of Passport Advantage eAssembly installation images, refer to the [20.0.1 download document](https://www.ibm.com/support/pages/ibm-cloud-pak-automation-v2001-download-document). - -1. Download one or more PPA packages to a server that is connected to your Docker registry. -2. Check that you can run a docker command. - ```bash - $ docker ps - ``` -3. Log in to the Docker registry with a token. - ```bash - $ docker login $(oc registry info) -u -p $(oc whoami -t) - ``` - > **Note**: You can connect to a node in the cluster to resolve the `docker-registry.default.svc` parameter. - - You can also log in to an external Docker registry using the following command: - ```bash - $ docker login -u - ``` -4. Run a `kubectl` command to make sure that you have access to Kubernetes. - ```bash - $ kubectl cluster-info - ``` -5. Run the [`scripts/loadimages.sh`](../../scripts/loadimages.sh) script to load the images into your Docker registry. Specify the two mandatory parameters in the command line. - - ``` - -p PPA archive files location or archive filename - -r Target Docker registry and namespace - -l Optional: Target a local registry - ``` - - The following example shows the input values in the command line on OCP 3.11. On OCP 4.2 and 4.3 the default docker registry is based on the host name, for example "default-route-openshift-image-registry.ibm.com". - - ``` - # scripts/loadimages.sh -p .tgz -r docker-registry.default.svc:5000/my-project - ``` - - > **Note**: The project must have pull request privileges to the registry where the images are loaded. The project must also have pull request privileges to push the images into another namespace/project. - -6. Check that the images are pushed correctly to the registry. - ```bash - $ oc get is - ``` -7. If you want to use an external Docker registry, create a Docker registry secret. - - ```bash - $ oc create secret docker-registry admin.registrykey --docker-server= --docker-username= --docker-password= --docker-email= - ``` - - Take a note of the secret and the server values so that you can set them to the **pullSecrets** and **repository** parameters when you run the operator for your containers. - - -## Step 2: Prepare your environment for automation software - -Before you install any of the containerized software: - -1. Go to the prerequisites page in the [IBM Cloud Pak for Automation 20.0.x](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_prepare_env_k8s.html) Knowledge Center. -2. Follow the instructions on preparing your environment for the software components that you want to install. - - How much preparation you need to do depends on what you want to install and how familiar you are with your environment. - -## Step 3: Create a shared PV and add the JDBC drivers - -1. Create a persistent volume (PV) for the operator. This PV is needed for the JDBC drivers. The following example YAML defines a PV, but PVs depend on your cluster configuration.  - ```yaml - apiVersion: v1 - kind: PersistentVolume - metadata: - labels: - type: local - name: operator-shared-pv - spec: - capacity: - storage: 1Gi - accessModes: - - ReadWriteMany - hostPath: - path: "/root/operator" - persistentVolumeReclaimPolicy: Delete - ``` - -2. Deploy the PV. - ```bash - $ oc create -f operator-shared-pv.yaml - ``` - -3. Create a claim for the PV. 
-
-   To create a claim bound to the previously created PV, create the file `/operator-shared-pvc.yaml` anywhere
-   on your disk, with the following content:
-
-   ```yaml
-   apiVersion: v1
-   kind: PersistentVolumeClaim
-   metadata:
-     name: operator-shared-pvc
-     namespace: 
-   spec:
-     accessModes:
-     - ReadWriteMany
-     storageClassName: ""
-     resources:
-       requests:
-         storage: 1Gi
-     volumeName: operator-shared-pv
-   ```
-
-   > Replace the `` placeholder with the name of your OpenShift project.
-
-   If you prefer to use dynamic provisioning for this claim, edit [descriptors/operator-shared-pvc.yaml](../../descriptors/operator-shared-pvc.yaml?raw=true)
-   and replace the `` placeholder with a storage class of your choice.
-
-4. Deploy the PVC.
-
-   If you created your own `operator-shared-pvc.yaml`:
-   ```bash
-   $ oc create -f /operator-shared-pvc.yaml
-   ```
-
-   Otherwise, if you edited `descriptors/operator-shared-pvc.yaml`:
-   ```bash
-   $ oc create -f descriptors/operator-shared-pvc.yaml
-   ```
-
-5. Copy all of the JDBC drivers that are needed by the components you intend to install to the persistent volume. Depending on your storage configuration you might not need these drivers.
-
-   > **Note**: File names for JDBC drivers cannot include additional version information.
-
-   DB2:
-   - db2jcc4.jar
-   - db2jcc_license_cu.jar
-
-   Oracle:
-   - ojdbc8.jar
-
-   The following structure shows an example remote file system.
-
-   ```
-   pv-root-dir
-
-   └── jdbc
-
-      ├── db2
-
-      │   ├── db2jcc4.jar
-
-      │   └── db2jcc_license_cu.jar
-
-      ├── oracle
-
-      │   └── ojdbc8.jar
-
-   ```
-
-## Step 4: Deploy the operator manifest files to your cluster
-
-The Cloud Pak operator has a number of descriptors that must be applied.
-
- - [descriptors/ibm_cp4a_crd.yaml](../../descriptors/ibm_cp4a_crd.yaml?raw=true) contains the description of the Custom Resource Definition.
- - [descriptors/operator.yaml](../../descriptors/operator.yaml?raw=true) defines the deployment of the operator code.
- - [descriptors/role.yaml](../../descriptors/role.yaml?raw=true) defines the access of the operator.
 - [descriptors/role_binding.yaml](../../descriptors/role_binding.yaml?raw=true) defines the access of the operator.
- - [descriptors/service_account.yaml](../../descriptors/service_account.yaml?raw=true) defines the identity for processes that run inside the pods of the operator.
-
-1. Deploy the icp4a-operator on your cluster.
-
-   Use the script [scripts/deployOperator.sh](../../scripts/deployOperator.sh) to deploy these descriptors.
-   ```bash
-   $ ./scripts/deployOperator.sh -i /icp4a-operator:20.0.1 -p '' -a accept
-   ```
-
-   Where *registry_url* is the value for your internal docker registry or `cp.icr.io/cp/cp4a` for the IBM Cloud Entitled Registry, *my_secret_name* is the secret created to access the registry, and *accept* means that you accept the [license](../../LICENSE).
-
-   > **Note**: If you plan to use a non-admin user to install the operator, you must add the user to the `ibm-cp4a-operator` role. For example:
-   ```bash
-   $ oc adm policy add-role-to-user ibm-cp4a-operator 
-   ```
-
-2. Monitor the pod until it shows a STATUS of *Running*:
-   ```bash
-   $ oc get pods -w
-   ```
-   > **Note**: When started, you can monitor the operator logs with the following command:
-   ```bash
-   $ oc logs -f deployment/ibm-cp4a-operator -c operator
-   ```
-
-## Step 5: Configure the software that you want to install
-
-A custom resource (CR) YAML file is a configuration file that describes an ICP4ACluster instance and includes the parameters to install some or all of the components.
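For orientation, a minimal sketch of the overall shape of such a CR (illustrative only, not a complete deployable resource; the instance name is an assumption):

```yaml
apiVersion: icp4a.ibm.com/v1
kind: ICP4ACluster
metadata:
  name: icp4adeploy                      # your instance name
spec:
  appVersion: 20.0.1
  shared_configuration:
    sc_deployment_type: enterprise       # plus the other shared parameters
  # one *_configuration section follows for each component you deploy
```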
-
-1. Make a copy of the template custom resource YAML file [descriptors/ibm_cp4a_cr_template.yaml](../../descriptors/ibm_cp4a_cr_template.yaml?raw=true) and name it appropriately for your deployment (for example descriptors/my_icp4a_cr.yaml).
-
-   > **Important:** Because the maximum length of labels in Kubernetes is 63 characters, be careful with the lengths of your CR name and instance names. Some components can configure multiple instances, and each instance must have a different name. The total length of the CR name and an instance name must not exceed 24 characters, otherwise some component deployments fail.
-
-   You must use a single custom resource file to include all of the components that you want to deploy with an operator instance. Each time that you need to make an update or modification you must use this same file to apply the changes to your deployments. When you apply a new custom resource to an operator you must make sure that all previously deployed resources are included if you do not want the operator to delete them.
-
-2. Change the default name of your instance in descriptors/my_icp4a_cr.yaml.
-
-   ```yaml
-   metadata:
-     name: <instance_name>
-   ```
-
-3. If you plan to install UMS and/or AAE and you use the IBM entitled registry, uncomment the lines for the `image_pull_secrets` and `images` parameters in the `shared_configuration` section.
-
-   If you use an internal registry, enter your values for these parameters.
-
-   ```yaml
-   shared_configuration:
-     image_pull_secrets:
-       - <secret_name>
-     images:
-       keytool_job_container:
-         repository: <registry_url>:5000/<project_name>/dba-keytool-jobcontainer
-         tag: 20.0.1
-       dbcompatibility_init_container:
-         repository: <registry_url>:5000/<project_name>/dba-dbcompatibility-initcontainer
-         tag: 20.0.1
-       keytool_init_container:
-         repository: <registry_url>:5000/<project_name>/dba-keytool-initcontainer
-         tag: 20.0.1
-       umsregistration_initjob:
-         repository: <registry_url>:5000/<project_name>/dba-umsregistration-initjob
-         tag: 20.0.1
-     pull_policy: IfNotPresent
-   ```
-
-   | Parameter | Description |
-   | ------------------------------- | --------------------------------------------- |
-   | `keytool_job_container` | Repository from where to pull the UMS keytool_job_container and the corresponding tag |
-   | `dbcompatibility_init_container` | Repository from where to pull the AAE init_container and the corresponding tag |
-   | `keytool_init_container` | Repository from where to pull the UMS keytool_init_container and the corresponding tag |
-   | `umsregistration_initjob` | Repository from where to pull the AAE umsregistration_initjob and the corresponding tag |
-   | `image_pull_secrets` | Secrets in your target namespace to pull images from the specified repository |
-
-   > **Note:** If you do not plan to install UMS or AAE, you can leave these lines commented in your copy of the custom resource template file.
-
-4. Use the following links to configure the software that you want to install. Make sure that any components that you do not want to install are fully commented out or removed, including the top-level `*_configuration` key for each of them.
-
-   - [Configure IBM Automation Digital Worker](../../ADW/README_config.md)
-   - [Configure IBM Automation Workstream Services](../../IAWS/README_config.md)
-   - [Configure IBM Business Automation Application Engine](../../AAE/README_config.md)
-   - [Configure IBM Business Automation Content Analyzer](../../ACA/README_config.md)
-   - [Configure IBM Business Automation Insights](../../BAI/README_config.md)
-   - [Configure IBM Business Automation Navigator](../../BAN/README_config.md)
-   - [Configure IBM Business Automation Studio](../../BAS/README_config.md)
-   - [Configure IBM FileNet Content Manager](../../FNCM//README_config.md)
-   - [Configure IBM Operational Decision Manager](../../ODM/README_config.md)
-   - [Configure the User Management Service](../../UMS/README_config.md)
-
-## Step 6: Apply the custom resource
-
-1. Check that all the components you want to install are configured.
-
-   ```bash
-   $ cat descriptors/my_icp4a_cr.yaml
-   ```
-
-2. Deploy the configured components by applying the custom resource.
-
-   ```bash
-   $ oc apply -f descriptors/my_icp4a_cr.yaml
-   ```
-
-## Step 7: Verify that the automation containers are running
-
-The operator reconciliation loop might take several minutes.
-
-Monitor the status of your pods with:
-```bash
-$ oc get pods -w
-```
-
-When all of the pods are *Running*, you can access the status of your services with the following command.
-```bash
-$ oc status
-```
-You can now expose the services to your users.
-
-Refer to the [Troubleshooting section](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_trbleshoot_operators.html) to access the operator logs.
-
-## Step 8: Complete some post-installation steps
-
-Go to [IBM Knowledge Center](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_deploy_postdeployk8s.html) to follow the post-installation steps.
diff --git a/platform/ocp/migrate.md b/platform/ocp/migrate.md
deleted file mode 100644
index 20c6650d..00000000
--- a/platform/ocp/migrate.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Migrating Cloud Pak for Automation data on Red Hat OpenShift
-
-To migrate your 19.0.1 or 19.0.2 data to 20.0.1, uninstall your current deployment and follow the migration instructions for each component to point to the existing persistent stores.
-
-## Step 1: Prepare your environment and take note of your existing storage settings
-
-Use the following links to help you find the relevant software storage settings that you want to migrate. A hedged way to record the cluster-side settings is sketched at the end of this readme.
-
-- [Configure IBM Business Automation Application Engine](../../AAE/README_migrate.md)
-- [Configure IBM Business Automation Content Analyzer](../../ACA/README_migrate.md)
-- [Configure IBM Business Automation Insights](../../BAI/README_migrate.md)
-- [Configure IBM Business Automation Navigator](../../BAN/README_migrate.md)
-- [Configure IBM Business Automation Studio](../../BAS/README_migrate.md)
-- [Configure IBM FileNet Content Manager](../../FNCM//README_migrate.md)
-- [Configure IBM Operational Decision Manager](../../ODM/README_migrate.md)
-- [Configure the User Management Service](../../UMS/README_migrate.md)
-
-## Step 2: Install your chosen components with the operator
-
-When you have completed all of the preparation steps for each of the components that you want to migrate, follow the instructions in the [installation](install.md) readme.
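-
-A hedged sketch for recording the existing persistent storage before you uninstall (the project name is illustrative):
-
-```bash
-# Capture the persistent volumes and claims that back the current deployment
-# so that the new 20.0.1 configuration can point at the same stores.
-oc get pvc -n <project_name> -o wide > storage-settings.txt
-oc get pv -o wide >> storage-settings.txt
-```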
diff --git a/platform/ocp/uninstall.md b/platform/ocp/uninstall.md
deleted file mode 100644
index 5c963272..00000000
--- a/platform/ocp/uninstall.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Uninstalling Cloud Pak for Automation 20.0.1 on Red Hat OpenShift
-
-## Delete your automation instances
-
-You can delete your custom resource (CR) deployments by deleting the CR YAML file or the CR instance. The name of the instance is taken from the value of the `name` parameter in the CR YAML file. The following command is used to delete an instance.
-
-```bash
-$ oc delete ICP4ACluster <instance_name>
-```
-
-If you want to uninstall IBM Automation Digital Worker and unsubscribe from IBM Business Automation Studio, you must scale up the IBM Automation Digital Worker unsubscribe ReplicaSet before deleting your automation instances. Regardless of the pod status, check the pod's log for a `STATUS=success` statement as proof that the unsubscription succeeded.
-
-```bash
-$ kubectl scale replicasets <instance_name>-adw-registry-unsubscribe --replicas=1
-```
-
-> **Note**: You can get the names of the ICP4ACluster instances with the following command:
-  ```bash
-  $ oc get ICP4ACluster
-  ```
-
-## Delete the operator instance and all associated automation instances
-
-Use the [`scripts/deleteOperator.sh`](../../scripts/deleteOperator.sh) script to delete all the resources that are linked to the operator.
-
-```bash
-$ ./scripts/deleteOperator.sh
-```
-
-Verify that all the pods created with the operator are terminated and deleted.
diff --git a/platform/ocp/update.md b/platform/ocp/update.md
deleted file mode 100644
index ab4ce231..00000000
--- a/platform/ocp/update.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# Updating Cloud Pak for Automation 20.0.1 on Red Hat OpenShift
-
-- [Step 1: Modify the software that is installed](update.md#step-1-modify-the-software-that-is-installed)
-- [Step 2: Apply the updated custom resources](update.md#step-2-apply-the-updated-custom-resources)
-- [Step 3: Verify the updated automation containers](update.md#step-3-verify-the-updated-automation-containers)
-
-## Step 1: Modify the software that is installed
-
-An update to the custom resource (CR) overwrites the deployed resources during the operator control loop (observe, analyze, act) that runs as a result of constantly watching the state of the Kubernetes resources.
-
-Use the following links to configure the software that is already installed. You can modify the installed software, remove it, or add new components. Use the same CR YAML file that you deployed with the operator to make the updates (for example descriptors/my_icp4a_cr.yaml).
-
-- [Configure IBM Automation Digital Worker](../../ADW/README_config.md)
-- [Configure IBM Automation Workstream Services](../../IAWS/README_config.md)
-- [Configure IBM Business Automation Application Engine](../../AAE/README_config.md)
-- [Configure IBM Business Automation Content Analyzer](../../ACA/README_config.md)
-- [Configure IBM Business Automation Insights](../../BAI/README_config.md)
-- [Configure IBM Business Automation Navigator](../../BAN/README_config.md)
-- [Configure IBM Business Automation Studio](../../BAS/README_config.md)
-- [Configure IBM FileNet Content Manager](../../FNCM//README_config.md)
-- [Configure IBM Operational Decision Manager](../../ODM/README_config.md)
-- [Configure the User Management Service](../../UMS/README_config.md)
-
-## Step 2: Apply the updated custom resources
-
-1. Review your CR YAML file to make sure it contains all of your intended modifications.
-
-   ```bash
-   $ cat descriptors/my_icp4a_cr.yaml
-   ```
-
-2. Run the following command to apply the updates to the operator:
-
-   ```bash
-   $ oc apply -f descriptors/my_icp4a_cr.yaml --overwrite=true
-   ```
-
-> **Note:** You can also use `oc edit ICP4ACluster <instance_name>` to open the default UNIX visual editor (vi) in situ.
-
-## Step 3: Verify the updated automation containers
-
-The operator reconciliation loop might take several minutes.
-
-Monitor the status of your pods with:
-```bash
-$ oc get pods -w
-```
-
-When all of the pods are *Running*, you can access the status of your services with the following command.
-```bash
-$ oc status
-```
-
-Refer to the [Troubleshooting section](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_trbleshoot_operators.html) to access the operator logs.
diff --git a/platform/ocp/upgrade.md b/platform/ocp/upgrade.md
deleted file mode 100644
index 48d79316..00000000
--- a/platform/ocp/upgrade.md
+++ /dev/null
@@ -1,72 +0,0 @@
-# Upgrading from IBM Cloud Pak for Automation 19.0.3 to 20.0.1 on OpenShift Container Platform (OCP)
-
-If you installed any of the Cloud Pak for Automation 19.0.3 components on an OCP cluster and you want to continue to use them in 20.0.1, you can upgrade them.
-
-> **Note:** If you are looking to upgrade Automation Digital Worker (ADW) 19.0.3 to 20.0.1, you must contact [IBM Support](https://www.ibm.com/mysupport/s/) and open a support case. After your case is submitted, IBM support contacts you.
-
-## Step 1: Get access to the new container images
-
-Follow the instructions in step 1 of [Installing Cloud Pak for Automation 20.0.1 on Red Hat OpenShift](install.md#step-1-create-a-namespace-and-get-access-to-the-container-images) to clone the 20.0.1 GitHub repository and to get access to the new docker images.
-
-## Step 2: Update the operator version number to 20.0.1
-
-1. Log in to the target cluster.
-   ```bash
-   $ oc login https://<cluster-ip>:<port> -u <username>
-   ```
-2. Go to the downloaded `cert-kubernetes.git` repository for 20.0.1, and change directory to cert-kubernetes.
-   ```bash
-   $ cd cert-kubernetes
-   ```
-3. Upgrade the icp4a-operator on your cluster.
-
-   Use the 20.0.1 [scripts/upgradeOperator.sh](../../scripts/upgradeOperator.sh) script to deploy the operator manifest descriptors.
-   ```bash
-   $ ./scripts/upgradeOperator.sh -i <registry_url>/icp4a-operator:20.0.1 -p '<my_secret_name>' -a accept
-   ```
-
-   Where *registry_url* is the value for your internal docker registry or `cp.icr.io/cp/cp4a` for the IBM Cloud Entitled Registry, *my_secret_name* is the secret created to access the registry, and *accept* means that you accept the [license](../../LICENSE).
-
-   > **Note**: If you plan to use a non-administrator user to install the operator, you must add the user to the `ibm-cp4a-operator` role.
For example:
-   ```bash
-   $ oc adm policy add-role-to-user ibm-cp4a-operator <user_name>
-   ```
-
-## Step 3: Update the image versions in the custom resource YAML file for your deployment
-
-Get the custom resource YAML file that you deployed and edit it by following the instructions for each component:
-
-- [Configure IBM Automation Workstream Services](../../IAWS/README_upgrade.md)
-- [Configure IBM Business Automation Application Engine](../../AAE/README_upgrade.md)
-- [Configure IBM Business Automation Content Analyzer](../../ACA/README_upgrade.md)
-- [Configure IBM Business Automation Insights](../../BAI/README_upgrade.md)
-- [Configure IBM Business Automation Navigator](../../BAN/README_upgrade.md)
-- [Configure IBM Business Automation Studio](../../BAS/README_upgrade.md)
-- [Configure IBM FileNet Content Manager](../../FNCM//README_upgrade.md)
-- [Configure IBM Operational Decision Manager](../../ODM/README_upgrade.md)
-- [Configure the User Management Service](../../UMS/README_upgrade.md)
-
-## Step 4: Apply the updated custom resource to upgrade from 19.0.3 to 20.0.1
-
-1. Check that all the components that you want to upgrade are configured.
-
-   ```bash
-   $ cat descriptors/my_icp4a_cr.yaml
-   ```
-
-2. Update the configured components by applying the custom resource.
-
-   ```bash
-   $ oc apply -f descriptors/my_icp4a_cr.yaml
-   ```
-
-## Step 5: Verify the applications
-
-The operator reconciliation loop might take several minutes.
-
-Monitor the status of your pods with:
-```bash
-$ oc get pods -w
-```
-
-Log in to the web applications in your deployment and verify that they are ready and can be accessed.
diff --git a/platform/roks/README.md b/platform/roks/README.md
deleted file mode 100644
index 9f4d4a00..00000000
--- a/platform/roks/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# IBM Cloud Pak for Automation 20.0.1 on IBM Cloud Public
-
-Red Hat OpenShift 4.2 is the managed version on IBM Cloud for Cloud Pak for Automation 20.0.1.
-
-Choose which use case you need with an operator, and then follow the links below to find the right instructions:
-
-- [Install Cloud Pak for Automation 20.0.1 on IBM Cloud](install.md)
-- [Uninstall Cloud Pak for Automation 20.0.1 on IBM Cloud](uninstall.md)
-- [Upgrade Cloud Pak for Automation 19.0.3 to 20.0.1 on IBM Cloud](upgrade.md)
-- [Migrate 19.0.x persisted data to 20.0.1 on IBM Cloud](migrate.md)
-- [Update Cloud Pak for Automation 20.0.1 on IBM Cloud](update.md)
diff --git a/platform/roks/install.md b/platform/roks/install.md
deleted file mode 100644
index e9c671e0..00000000
--- a/platform/roks/install.md
+++ /dev/null
@@ -1,343 +0,0 @@
-# Installing Cloud Pak for Automation 20.0.1 on Managed OpenShift on IBM Cloud Public
-
-Before you deploy an automation container on IBM Cloud, you must configure your client environment, create an OpenShift cluster, prepare your container environment, and set up where to get the container images.
-
-Make sure that you have the following software on your computer so that you can use the command-line interfaces (CLIs) that you need to interact with the cluster.
-
-- [IBM Cloud CLI](https://cloud.ibm.com/docs/containers?topic=containers-cs_cli_install)
-- [OpenShift Container Platform CLI](https://docs.openshift.com/container-platform/4.2/cli_reference/openshift_cli/getting-started-cli.html)
-- [Kubernetes CLI](https://kubernetes.io/docs/tasks/tools/install-kubectl)
-- [Docker CLI (Mac)](https://docs.docker.com/docker-for-mac/install) or [Docker CLI (Linux)](https://docs.docker.com/install/linux/docker-ce/ubuntu/#install-docker-engine)
-
-As an administrator of the cluster, you must be able to interact with your environment.
-
-1. Create an account on [IBM Cloud](https://cloud.ibm.com/kubernetes/registry/main/start).
-2. Log in to IBM Cloud if you already have an account.
-
-If you do not already have a cluster, create one. From the [IBM Cloud Overview](https://cloud.ibm.com/kubernetes/overview) page, in the OpenShift Cluster tile, click Create Cluster. Refer to the [IBM Cloud documentation](https://cloud.ibm.com/docs/openshift?topic=openshift-openshift-create-cluster#openshift_create_cluster_console) to create a Kubernetes cluster. The cluster that you create includes attached storage.
-
-- [Step 1: Get access to the container images](install.md#step-1-get-access-to-the-container-images)
-- [Step 2: Prepare the cluster for automation software](install.md#step-2-prepare-the-cluster-for-automation-software)
-- [Step 3: Create a shared PVC and add the JDBC drivers](install.md#step-3-create-a-shared-pvc-and-add-the-jdbc-drivers)
-- [Step 4: Deploy the operator manifest files to your cluster](install.md#step-4-deploy-the-operator-manifest-files-to-your-cluster)
-- [Step 5: Configure the software that you want to install](install.md#step-5-configure-the-software-that-you-want-to-install)
-- [Step 6: Apply the custom resources](install.md#step-6-apply-the-custom-resources)
-- [Step 7: Verify that the operator and pods are running](install.md#step-7-verify-that-the-operator-and-pods-are-running)
-- [Step 8: Complete some post-installation steps](install.md#step-8-complete-some-post-installation-steps)
-
-## Step 1: Get access to the container images
-
-1. Log in to your IBM Cloud Kubernetes cluster. In the OpenShift web console menu bar, click your profile *IAM#user.name@email.com* > *Copy Login Command* and paste the copied command into your command line.
-   ```bash
-   $ oc login https://<cluster-ip>:<port> --token=<token>
-   ```
-2. Run a `kubectl` command to make sure that you have access to Kubernetes.
-   ```bash
-   $ kubectl cluster-info
-   ```
-3. Download or clone the repository to your local machine and go to the `cert-kubernetes` directory.
-   ```bash
-   $ git clone git@github.com:icp4a/cert-kubernetes.git
-   $ cd cert-kubernetes
-   ```
-   The `cert-kubernetes` directory includes all of the scripts and descriptors that are needed to install Cloud Pak for Automation.
-
-4. Create a project for each release that you want to install by running the following commands.
-   ```bash
-   $ oc new-project <project_name> --description="<description>" --display-name="<display_name>"
-   ```
-5. Add privileges to the projects. Grant ibm-anyuid-scc privileges to any authenticated user and grant ibm-privileged-scc privileges to any authenticated user.
-   ```bash
-   $ oc project <project_name>
-   $ oc adm policy add-scc-to-user privileged -z default
-   $ oc adm policy add-scc-to-group ibm-anyuid-scc system:authenticated
-   $ oc adm policy add-scc-to-group ibm-privileged-scc system:authenticated
-   ```
-   > Note: You need a privileged account to run the oc adm policy command.
> The project must be able to pull images from the registry where they are loaded, and must also have push privileges to load the images into another namespace.
-
-6. Make sure that your entitled container images are available and accessible in one of the IBM docker registries. Use either **option 1** or **option 2**.
-
-### Option 1: Create a pull secret for the IBM Cloud Entitled Registry
-
-1. Log in to [MyIBM Container Software Library](https://myibm.ibm.com/products-services/containerlibrary) with the IBMid and password that are associated with the entitled software.
-
-2. In the **Container software library** tile, click **View library** and then click **Copy key** to copy the entitlement key to the clipboard.
-
-3. Create a pull secret by running a `kubectl create secret` command.
-   ```bash
-   $ kubectl create secret docker-registry admin.registrykey --docker-server=cp.icr.io --docker-username=cp --docker-password="<entitlement_key>" --docker-email=<user_email>
-   ```
-   > **Note**: The `cp.icr.io` value for the **docker-server** parameter is the only registry domain name that contains the images.
-
-   > **Note**: Use "cp" for the docker-username. The docker-email has to be a valid email address (associated with your IBM ID). Make sure that you copy the entitlement key into the docker-password field within double quotes.
-
-4. Take a note of the secret and the server values so that you can set them to the **pullSecrets** and **repository** parameters when you run the operator for your containers.
-
-5. Install the Container Registry plug-in.
-   ```bash
-   $ ibmcloud plugin install container-registry -r 'IBM Cloud'
-   ```
-6. Log in to your IBM Cloud account.
-   ```bash
-   $ ibmcloud login -a https://cloud.ibm.com
-   ```
-7. Set the region as global.
-   ```bash
-   $ ibmcloud cr region-set global
-   ```
-8. List the available images by using the following command.
-   ```bash
-   $ ibmcloud cr image-list --include-ibm | grep -i cp4a
-   ```
-
-### Option 2: Download the packages from PPA and load the images
-
-[IBM Passport Advantage (PPA)](https://www-01.ibm.com/software/passportadvantage/pao_customer.html) provides archives (.tgz) for the software. To view the list of Passport Advantage eAssembly installation images, refer to the [20.0.1 download document](https://www.ibm.com/support/pages/ibm-cloud-pak-automation-v2001-download-document).
-
-1. Download one or more PPA packages to a server that is connected to your Docker registry.
-
-2. Check that you can run a docker command.
-   ```bash
-   $ docker ps
-   ```
-3. Log in to the Docker registry with a token.
-   ```bash
-   $ docker login $(oc registry info) -u <username> -p $(oc whoami -t)
-   ```
-
-   You can also log in to an external Docker registry using the following command:
-   ```bash
-   $ docker login <registry-url> -u <username>
-   ```
-4. Run a `kubectl` command to make sure that you have access to Kubernetes.
-   ```bash
-   $ kubectl cluster-info
-   ```
-5. Download the loadimages.sh script. Change the permissions so that you can run the script.
-   ```bash
-   $ chmod +x loadimages.sh
-   ```
-6. Use the [`scripts/loadimages.sh`](../../scripts/loadimages.sh) script to push the images into the IBM Cloud Container Registry. Specify the two mandatory parameters in the command line.
-
-   ```
-   -p  PPA archive files location or archive filename
-   -r  Target Docker registry and namespace
-   -l  Optional: Target a local registry
-   ```
-   The following example shows the input values in the command line.
-   ```bash
-   $ ./loadimages.sh -p <PPA-archive>.tgz -r <registry_domain_name>/<namespace>
-   ```
-
-   > Note: A registry domain name is associated with your cluster location.
> The name us.icr.io, for example, is for the region us-south. The region and registry domain names are listed in the [IBM Cloud Container Registry documentation](https://cloud.ibm.com/docs/services/Registry). The default docker registry is based on the host name, for example "default-route-openshift-image-registry.ibm.com". The project must have pull privileges to the registry where the images are loaded, and push privileges to load the images into another namespace/project.
-
-7. After you push the images to the registry, check whether they are pushed correctly by running the following command.
-   ```bash
-   $ ibmcloud cr images --restrict <namespace>
-   ```
-8. Create a pull secret to be able to pull images from the IBM Cloud Container Registry.
-   ```bash
-   $ kubectl create secret docker-registry admin.registrykey \
-       --docker-server=<registry_domain_name> --docker-username=iamapikey \
-       --docker-password="<API_key>" --docker-email=<user_email> --namespace <project_name>
-   ```
-   To generate an API key, go to Security > Manage > Identity and Access > IBM Cloud API Keys in the IBM Cloud menu and select Generate an IBM Cloud API key.
-
-9. Take a note of the secret names so that you can set them to the **pullSecrets** parameter when you run the installation for your containers.
-10. (Optional) If you want to use an external Docker registry, create a Docker registry secret.
-    ```bash
-    $ oc create secret docker-registry <secret_name> --docker-server=<registry-url> --docker-username=<username> --docker-password=<password> --docker-email=<email>
-    ```
-    Take a note of the secret and the server values so that you can set them to the **pullSecrets** and **repository** parameters when you run the operator for your containers.
-
-## Step 2: Prepare the cluster for automation software
-
-Before you install any of the containerized software:
-
-1. Follow the instructions on preparing the cluster for the software components that you want to install in the [IBM Cloud Pak for Automation 20.0.x](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_prepare_env_k8s.html) Knowledge Center.
-
-   How much preparation you need to do depends on what you want to install and how familiar you are with the cluster.
-
-## Step 3: Create a shared PVC and add the JDBC drivers
-
-By default, an IBM Cloud ROKS cluster is attached to endurance storage, which comes with predefined storage classes. To copy the JDBC drivers to the operator pod, you need a storage class with group ID (gid) support:
-
-1. Use one of the available storage classes with "gid" support (ibmc-file-bronze-gid, ibmc-file-retain-gold, or ibmc-file-silver-gid).
-2. If you defined your own storage class instead, apply its YAML file.
-   ```bash
-   $ oc apply -f operator-sc.yaml
-   ```
-3. Create a claim for a PV dynamically with [descriptors/operator-shared-pvc.yaml](../../descriptors/operator-shared-pvc.yaml?raw=true).
-
-   > Replace the storage class with the name of the storage class from step 1 (ibmc-file-bronze-gid, ibmc-file-retain-gold, or ibmc-file-silver-gid).
-
-4. Deploy the PVC.
-   ```bash
-   $ oc create -f descriptors/operator-shared-pvc.yaml
-   ```
-   Run the following commands to get the bound PV name and the PV location.
-   ```bash
-   $ oc get pvc | grep operator-shared-pvc
-   $ oc describe pv <PV_name>
-   ```
-
-5. If your storage configuration needs JDBC drivers, create a `jdbc` parent folder on your remote file system and put your drivers into the following structure.
-   ```
-   └── jdbc
-       ├── db2
-       │   ├── db2jcc4.jar
-       │   └── db2jcc_license_cu.jar
-       └── oracle
-           └── ojdbc8.jar
-   ```
-   > **Note**: File names for JDBC drivers cannot include additional version information.
-
-   DB2:
-   - db2jcc4.jar
-   - db2jcc_license_cu.jar
-
-   Oracle:
-   - ojdbc8.jar
-
-6. Copy these files to the operator pod by running the following commands:
-   ```bash
-   $ podname=$(oc get pod | grep ibm-cp4a-operator | awk '{print $1}')
-   $ kubectl cp $PATH_TO_JDBC/jdbc $NAMESPACE/$podname:/opt/ansible/share -c ansible
-   ```
-
-## Step 4: Deploy the operator manifest files to your cluster
-
-The Cloud Pak operator has a number of descriptors that must be applied.
-  - [descriptors/ibm_cp4a_crd.yaml](../../descriptors/ibm_cp4a_crd.yaml?raw=true) contains the description of the Custom Resource Definition.
-  - [descriptors/operator.yaml](../../descriptors/operator.yaml?raw=true) defines the deployment of the operator code.
-  - [descriptors/role.yaml](../../descriptors/role.yaml?raw=true) defines the access of the operator.
-  - [descriptors/role_binding.yaml](../../descriptors/role_binding.yaml?raw=true) defines the access of the operator.
-  - [descriptors/service_account.yaml](../../descriptors/service_account.yaml?raw=true) defines the identity for processes that run inside the pods of the operator.
-
-1. Deploy the icp4a-operator on your cluster.
-
-   Use the script [scripts/deployOperator.sh](../../scripts/deployOperator.sh) to deploy these descriptors.
-   ```bash
-   $ ./scripts/deployOperator.sh -i <registry_url>/icp4a-operator:20.0.1 -p '<my_secret_name>' -a accept
-   ```
-
-   Where *registry_url* is the value for your internal docker registry or `cp.icr.io/cp/cp4a` for the IBM Cloud Entitled Registry, *my_secret_name* is the secret created to access the registry, and *accept* means that you accept the [license](../../LICENSE).
-
-   > **Note**: If you plan to use a non-admin user to install the operator, you must add the user to the `ibm-cp4a-operator` role. For example:
-   ```bash
-   $ oc adm policy add-role-to-user ibm-cp4a-operator <user_name>
-   ```
-
-2. Monitor the pod until it shows a STATUS of *Running*:
-   ```bash
-   $ oc get pods -w
-   ```
-   > **Note**: When started, you can monitor the operator logs with the following command:
-   ```bash
-   $ oc logs -f deployment/ibm-cp4a-operator -c operator
-   ```
-
-## Step 5: Configure the software that you want to install
-
-A custom resource (CR) YAML file is a configuration file that describes an ICP4ACluster instance and includes the parameters to install some or all of the components.
-
-1. Make a copy of the template custom resource YAML file [descriptors/ibm_cp4a_cr_template.yaml](../../descriptors/ibm_cp4a_cr_template.yaml?raw=true) and name it appropriately for your deployment (for example descriptors/my_icp4a_cr.yaml).
-
-   > **Important:** Because the maximum length of labels in Kubernetes is 63 characters, be careful with the lengths of your CR name and instance names. Some components can configure multiple instances, and each instance must have a different name. The total length of the CR name and an instance name must not exceed 24 characters, otherwise some component deployments fail.
-
-   You must use a single custom resource file to include all of the components that you want to deploy with an operator instance. Each time that you need to make an update or modification you must use this same file to apply the changes to your deployments.
-   When you apply a new custom resource to an operator you must make sure that all previously deployed resources are included if you do not want the operator to delete them.
-
-2. Change the default name of your instance in descriptors/my_icp4a_cr.yaml.
-
-   ```yaml
-   metadata:
-     name: <instance_name>
-   ```
-
-3. If you plan to install UMS and/or AAE and you use the IBM entitled registry, uncomment the lines for the `image_pull_secrets` and `images` parameters in the `shared_configuration` section.
-
-   If you use an internal registry, enter your values for these parameters.
-
-   ```yaml
-   shared_configuration:
-     image_pull_secrets:
-       - <secret_name>
-     images:
-       keytool_job_container:
-         repository: <registry_url>:5000/<project_name>/dba-keytool-jobcontainer
-         tag: 20.0.1
-       dbcompatibility_init_container:
-         repository: <registry_url>:5000/<project_name>/dba-dbcompatibility-initcontainer
-         tag: 20.0.1
-       keytool_init_container:
-         repository: <registry_url>:5000/<project_name>/dba-keytool-initcontainer
-         tag: 20.0.1
-       umsregistration_initjob:
-         repository: <registry_url>:5000/<project_name>/dba-umsregistration-initjob
-         tag: 20.0.1
-     pull_policy: IfNotPresent
-   ```
-
-   | Parameter | Description |
-   | ------------------------------- | --------------------------------------------- |
-   | `keytool_job_container` | Repository from where to pull the UMS keytool_job_container and the corresponding tag |
-   | `dbcompatibility_init_container` | Repository from where to pull the AAE init_container and the corresponding tag |
-   | `keytool_init_container` | Repository from where to pull the UMS keytool_init_container and the corresponding tag |
-   | `umsregistration_initjob` | Repository from where to pull the AAE umsregistration_initjob and the corresponding tag |
-   | `image_pull_secrets` | Secrets in your target namespace to pull images from the specified repository |
-
-   > **Note:** If you do not plan to install UMS or AAE, you can leave these lines commented in your copy of the custom resource template file.
-
-4. Use the following links to configure the software that you want to install.
-
-   - [Configure IBM Automation Digital Worker](../../ADW/README_config.md)
-   - [Configure IBM Automation Workstream Services](../../IAWS/README_config.md)
-   - [Configure IBM Business Automation Application Engine](../../AAE/README_config.md)
-   - [Configure IBM Business Automation Content Analyzer](../../ACA/README_config.md)
-   - [Configure IBM Business Automation Insights](../../BAI/README_config.md)
-   - [Configure IBM Business Automation Navigator](../../BAN/README_config.md)
-   - [Configure IBM Business Automation Studio](../../BAS/README_config.md)
-   - [Configure IBM FileNet Content Manager](../../FNCM//README_config.md)
-   - [Configure IBM Operational Decision Manager](../../ODM/README_config.md)
-   - [Configure the User Management Service](../../UMS/README_config.md)
-
-## Step 6: Apply the custom resources
-
-1. Check that all the components you want to install are configured.
-
-   ```bash
-   $ cat descriptors/my_icp4a_cr.yaml
-   ```
-
-2. Deploy the configured components by applying the custom resource.
-
-   ```bash
-   $ oc apply -f descriptors/my_icp4a_cr.yaml
-   ```
-
-## Step 7: Verify that the operator and pods are running
-
-The operator reconciliation loop might take several minutes.
-
-Monitor the status of your pods with:
-```bash
-$ oc get pods -w
-```
-
-When all of the pods are *Running*, you can access the status of your services with the following command.
-```bash
-$ oc status
-```
-You can now expose the services to your users, for example by creating routes as sketched below.
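-
-A hedged sketch of that last step (service names vary by component and are illustrative; `oc get svc` shows the real names):
-
-```bash
-# List the services created by the operator, then expose one as a route.
-# OpenShift generates the route host name automatically.
-oc get svc -n <project_name>
-oc expose service <service_name> -n <project_name>
-```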
-
-Refer to the [Troubleshooting section](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_trbleshoot_operators.html) to access the operator logs.
-
-## Step 8: Complete some post-installation steps
-
-Go to [IBM Knowledge Center](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_deploy_postdeployk8s.html) to follow the post-installation steps.
diff --git a/platform/roks/migrate.md b/platform/roks/migrate.md
deleted file mode 100644
index f80fbdbf..00000000
--- a/platform/roks/migrate.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# Migrating Cloud Pak for Automation data on Managed Red Hat OpenShift
-
-To migrate your 19.0.x data to 20.0.1, uninstall your current deployment and follow the migration instructions for each component to point to the existing persistent stores.
-
-## Step 1: Prepare your environment and take note of your existing storage settings
-
-Use the following links to help you find the relevant software storage settings that you want to migrate.
-
-- [Configure IBM Business Automation Application Engine](../../AAE/README_migrate.md)
-- [Configure IBM Business Automation Content Analyzer](../../ACA/README_migrate.md)
-- [Configure IBM Business Automation Insights](../../BAI/README_migrate.md)
-- [Configure IBM Business Automation Navigator](../../BAN/README_migrate.md)
-- [Configure IBM Business Automation Studio](../../BAS/README_migrate.md)
-- [Configure IBM FileNet Content Manager](../../FNCM//README_migrate.md)
-- [Configure IBM Operational Decision Manager](../../ODM/README_migrate.md)
-- [Configure the User Management Service](../../UMS/README_migrate.md)
-
-## Step 2: Install your chosen components with the operator
-
-When you have completed all of the preparation steps for each of the components that you want to migrate, follow the instructions in the [installation](install.md) readme.
diff --git a/platform/roks/uninstall.md b/platform/roks/uninstall.md
deleted file mode 100644
index f5104034..00000000
--- a/platform/roks/uninstall.md
+++ /dev/null
@@ -1,30 +0,0 @@
-# Uninstalling Cloud Pak for Automation 20.0.1 on Managed Red Hat OpenShift
-
-## Delete your automation instances
-
-You can delete your custom resource (CR) deployments by deleting the CR YAML file or the CR instance. The name of the instance is taken from the value of the `name` parameter in the CR YAML file. The following command is used to delete an instance.
-
-```bash
-$ oc delete ICP4ACluster <instance_name>
-```
-
-If you want to uninstall IBM Automation Digital Worker and unsubscribe from IBM Business Automation Studio, you must scale up the IBM Automation Digital Worker unsubscribe ReplicaSet before deleting your automation instances. Regardless of the pod status, check the pod's log for a `STATUS=success` statement as proof that the unsubscription succeeded.
-
-```bash
-$ kubectl scale replicasets <instance_name>-adw-registry-unsubscribe --replicas=1
-```
-
-> **Note**: You can get the names of the ICP4ACluster instances with the following command:
-  ```bash
-  $ oc get ICP4ACluster
-  ```
-
-## Delete the operator instance and all associated automation instances
-
-Use the [`scripts/deleteOperator.sh`](../../scripts/deleteOperator.sh) script to delete all the resources that are linked to the operator.
-
-```bash
-$ ./scripts/deleteOperator.sh
-```
-
-Verify that all the pods created with the operator are terminated and deleted, as sketched below.
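-
-A hedged way to confirm the cleanup (the project name is illustrative):
-
-```bash
-# Watch the project until no operator-created pods remain,
-# then confirm that no ICP4ACluster instances are left behind.
-oc get pods -n <project_name> -w
-oc get ICP4ACluster -n <project_name>
-```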
diff --git a/platform/roks/update.md b/platform/roks/update.md
deleted file mode 100644
index 26f47caa..00000000
--- a/platform/roks/update.md
+++ /dev/null
@@ -1,54 +0,0 @@
-# Updating Cloud Pak for Automation 20.0.1 on Managed Red Hat OpenShift
-
-- [Step 1: Modify the software that is installed](update.md#step-1-modify-the-software-that-is-installed)
-- [Step 2: Apply the updated custom resources](update.md#step-2-apply-the-updated-custom-resources)
-- [Step 3: Verify the updated automation containers](update.md#step-3-verify-the-updated-automation-containers)
-
-## Step 1: Modify the software that is installed
-
-An update to the custom resource (CR) overwrites the deployed resources during the operator control loop (observe, analyze, act) that runs as a result of constantly watching the state of the Kubernetes resources.
-
-Use the following links to configure the software that is already installed. You can modify the installed software, remove it, or add new components. Use the same CR YAML file that you deployed with the operator to make the updates (for example descriptors/my_icp4a_cr.yaml).
-
-- [Configure IBM Automation Digital Worker](../../ADW/README_config.md)
-- [Configure IBM Automation Workstream Services](../../IAWS/README_config.md)
-- [Configure IBM Business Automation Application Engine](../../AAE/README_config.md)
-- [Configure IBM Business Automation Content Analyzer](../../ACA/README_config.md)
-- [Configure IBM Business Automation Insights](../../BAI/README_config.md)
-- [Configure IBM Business Automation Navigator](../../BAN/README_config.md)
-- [Configure IBM Business Automation Studio](../../BAS/README_config.md)
-- [Configure IBM FileNet Content Manager](../../FNCM//README_config.md)
-- [Configure IBM Operational Decision Manager](../../ODM/README_config.md)
-- [Configure the User Management Service](../../UMS/README_config.md)
-
-## Step 2: Apply the updated custom resources
-
-1. Review your CR YAML file to make sure it contains all of your intended modifications.
-
-   ```bash
-   $ cat descriptors/my_icp4a_cr.yaml
-   ```
-
-2. Run the following command to apply the updates to the operator:
-
-   ```bash
-   $ oc apply -f descriptors/my_icp4a_cr.yaml --overwrite=true
-   ```
-
-> **Note:** You can also use `oc edit ICP4ACluster <instance_name>` to open the default UNIX visual editor (vi) in situ.
-
-## Step 3: Verify the updated automation containers
-
-The operator reconciliation loop might take several minutes.
-
-Monitor the status of your pods with:
-```bash
-$ oc get pods -w
-```
-
-When all of the pods are *Running*, you can access the status of your services with the following command.
-```bash
-$ oc status
-```
-
-Refer to the [Troubleshooting section](https://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_trbleshoot_operators.html) to access the operator logs.
diff --git a/platform/roks/upgrade.md b/platform/roks/upgrade.md
deleted file mode 100644
index 157621fa..00000000
--- a/platform/roks/upgrade.md
+++ /dev/null
@@ -1,76 +0,0 @@
-# Upgrading from IBM Cloud Pak for Automation 19.0.3 to 20.0.1 on IBM Cloud
-
-If you installed any of the Cloud Pak for Automation 19.0.3 components on an IBM Cloud cluster and you want to continue to use them in 20.0.1, you can upgrade them. Before you start, it can help to confirm which operator version is currently deployed, as sketched below.
-
-> **Note:** If you are looking to upgrade Automation Digital Worker (ADW) 19.0.3 to 20.0.1, you must contact [IBM Support](https://www.ibm.com/mysupport/s/) and open a support case. After your case is submitted, IBM support contacts you.
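-
-A hedged way to check the currently deployed operator version (the deployment name matches the one used for log monitoring in the install instructions):
-
-```bash
-# Print the images of the running operator deployment; the image tags show
-# the currently installed version (for example 19.0.3).
-oc get deployment ibm-cp4a-operator \
-  -o jsonpath='{.spec.template.spec.containers[*].image}'
-```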
-
-## Step 1: Get access to the new container images
-
-Follow the instructions in step 1 of [Installing Cloud Pak for Automation 20.0.1 on Managed OpenShift on IBM Cloud Public](install.md#step-1-get-access-to-the-container-images) to clone the 20.0.1 GitHub repository and to get access to the new docker images.
-
-## Step 2: Update the operator version number to 20.0.1
-
-1. Log in to your IBM Cloud cluster. In the OpenShift web console menu bar, click your profile *IAM#user.name@email.com* > *Copy Login Command* and paste the copied command into your command line.
-   ```bash
-   $ oc login https://<cluster-ip>:<port> --token=<token>
-   ```
-2. Run a `kubectl` command to make sure that you have access to Kubernetes.
-   ```bash
-   $ kubectl cluster-info
-   ```
-3. Go to the downloaded `cert-kubernetes.git` repository for 20.0.1, and change directory to cert-kubernetes.
-   ```bash
-   $ cd cert-kubernetes
-   ```
-4. Upgrade the icp4a-operator on your cluster.
-
-   Use the 20.0.1 [scripts/upgradeOperator.sh](../../scripts/upgradeOperator.sh) script to deploy the operator manifest descriptors.
-   ```bash
-   $ ./scripts/upgradeOperator.sh -i <registry_url>/icp4a-operator:20.0.1 -p '<my_secret_name>' -a accept
-   ```
-
-   Where *registry_url* is the value for your internal docker registry or `cp.icr.io/cp/cp4a` for the IBM Cloud Entitled Registry, *my_secret_name* is the secret created to access the registry, and *accept* means that you accept the [license](../../LICENSE).
-
-   > **Note**: If you plan to use a non-administrator user to install the operator, you must add the user to the `ibm-cp4a-operator` role. For example:
-   ```bash
-   $ oc adm policy add-role-to-user ibm-cp4a-operator <user_name>
-   ```
-
-## Step 3: Update the image versions in the custom resource YAML file for your deployment
-
-Get the custom resource YAML file that you deployed and edit it by following the instructions for each component:
-
-- [Configure IBM Automation Workstream Services](../../IAWS/README_upgrade.md)
-- [Configure IBM Business Automation Application Engine](../../AAE/README_upgrade.md)
-- [Configure IBM Business Automation Content Analyzer](../../ACA/README_upgrade.md)
-- [Configure IBM Business Automation Insights](../../BAI/README_upgrade.md)
-- [Configure IBM Business Automation Navigator](../../BAN/README_upgrade.md)
-- [Configure IBM Business Automation Studio](../../BAS/README_upgrade.md)
-- [Configure IBM FileNet Content Manager](../../FNCM//README_upgrade.md)
-- [Configure IBM Operational Decision Manager](../../ODM/README_upgrade.md)
-- [Configure the User Management Service](../../UMS/README_upgrade.md)
-
-## Step 4: Apply the updated custom resource to upgrade from 19.0.3 to 20.0.1
-
-1. Check that all the components that you want to upgrade are configured.
-
-   ```bash
-   $ cat descriptors/my_icp4a_cr.yaml
-   ```
-
-2. Update the configured components by applying the custom resource.
-
-   ```bash
-   $ oc apply -f descriptors/my_icp4a_cr.yaml
-   ```
-
-## Step 5: Verify the applications
-
-The operator reconciliation loop might take several minutes.
-
-Monitor the status of your pods with:
-```bash
-$ oc get pods -w
-```
-
-Log in to the web applications in your deployment and verify that they are ready and can be accessed. One way to find their URLs is sketched below.
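-
-A hedged sketch for finding those URLs (route names vary by component):
-
-```bash
-# List the routes that expose the deployed web applications, then open
-# each host in a browser and verify that the login page loads.
-oc get routes -n <project_name>
-```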
diff --git a/scripts/cp4a-clusteradmin-setup.sh b/scripts/cp4a-clusteradmin-setup.sh
index 8e94915e..b516908a 100755
--- a/scripts/cp4a-clusteradmin-setup.sh
+++ b/scripts/cp4a-clusteradmin-setup.sh
@@ -18,7 +18,7 @@ else
     PARENT_DIR=$CUR_DIR
 fi
 TEMP_FOLDER=${CUR_DIR}/.tmp
-
+INSTALL_BAI=""
 CRD_FILE=${PARENT_DIR}/descriptors/ibm_cp4a_crd.yaml
 SA_FILE=${PARENT_DIR}/descriptors/service_account.yaml
 CLUSTER_ROLE_FILE=${PARENT_DIR}/descriptors/cluster_role.yaml
@@ -27,13 +27,27 @@ CLUSTER_ROLE_BINDING_FILE_TEMP=${TEMP_FOLDER}/.cluster_role_binding.yaml
 ROLE_FILE=${PARENT_DIR}/descriptors/role.yaml
 ROLE_BINDING_FILE=${PARENT_DIR}/descriptors/role_binding.yaml
 OPERATOR_FILE=${PARENT_DIR}/descriptors/operator.yaml
+BRONZE_STORAGE_CLASS=${PARENT_DIR}/descriptors/cp4a-bronze-storage-class.yaml
+SILVER_STORAGE_CLASS=${PARENT_DIR}/descriptors/cp4a-silver-storage-class.yaml
+GOLD_STORAGE_CLASS=${PARENT_DIR}/descriptors/cp4a-gold-storage-class.yaml
 LICENSE_FILE=${CUR_DIR}/LICENSES/LICENSE
 LOG_FILE=${CUR_DIR}/prepare_install.log
+PLATFORM_SELECTED=""
+PLATFORM_VERSION=""
+PROJ_NAME=""
+
+COMMON_SERVICES_CRD_DIRECTORY_OCP311=${PARENT_DIR}/descriptors/common-services/scripts
+COMMON_SERVICES_CRD_DIRECTORY=${PARENT_DIR}/descriptors/common-services/crds
+COMMON_SERVICES_OPERATOR_ROLES=${PARENT_DIR}/descriptors/common-services/roles
+COMMON_SERVICES_TEMP_DIR=$TEMP_FOLDER
 mkdir -p $TEMP_FOLDER >/dev/null 2>&1
 echo '' > $LOG_FILE
 function validate_cli(){
+    clear
+    echo -e "\x1B[1mThis script prepares the environment for the deployment of some Cloud Pak for Automation capabilities \x1B[0m"
+    echo
     which oc &>/dev/null
     [[ $? -ne 0 ]] && \
     echo "Unable to locate an OpenShift CLI. You must install it to run this script." && \
@@ -41,47 +55,54 @@ function validate_cli(){
 }
 function collect_input() {
-    clear
-    echo This script prepares the environment for the deployment of some Cloud Pak for Automation capabilities
+
+    project_name=""
     while [[ $project_name == "" ]];
     do
+        echo
         read -p "Enter the name for a new project or an existing project (namespace): " project_name
         if [ -z "$project_name" ]; then
            echo -e "\x1B[1;31mEnter a valid project name, project name can not be blank\x1B[0m"
+        elif [[ "$project_name" == openshift* ]]; then
+           echo -e "\x1B[1;31mEnter a valid project name, project name should not be 'openshift' or start with 'openshift' \x1B[0m"
+           project_name=""
+        elif [[ "$project_name" == kube* ]]; then
+           echo -e "\x1B[1;31mEnter a valid project name, project name should not be 'kube' or start with 'kube' \x1B[0m"
+           project_name=""
        else
            create_project
        fi
    done
    user_name=""
-   while [[ $user_name == "" ]];
-   do
-       read -p "Enter an existing username in your cluster, non-admin is suggested: " user_name
-       if [ -z "$user_name" ]; then
-          echo -e "\x1B[1;31mEnter a valid user name, user name can not be blank\x1B[0m"
-       else
-          check_user_exist
-       fi
-   done
+   select_user
 }
+
+
 function create_project() {
-    oc get project | grep "${project_name}" >/dev/null 2>&1
-    returnValue=$?
-    if [ "$returnValue" == 1 ] ; then
+
+    isProjExists=`oc get project $project_name --ignore-not-found | wc -l` >/dev/null 2>&1
+
+    if [ $isProjExists -ne 2 ] ; then
        oc new-project ${project_name} >> ${LOG_FILE}
       returnValue=$?
       if [ "$returnValue" == 1 ]; then
          echo -e "\x1B[1mInvalid project name, please enter a valid name...\x1B[0m"
         project_name=""
      else
-        echo -e "\x1B[1mCreate project ${project_name}...\x1B[0m"
+        echo -e "\x1B[1mUsing project ${project_name}...\x1B[0m"
      fi
   else
      echo -e "\x1B[1mProject \"${project_name}\" already exists!
Continue...\x1B[0m" fi - + PROJ_NAME=${project_name} + + if [[ $PLATFORM_VERSION == "3.11" ]]; then + oc adm policy add-scc-to-user privileged -z ibm-cp4a-operator -n ${project_name} + fi + } function check_user_exist() { @@ -96,20 +117,21 @@ function check_user_exist() { } function bind_scc() { - echo -ne Binding the 'privileged' role to the 'default' service account... - dba_scc=$(oc get scc privileged | awk '{print $1}' ) - if [ -n "$dba_scc" ]; then - oc adm policy add-scc-to-user privileged -z default >> ${LOG_FILE} - else - echo "The 'privileged' security context constraint (SCC) does not exist in the cluster. Make sure that you update your environment to include this SCC." - exit 1 - fi - echo "Done" + echo + echo -ne Binding the 'privileged' role to the 'default' service account... + dba_scc=$(oc get scc privileged | awk '{print $1}' ) + if [ -n "$dba_scc" ]; then + oc adm policy add-scc-to-user privileged -z default >> ${LOG_FILE} + else + echo "The 'privileged' security context constraint (SCC) does not exist in the cluster. Make sure that you update your environment to include this SCC." + exit 1 + fi + echo "Done" } function prepare_install() { sed -e "s//${project_name}/g" ${CLUSTER_ROLE_BINDING_FILE} > ${CLUSTER_ROLE_BINDING_FILE_TEMP} - + echo echo -ne "Creating the custom resource definition (CRD) and a service account that has the permissions to manage the resources..." oc apply -f ${CRD_FILE} -n ${project_name} --validate=false >> ${LOG_FILE} oc apply -f ${CLUSTER_ROLE_FILE} --validate=false >> ${LOG_FILE} @@ -119,6 +141,7 @@ function prepare_install() { oc apply -f ${ROLE_BINDING_FILE} -n ${project_name} --validate=false >> ${LOG_FILE} echo "Done" + echo echo -ne Adding the user ${user_name} to the ibm-cp4a-operator role... oc project ${project_name} >> ${LOG_FILE} oc adm policy add-role-to-user edit ${user_name} >> ${LOG_FILE} @@ -128,20 +151,6 @@ function prepare_install() { echo "Done" } -# ACA need this task to tag node with special labels -function tag_nodes(){ - echo -ne Tagging the worker nodes... - nodes=$(oc get nodes | grep compute | grep [^Not]Ready | awk '{print $1}' | cut -d ',' -f1 | tr -d '"') - for i in $nodes - do - if [ $i != 'NAME' ]; then - # oc label nodes $i {celery$project_name-,mongo$project_name-,mongo-admin$project_name-} >> ${LOG_FILE} - oc label nodes $i mongo$project_name=aca mongo-admin$project_name=aca celery$project_name=aca --overwrite=true >> ${LOG_FILE} - fi - done - echo "Done" -} - function check_existing_sc(){ # Check existing storage class sc_result=$(oc get sc 2>&1) @@ -151,41 +160,451 @@ function check_existing_sc(){ then clear echo -e "\x1B[1;31mAt least one dynamic storage class must be available in order to proceed.\n\x1B[0m" - echo -e "\x1B[1;31mPlease refer to the README for the requirements and instructions. The script will now exit.!.\n\x1B[0m" + echo -e "\x1B[1;31mPlease refer to the README for the requirements and instructions. The script will now exit!.\n\x1B[0m" exit 1 fi } -function display_storage_classes() { +function display_storage_classes_ocp() { echo - echo "A storage class is needed to run the deployment script. You can get the existing storage class(es) in the environment by running the following command: oc get storageclass. Take note of the storage class that you want to use. "    + echo "Storage classes are needed to run the deployment script. For the "Demo" deployment scenario, you may use one (1) storage class.  
For an "Enterprise" deployment, the deployment script will ask for three (3) storage classes to meet the "slow", "medium", and "fast" storage for the configuration of CP4A components.  If you don't have three (3) storage classes, you can use the same one for "slow", "medium", or fast.  Note that you can get the existing storage class(es) in the environment by running the following command: oc get storageclass. Take note of the storage classes that you want to use for deployment. " oc get storageclass } + function display_node_name() { echo - echo "The Infrastructure Node host name for the environment is needed to run the deployment script. You can get the host name by running the following command: oc get nodes --selector node-role.kubernetes.io/infra=true -o custom-columns=":metadata.name". Take note of the host name. "    + if [[ $PLATFORM_VERSION == "3.11" ]]; + then + echo "Below is the host name of the Infrastructure Node for the environment, which is required as an input during the execution of the deployment script for the creation of routes in OCP. You can also get the host name by running the following command: oc get nodes --selector node-role.kubernetes.io/infra=true -o custom-columns=":metadata.name". Take note of the host name. " oc get nodes --selector node-role.kubernetes.io/infra=true -o custom-columns=":metadata.name" + elif [[ $PLATFORM_VERSION == "4.2" || $PLATFORM_VERSION == "4.3" || $PLATFORM_VERSION == "4.4" ]]; + then + echo "Below is the route host name for the environment, which is required as an input during the execution of the deployment script for the creation of routes in OCP. You can also get the host name by running the following command: oc get route console -n openshift-console -o yaml|grep routerCanonicalHostname. Take note of the host name. 
" + oc get route console -n openshift-console -o yaml|grep routerCanonicalHostname | head -1 | cut -d ' ' -f 6 + fi } + function create_scc() { oc create serviceaccount ibm-pfs-es-service-account oc create -f ibm-pfs-privileged-scc.yaml oc adm policy add-scc-to-user ibm-pfs-privileged-scc -z ibm-pfs-es-service-account } + function clean_up(){ rm -rf ${TEMP_FOLDER} >/dev/null 2>&1 } + +function select_platform(){ + COLUMNS=12 + echo -e "\x1B[1mSelect the cloud platform to deploy: \x1B[0m" + options=("Openshift Container Platform (OCP) - Private Cloud" "Other ( Certified Kubernetes Cloud Platform / CNCF)") + PS3='Enter a valid option [1 to 2]: ' + select opt in "${options[@]}" + do + case $opt in + "RedHat OpenShift Kubernetes Service (ROKS) - Public Cloud") + PLATFORM_SELECTED="ROKS" + break + ;; + "Openshift Container Platform (OCP) - Private Cloud") + PLATFORM_SELECTED="OCP" + break + ;; + "Other ( Certified Kubernetes Cloud Platform / CNCF)") + PLATFORM_SELECTED="other" + break + ;; + *) echo "invalid option $REPLY";; + esac + done +} + + +function select_deployment_type(){ + COLUMNS=12 + echo -e "\x1B[1mWhat type of deployment is being performed?\x1B[0m" + if [[ $PLATFORM_SELECTED == "ROKS" ]]; + then + options=("Demo") + PS3='Enter a valid option [1 to 1]: ' + select opt in "${options[@]}" + do + case $opt in + "Demo") + DEPLOYMENT_TYPE="demo" + break + ;; + *) echo "invalid option $REPLY";; + esac + done + else + options=("Demo" "Enterprise") + PS3='Enter a valid option [1 to 2]: ' + select opt in "${options[@]}" + do + case $opt in + "Demo") + DEPLOYMENT_TYPE="demo" + break + ;; + "Enterprise") + DEPLOYMENT_TYPE="enterprise" + break + ;; + *) echo "invalid option $REPLY";; + esac + done + fi +} + +function select_user(){ + user_result=$(oc get user 2>&1) + user_substring="No resources found" + if [[ $user_result == *"$user_substring"* ]]; + then + clear + echo -e "\x1B[1;31mAt least one user must be available in order to proceed.\n\x1B[0m" + echo -e "\x1B[1;31mPlease refer to the README for the requirements and instructions. The script will now exit.!\n\x1B[0m" + exit 1 + fi + echo + userlist=$(oc get user|awk '{if(NR>1){if(NR==2){ arr=$1; }else{ arr=arr" "$1; }} } END{ print arr }') + COLUMNS=12 + echo -e "\x1B[1mHere are the existing users on this cluster: \x1B[0m" + options=($userlist) + usernum=${#options[*]} + PS3='Enter an existing username in your cluster, valid option [1 to '${usernum}'], non-admin is suggested: ' + select opt in "${options[@]}" + do + if [[ -n "$opt" && "${options[@]}" =~ $opt ]]; then + user_name=$opt + break + else + echo "invalid option $REPLY" + fi + done +} + +function display_installationprompt(){ + + echo "If you want to install Business Automation Insights, you must have IBM Event Streams already installed before you run the deployment script." + echo "For more information about the IBM Event Streams supported version number and licensing restrictions, see IBM Knowledge Center." 
+ echo "" + echo "IBM Common Services with Metering & Licensing Components will be installed" + + NAMESPACE_ODLM="common-service" + oc project $NAMESPACE_ODLM >/dev/null 2>&1 || oc new-project $NAMESPACE_ODLM >/dev/null 2>&1 +} + + +function check_storage_class() { + if [[ $PLATFORM_SELECTED == "OCP" ]]; + then + display_storage_classes_ocp + fi + if [[ $PLATFORM_SELECTED == "ROKS" ]]; + then + create_storage_classes_roks + fi + +} + +function create_storage_classes_roks() { + echo + echo -ne "\x1B[1mCreate storage classes for deployment: \x1B[0m" + oc apply -f ${BRONZE_STORAGE_CLASS} --validate=false >> ${LOG_FILE} + oc apply -f ${SILVER_STORAGE_CLASS} --validate=false >> ${LOG_FILE} + oc apply -f ${GOLD_STORAGE_CLASS} --validate=false >> ${LOG_FILE} + echo -e "\x1B[1mDone \x1B[0m" + #echo + #echo -e "\x1B[1mTake note of the storage classes that you can use for deployment \x1B[0m" + #oc get storageclass +} + +function display_storage_classes_roks() { + sc_bronze_name=cp4a-file-retain-bronze-gid + sc_silver_name=cp4a-file-retain-silver-gid + sc_gold_name=cp4a-file-retain-gold-gid + echo -e "\x1B[1;31m $sc_bronze_name \x1B[0m" + echo -e "\x1B[1;31m $sc_silver_name \x1B[0m" + echo -e "\x1B[1;31m $sc_gold_name \x1B[0m" +} + +function check_platform_version(){ + res=$(kubectl get nodes | awk 'NR==2{print $5}') + if [[ $res =~ v1.11 ]]; + then + PLATFORM_VERSION="3.11" + elif [[ $res =~ v1.14.6 ]]; + then + PLATFORM_VERSION="4.2" + elif [[ $res =~ v1.16.2 ]]; + then + PLATFORM_VERSION="4.3" + elif [[ $res =~ v1.17.1 ]]; + then + PLATFORM_VERSION="4.4" + else + echo -e "\x1B[1;31mUnable to determine OCP version with node version information: $res . Will NOT install/prepare common service for your deployment\x1B[0m" + fi +} + +function prepare_common_service(){ + + echo + echo -e "\x1B[1mThe script is preparing the custom resources (CR) files for OCP Common Services.  You are required to update (fill out) the necessary values in the CRs and deploy Common Services prior to the deployment. \x1B[0m" + echo -e "The prepared CRs for IBM common Services are located here: "${COMMON_SERVICES_CRD_DIRECTORY} + echo -e "After making changes to the CRs, execute the 'deploy_CS.sh' script to install Common Services." + echo -e "Done" +} + +function install_common_service_34(){ + + if [ "$INSTALL_BAI" == "Yes" ] ; then + echo -e "Preparing full Common Services Release 3.4 CR for BAI Deployment.." + func_operand_request_cr_bai_34 + + else + echo -e "Preparing minimal Common Services Release 3.4 CR for non-BAI Deployment.." 
+ func_operand_request_cr_nonbai_34 + fi + + ## TODO: start to install common service + echo -e "\x1B[1mThe installation of Common Services has started.\x1B[0m" + #sh ./deploy_CS3.4.sh + nohup ./deploy_CS3.4.sh & + echo -e "Done" +} + +function install_common_service_33(){ + + func_operand_request_cr_nonbai_33 + echo -e "\x1B[1mThe installation of Common Services Release 3.3 for OCP 4.2+ has started.\x1B[0m" + sh ./deploy_CS3.3.sh + + echo -e "Done" +} + +function func_operand_request_cr_bai_34() +{ + + echo "Creating Common Services V3.4 Operand Request for BAI deployments on OCP 4.3+ ..." >> ${LOG_FILE} + operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/operator_operandrequest_cr.yaml + cat << ENDF > ${operator_source_path} +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandRequest +metadata: + name: common-service + namespace: ibm-common-services +spec: + requests: + - registry: common-service + registryNamespace: ibm-common-services + operands: + - name: ibm-licensing-operator + - name: ibm-iam-operator + - name: ibm-monitoring-exporters-operator + - name: ibm-monitoring-prometheusext-operator + - name: ibm-monitoring-grafana-operator + - name: ibm-metering-operator + - name: ibm-management-ingress-operator + - name: ibm-commonui-operator +ENDF +} + + +function func_operand_request_cr_nonbai_34() +{ + + echo "Creating Common Services V3.4 Operand Request for non-BAI deployments on OCP 4.3+ ..." >> ${LOG_FILE} + operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/operator_operandrequest_cr.yaml + cat << ENDF > ${operator_source_path} +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandRequest +metadata: + name: common-service + namespace: ibm-common-services +spec: + requests: + - registry: common-service + registryNamespace: ibm-common-services + operands: + - name: ibm-licensing-operator + - name: ibm-metering-operator +ENDF +} + + +function func_operand_request_cr_bai_33() +{ + + echo "Creating Common Services V3.3 Operand Request for BAI deployments on OCP 4.2+ ..." >> ${LOG_FILE} + operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/operator_operandrequest_cr.yaml + cat << ENDF > ${operator_source_path} +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandRequest +metadata: + name: common-service +spec: + requests: + - registry: common-service + operands: + - name: ibm-cert-manager-operator + - name: ibm-mongodb-operator + - name: ibm-iam-operator + - name: ibm-monitoring-exporters-operator + - name: ibm-monitoring-prometheusext-operator + - name: ibm-monitoring-grafana-operator + - name: ibm-management-ingress-operator + - name: ibm-licensing-operator + - name: ibm-metering-operator + - name: ibm-commonui-operator +ENDF +} + + +function func_operand_request_cr_nonbai_33() +{ + + echo "Creating Common Services V3.3 Operand Request for non-BAI deployments on OCP 4.2+ ..." >> ${LOG_FILE} + operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/operator_operandrequest_cr.yaml + cat << ENDF > ${operator_source_path} +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandRequest +metadata: + name: common-service +spec: + requests: + - registry: common-service + operands: + - name: ibm-cert-manager-operator + - name: ibm-mongodb-operator + - name: ibm-iam-operator + - name: ibm-management-ingress-operator + - name: ibm-licensing-operator + - name: ibm-metering-operator + - name: ibm-commonui-operator +ENDF +} + + +function show_summary(){ + + printf "\n" + echo -e
"\x1B[1m*******************************************************\x1B[0m" + echo -e "\x1B[1m Summary of input \x1B[0m" + echo -e "\x1B[1m*******************************************************\x1B[0m" + echo -e "\x1B[1;31m1. Cloud platform to deploy: ${PLATFORM_SELECTED} ${PLATFORM_VERSION}\x1B[0m" + echo -e "\x1B[1;31m2. Project to deploy: ${project_name}\x1B[0m" + echo -e "\x1B[1;31m3. User selected: ${user_name}\x1B[0m" + if [[ $PLATFORM_SELECTED == "ROKS" ]]; + then + echo -e "\x1B[1;31m5. Storage Class created: \x1B[0m" + display_storage_classes_roks + fi + echo -e "\x1B[1m*******************************************************\x1B[0m" +} + + +function check_csoperator_exists() +{ + +project="common-service" +SUB="common-services-operator" + +check_project=`oc get project $project --ignore-not-found | wc -l` >/dev/null 2>&1 +check_operator=$(oc get sub -n common-service --ignore-not-found |grep ibm-common-service-operator) >/dev/null 2>&1 + +if [[ $check_project == 2 ]] || [[ "$check_operator" == *"$SUB"* ]]; then + echo "" + echo "Found an Existing Installation of IBM Common-Services...will be skipped..." >> ${LOG_FILE} + echo "Found an Existing Installation of IBM Common-Services...will be skipped..." + CS_INSTALL="NO" + exit 1 +fi + +} + + validate_cli -check_existing_sc +if [[ $1 == "dev" ]] +then + CS_INSTALL="YES" + +else + CS_INSTALL="NO" + +fi + +select_platform +check_platform_version +#select_deployment_type +if [[ $PLATFORM_SELECTED == "OCP" ]]; +then + check_existing_sc +fi collect_input #create_project bind_scc prepare_install #create_scc -tag_nodes -display_storage_classes -display_node_name +check_storage_class + + + +if [[ $PLATFORM_SELECTED == "OCP" ]]; +then + display_node_name +fi + +show_summary +check_csoperator_exists + +if [[ $PLATFORM_SELECTED == "OCP" ]] && [[ $PLATFORM_VERSION == "4.3" ]] || [[ $PLATFORM_VERSION == "4.4" ]]; +then + + if [ "$CS_INSTALL" != "YES" ]; then + display_installationprompt + echo "" + + nohup ./deploy_CS3.4.sh >> ${LOG_FILE} 2>&1 & + else + echo "Review mode: IBM Common Services will be skipped.." + fi +fi + +# Deploy CS 3.3 if OCP 4.2 or 3.11 as per requirements. The components for CS 3.3 in this case will only be Licensing and Metering (also CommonUI as a base requirment) +#if [[[ $PLATFORM_SELECTED == "OCP" ]] && [ $PLATFORM_VERSION == "4.2" ]]] || [[[ $PLATFORM_SELECTED == "OCP" ] && [ $PLATFORM_VERSION == "3.11" ]]] + +if [[ $PLATFORM_SELECTED == "OCP" ]] && [[ $PLATFORM_VERSION == "4.2" ]]; +then + echo "IBM Common Services with Metering & Licensing Components will be installed" + if [ "$CS_INSTALL" != "YES" ]; then + nohup ./deploy_CS3.3.sh >> ${LOG_FILE} 2>&1 & + else + echo "Review mode: IBM Common Services will be skipped.." + echo "" + fi +fi + +# Deploy CS 3.3 if OCP 3.11 +if [[ $PLATFORM_SELECTED == "OCP" ]] && [[ $PLATFORM_VERSION == "3.11" ]]; +then + echo "IBM Common Services with Metering & Licensing Components will be installed" + if [ "$CS_INSTALL" != "YES" ]; then + COMMON_SERVICES_INSTALL_DIRECTORY_OCP311=${PARENT_DIR}/descriptors/common-services/scripts/common-services.sh + sh ${COMMON_SERVICES_INSTALL_DIRECTORY_OCP311} install --async + else + echo "Review mode: IBM Common Services will be skipped.." 
+ fi +fi + + clean_up +#set the project context back to the user generated one +oc project ${PROJ_NAME} > /dev/null + diff --git a/scripts/cp4a-deployment.sh b/scripts/cp4a-deployment.sh index 22dd4729..e3ccc907 100755 --- a/scripts/cp4a-deployment.sh +++ b/scripts/cp4a-deployment.sh @@ -1,5 +1,5 @@ #!/bin/bash -#set -x +# set -x ############################################################################### # # Licensed Materials - Property of IBM @@ -10,29 +10,16 @@ # disclosure restricted by GSA ADP Schedule Contract with IBM Corp. # ############################################################################### -unameOut="$(uname -s)" -case "${unameOut}" in - Linux*) machine="Linux";; - Darwin*) machine="Mac";; - CYGWIN*) machine="Cygwin";; - MINGW*) machine="MinGw";; - *) machine="UNKNOWN:${unameOut}" -esac - -if [[ "$machine" == "Mac" ]]; then - SED_COMMAND='sed -i ""' - SED_COMMAND_FORMAT='sed -i "" s/ //g' -else - SED_COMMAND='sed -i' - SED_COMMAND_FORMAT='sed -i s/\r//g' -fi CUR_DIR=$(cd $(dirname $0); pwd) PARENT_DIR=$(dirname "$PWD") +# Import common utilities and environment variables +source ${CUR_DIR}/helper/common.sh + DOCKER_RES_SECRET_NAME="admin.registrykey" DOCKER_REG_USER="" -if [[ $1 == "dev" ]] +if [[ $1 == "dev" || $1 == "review" ]] then DOCKER_REG_SERVER="cp.stg.icr.io" else @@ -40,105 +27,75 @@ else fi DOCKER_REG_KEY="" REGISTRY_IN_FILE="cp.icr.io" -OPERATOR_IMAGE=${DOCKER_REG_SERVER}/cp/cp4a/icp4a-operator:20.0.1 # Need change when release +OPERATOR_IMAGE=${DOCKER_REG_SERVER}/cp/cp4a/icp4a-operator:20.0.2 old_db2="docker.io\/ibmcom" old_ldap="osixia" old_db2_etcd="quay.io\/coreos" old_busybox="docker.io\/library" -TMEP_FOLDER=${CUR_DIR}/.tmp +TEMP_FOLDER=${CUR_DIR}/.tmp BAK_FOLDER=${CUR_DIR}/.bak +FINAL_CR_FOLDER=${CUR_DIR}/generated-cr +DEPLOY_TYPE_IN_FILE_NAME="" # Default value is empty OPERATOR_FILE=${PARENT_DIR}/descriptors/operator.yaml -OPERATOR_FILE_TMP=$TMEP_FOLDER/.operator_tmp.yaml +OPERATOR_FILE_TMP=$TEMP_FOLDER/.operator_tmp.yaml OPERATOR_FILE_BAK=$BAK_FOLDER/.operator.yaml OPERATOR_PVC_FILE=${PARENT_DIR}/descriptors/operator-shared-pvc.yaml -OPERATOR_PVC_FILE_TMP=$TMEP_FOLDER/.operator-shared-pvc_tmp.yaml +OPERATOR_PVC_FILE_TMP=$TEMP_FOLDER/.operator-shared-pvc_tmp.yaml OPERATOR_PVC_FILE_BAK=$BAK_FOLDER/.operator-shared-pvc.yaml -CONTENT_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_demo_content.yaml -CONTENT_PATTERN_FILE_TMP=$TMEP_FOLDER/.ibm_cp4a_cr_demo_content_tmp.yaml -CONTENT_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_demo_content.yaml - -APPLICATION_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_demo_application.yaml -APPLICATION_PATTERN_FILE_TMP=$TMEP_FOLDER/.ibm_cp4a_cr_demo_application_tmp.yaml -APPLICATION_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_demo_application.yaml - -ACA_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_demo_aca.yaml -ACA_PATTERN_FILE_TMP=$TMEP_FOLDER/.ibm_cp4a_cr_demo_aca_tmp.yaml -ACA_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_demo_aca.yaml - -WORKSTREAMS_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_demo_workstreams.yaml -WORKSTREAMS_PATTERN_FILE_TMP=$TMEP_FOLDER/.ibm_cp4a_cr_demo_workstreams_tmp.yaml -WORKSTREAMS_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_demo_workstreams.yaml - -DECISIONS_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_demo_decisions.yaml -DECISIONS_PATTERN_FILE_TMP=$TMEP_FOLDER/.ibm_cp4a_cr_demo_decisions_tmp.yaml -DECISIONS_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_demo_decisions.yaml - -DB2_JDBC_DRIVER_DIR=${CUR_DIR}/jdbc 
+FOUNDATION_PATTERN_FILE_TMP=$TEMP_FOLDER/.ibm_cp4a_cr_final_tmp.yaml +FOUNDATION_PATTERN_FILE_BAK=$FINAL_CR_FOLDER/ibm_cp4a_cr_final.yaml +FOUNDATION_EXISTING_BAK=$TEMP_FOLDER/.ibm_cp4a_cr_final_existing_bak.yaml +JDBC_DRIVER_DIR=${CUR_DIR}/jdbc +PLATFORM_SELECTED="" PATTERN_SELECTED="" COMPONENTS_SELECTED="" +OPT_COMPONENTS_CR_SELECTED="" +OPT_COMPONENTS_SELECTED=() +LDAP_TYPE="" - -function validate_cli(){ - which oc &>/dev/null - [[ $? -ne 0 ]] && \ - echo "Unable to locate Openshift CLI, please install it first." && \ - exit 1 - - which timeout &>/dev/null - [[ $? -ne 0 ]] && \ - while true; do - printf "\x1B[1m\"timeout\" Command Not Found\n\x1B[0m" - printf "\x1B[1mThe \"timeout\" will be installed automatically\n\x1B[0m" - printf "\x1B[1mDo you accept (Yes/No, default: No): \x1B[0m" - read -rp "" ans - case "$ans" in - "y"|"Y"|"yes"|"Yes"|"YES") - install_timeout_cli - break - ;; - "n"|"N"|"no"|"No"|"NO") - echo -e "You do not accept, exiting...\n" - exit 0 - ;; - *) - echo -e "\x1B[1;31mYou do not accept, exiting....\n\x1B[0m" - exit 0 - ;; - esac - done -} - -function install_timeout_cli(){ - if [[ ${machine} = "Mac" ]]; then - echo -n "Installing timeout ......"; brew install coreutils >/dev/null 2>&1; sudo ln -s /usr/local/bin/gtimeout /usr/local/bin/timeout >/dev/null 2>&1; echo "done."; - fi - printf "\n" - } +FOUNDATION_CR_SELECTED="" +optional_component_arr=() +optional_component_cr_arr=() +foundation_component_arr=() +FOUNDATION_FULL_ARR=("BAN" "RR" "BAS" "UMS") +OPTIONAL_COMPONENT_FULL_ARR=("bai" "css" "cmis" "es" "ums" "ads_designer" "ads_runtime" "app_designer" "decisionCenter" "decisionServerRuntime" "decisionRunner") function prompt_license(){ clear - echo -e "\x1B[1;31mIMPORTANT: Review the IBM Cloud Pak for Automation license information here: \n\x1B[0m" - echo -e "\x1B[1;31mhttps://github.com/icp4a/cert-kubernetes/blob/20.0.1/LICENSE\n\x1B[0m" + + get_baw_mode + retVal_baw=$? 
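+ # get_baw_mode (defined further below) returns 0 only when operator.yaml carries delivery_type "baw",
+ # so retVal_baw == 0 selects the Business Automation Workflow license text and 1 the Cloud Pak one.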
+ + if [[ $retVal_baw -eq 0 ]]; then + echo -e "\x1B[1;31mIMPORTANT: Review the IBM Business Automation Workflow license information here: \n\x1B[0m" + echo -e "\x1B[1;31mhttps://github.com/ibmbpm/BAW-Ctnr/blob/20.0.2/LICENSE\n\x1B[0m" + fi + if [[ $retVal_baw -eq 1 ]]; then + echo -e "\x1B[1;31mIMPORTANT: Review the IBM Cloud Pak for Automation license information here: \n\x1B[0m" + echo -e "\x1B[1;31mhttps://github.com/icp4a/cert-kubernetes/blob/20.0.2/LICENSE\n\x1B[0m" + fi + read -rsn1 -p"Press any key to continue";echo + + printf "\n" while true; do - printf "\n" - printf "\n" - printf "\x1B[1mDo you accept the IBM Cloud Pak for Automation license (Yes/No, default: No): \x1B[0m" - cp -r ${OPERATOR_FILE_BAK} ${OPERATOR_FILE_TMP} + if [[ $retVal_baw -eq 0 ]]; then + printf "\x1B[1mDo you accept the IBM Business Automation Workflow license (Yes/No, default: No): \x1B[0m" + fi + if [[ $retVal_baw -eq 1 ]]; then + printf "\x1B[1mDo you accept the IBM Cloud Pak for Automation license (Yes/No, default: No): \x1B[0m" + fi read -rp "" ans case "$ans" in "y"|"Y"|"yes"|"Yes"|"YES") echo -e "Installing the Cloud Pak for Automation Operator...\n" validate_cli break ;; "n"|"N"|"no"|"No"|"NO") @@ -146,141 +103,700 @@ function prompt_license(){ exit 0 ;; *) - echo -e "\x1B[1;31mYou did not accept the license, exiting...\n\x1B[0m" - exit 0 + echo -e "Answer must be \"Yes\" or \"No\"\n" ;; esac done } -function select_pattern(){ - # options=("FileNet Content Manager" "Automation Content Analyzer" "Operational Decision Manager" "Automation Workstream Services" "Automation Applications") - - # menu() { - # echo -e "\x1B[1mCloud Pak for Automation capabilities:\x1B[0m" - # for i in ${!options[@]}; do - # printf "%3d%s) %s\n" $((i+1)) "${choices_pattern[i]:- }" "${options[i]}" - # done - # if [[ "$msg" ]]; then echo "$msg"; fi - # } - - # prompt="Check an pattern (again to uncheck, ENTER when done): " - # while menu && read -rp "$prompt" num && [[ "$num" ]]; do - # [[ "$num" != *[![:digit:]]* ]] && - # (( num > 0 && num <= ${#options[@]} )) || - # { msg="Invalid option: $num"; continue; } - # ((num--)); msg="${options[num]} was ${choices_pattern[num]:+un}checked" - # [[ "${choices_pattern[num]}" ]] && choices_pattern[num]="" || choices_pattern[num]="+" - # done - - # printf "Pattern selected"; msg=" nothing" - # for i in ${!options[@]}; do - # [[ "${choices_pattern[i]}" ]] && { printf " %s" "${options[i]}"; msg=""; } - # done - # echo "$msg" - # export PATTERN_SELECTED="$msg" + +function validate_docker_podman_cli(){ + if [[ $OCP_VERSION == "3.11" || "$machine" == "Mac" ]];then + which docker &>/dev/null + [[ $? -ne 0 ]] && \ + echo -e "\x1B[1;31mUnable to locate docker, please install it first.\x1B[0m" && \ + exit 1 + elif [[ $OCP_VERSION == "4.1" || $OCP_VERSION == "4.2" || $OCP_VERSION == "4.3" || $OCP_VERSION == "4.4" ]] + then + which podman &>/dev/null + [[ $?
-ne 0 ]] && \ + echo -e "\x1B[1;31mUnable to locate podman, please install it first.\x1B[0m" && \ + exit 1 + fi +} + +function containsElement(){ + local e match="$1" + shift + for e; do [[ "$e" == "$match" ]] && return 0; done + return 1 +} + +function get_baw_mode(){ + if [ -f "$OPERATOR_FILE" ]; then + content_start="$(grep -n "env:" ${OPERATOR_FILE} | head -n 1 | cut -d: -f1)" + content_stop="$(tail -n +$content_start < ${OPERATOR_FILE} | grep -n "name: delivery_type" | head -n1 | cut -d: -f1)" + + if [ -z $content_stop ]; then + return 1 + else + content_stop=$(( $content_stop + $content_start - 1)) + baw_mode="$(tail -n +$content_stop < ${OPERATOR_FILE} | grep -n "value: " | head -n1 | cut -d: -f3)" + baw_mode=`echo $baw_mode | sed "s/\"//g"` + # echo -e "$baw_mode" + if [[ "${baw_mode}" == "baw" ]]; then + return 0 + else + return 1 + fi + fi + else + echo -e "\x1B[1;31m\"${OPERATOR_FILE}\" FILE NOT FOUND\x1B[0m" + exit 1 + fi +} + +function select_platform(){ + printf "\n" + echo -e "\x1B[1mSelect the cloud platform to deploy: \x1B[0m" COLUMNS=12 - echo -e "\x1B[1mSelect the Cloud Pak for Automation capability to install: \x1B[0m" - options=("FileNet Content Manager" "Automation Content Analyzer" "Operational Decision Manager" "Automation Workstream Services" "Automation Applications") - PS3='Enter a valid option [1 to 5]: ' - select opt in "${options[@]}" - do - case $opt in - "FileNet Content Manager") - PATTERN_SELECTED=$opt + options=("OpenShift Container Platform (OCP) - Private Cloud" "Other (Certified Kubernetes Cloud Platform / CNCF)") + + if [ -z "$existing_platform_type" ]; then + PS3='Enter a valid option [1 to 2]: ' + select opt in "${options[@]}" + do + case $opt in + "RedHat OpenShift Kubernetes Service (ROKS) - Public Cloud") + PLATFORM_SELECTED="ROKS" + break + ;; + "OpenShift Container Platform (OCP) - Private Cloud") + PLATFORM_SELECTED="OCP" + break + ;; + "Other (Certified Kubernetes Cloud Platform / CNCF)") + PLATFORM_SELECTED="other" + break + ;; + *) echo "invalid option $REPLY";; + esac + done + else + options_var=("OCP" "other") + for i in ${!options_var[@]}; do + if [[ "${options_var[i]}" == "$existing_platform_type" ]]; then + printf "%1d) %s \x1B[1m%s\x1B[0m\n" $((i+1)) "${options[i]}" "(Selected)" + else + printf "%1d) %s\n" $((i+1)) "${options[i]}" + fi + done + echo -e "\x1B[1;31mExisting platform type found in CR: \"$existing_platform_type\"\x1B[0m" + echo -e "\x1B[1;31mNo need to select again.\n\x1B[0m" + read -rsn1 -p"Press any key to continue ...";echo + fi + + if [[ "$PLATFORM_SELECTED" == "OCP" ]]; then + CLI_CMD=oc + elif [[ "$PLATFORM_SELECTED" == "other" ]] + then + CLI_CMD=kubectl + fi +} + +function check_ocp_version(){ + if [[ ${PLATFORM_SELECTED} == "OCP" ]];then + k8s_version=`${CLI_CMD} version | grep v[1-9]\. | tail -n1` + while true; do + case "$k8s_version" in + *v1.17.*) + OCP_VERSION="4.4" break ;; - "Automation Content Analyzer") - PATTERN_SELECTED=$opt + *v1.16.*) + OCP_VERSION="4.3" break ;; - "Operational Decision Manager") - PATTERN_SELECTED=$opt + *v1.14.*) + OCP_VERSION="4.2" break ;; - "Automation Workstream Services") - PATTERN_SELECTED=$opt + *v1.13.*) + OCP_VERSION="4.1" break ;; - "Automation Applications") - PATTERN_SELECTED=$opt + *v1.11.*) + OCP_VERSION="3.11" break ;; - *) echo "invalid option $REPLY";; + *) + printf "Unable to determine the version of OpenShift Container Platform (OCP), exiting ..."
+ exit 1 + ;; + esac + done + fi +} + +function select_baw_iaws_installation(){ + + INSTALL_BAW_IAWS="" + + get_baw_mode + retVal_baw=$? + + while true; do + if [[ $retVal_baw -eq 0 ]]; then + printf "\x1B[1mDo you plan to install Business Automation Workflow?\n\x1B[0m" + fi + if [[ $retVal_baw -eq 1 ]]; then + printf "\n" + printf "\x1B[1mDo you plan to install Business Automation Workflow and/or Automation Workstream Services?\n\x1B[0m" + fi + printf "Enter a valid option [Yes, No]: " + read -rp "" ans + + case "$ans" in + "y"|"Y"|"yes"|"Yes"|"YES") + INSTALL_BAW_IAWS="Yes" + break + ;; + "n"|"N"|"no"|"No"|"NO") + INSTALL_BAW_IAWS="No" + break + ;; + *) + echo -e "Answer must be \"Yes\" or \"No\"\n" + INSTALL_BAW_IAWS="" + ;; + esac + done +} + +function select_pattern(){ +# This function supports multiple checkboxes; if nothing is selected, it returns None + + PATTERNS_SELECTED="" + choices_pattern=() + pattern_arr=() + if [[ "${PLATFORM_SELECTED}" == "OCP" && "${DEPLOYMENT_TYPE}" == "enterprise" ]]; + then + options=("FileNet Content Manager" "Automation Content Analyzer" "Operational Decision Manager" "Automation Decision Services" "Business Automation Application" "Automation Digital Worker") + options_cr_val=("content" "contentanalyzer" "decisions" "decisions_ads" "application" "digitalworker") + foundation_0=("BAN" "RR") # Foundation for FileNet Content Manager + foundation_1=("BAN" "RR" "BAS" "UMS") # Foundation for Automation Content Analyzer + foundation_2=("BAN" "RR") # Foundation for Operational Decision Manager + foundation_3=("BAN" "RR" "UMS") # Foundation for Automation Decision Services + # foundation_4=("BAN" "RR" "UMS") # Foundation for Business Automation Workflow (Demo) + foundation_4=("BAN" "RR" "UMS") # Foundation for Business Automation Applications (full) + foundation_5=("BAN" "RR" "BAS" "UMS") # Foundation for Automation Digital Worker + # foundation_7=("BAN" "RR" "UMS") # # Foundation for Business Automation Applications (production) + else + options=("FileNet Content Manager" "Automation Content Analyzer" "Operational Decision Manager" "Automation Decision Services" "Business Automation Application" "Automation Digital Worker") + options_cr_val=("content" "contentanalyzer" "decisions" "decisions_ads" "application" "digitalworker") + foundation_0=("BAN" "RR") # Foundation for FileNet Content Manager + foundation_1=("BAN" "RR" "BAS" "UMS") # Foundation for Automation Content Analyzer + foundation_2=("BAN" "RR") # Foundation for Operational Decision Manager + foundation_3=("BAN" "RR" "UMS") # Foundation for Automation Decision Services + # foundation_4=("BAN" "RR" "UMS") # Foundation for Business Automation Workflow & Workstreams (Demo) + foundation_4=("BAN" "RR" "UMS") # Foundation for Business Automation Applications (full) + foundation_5=("BAN" "RR" "BAS" "UMS") # Foundation for Automation Digital Worker + # foundation_7=("BAN" "RR" "UMS") # # Foundation for Business Automation Applications (production) + fi + + + tips1="\x1B[1;31mTips\x1B[0m:\x1B[1mPress [ENTER] to accept the default (None of the patterns is selected)\x1B[0m" + tips2="\x1B[1;31mTips\x1B[0m:\x1B[1mPress [ENTER] when you are done\x1B[0m" + pattern_tips="\x1B[1mInfo: Business Automation Navigator will be automatically installed in the environment as it is part of the Cloud Pak for Automation foundation platform. \n\nTips: After you make your first selection you will be able to make additional selections since you can combine multiple selections.
\n\n\x1B[0m" + indexof() { + i=-1 + for ((j=0;j<${#options_cr_val[@]};j++)); + do [ "${options_cr_val[$j]}" = "$1" ] && { i=$j; break; } + done + echo $i + } + menu() { + clear + echo -e "\x1B[1mSelect the Cloud Pak for Automation capability to install: \x1B[0m" + for i in ${!options[@]}; do + + containsElement "${options_cr_val[i]}" "${EXISTING_PATTERN_ARR[@]}" + retVal=$? + if [ $retVal -ne 0 ]; then + printf "%1d) %s \x1B[1m%s\x1B[0m\n" $((i+1)) "${options[i]}" "${choices_pattern[i]}" + else + if [[ "${choices_pattern[i]}" == "(To Be Uninstalled)" ]]; then + printf "%1d) %s \x1B[1m%s\x1B[0m\n" $((i+1)) "${options[i]}" "${choices_pattern[i]}" + else + printf "%1d) %s \x1B[1m%s\x1B[0m\n" $((i+1)) "${options[i]}" "(Installed)" + fi + fi + done + if [[ "$msg" ]]; then echo "$msg"; fi + printf "\n" + + echo -e "${pattern_tips}" + # Show different tips according components select or unselect + containsElement "(Selected)" "${choices_pattern[@]}" + retVal=$? + if [ $retVal -ne 0 ]; then + echo -e "${tips1}" + else + echo -e "${tips2}" + fi + + } + + prompt="Enter a valid option [1 to ${#options[@]}]: " + while menu && read -rp "$prompt" num && [[ "$num" ]]; do + [[ "$num" != *[![:digit:]]* ]] && + (( num > 0 && num <= ${#options[@]} )) || + { msg="Invalid option: $num"; continue; } + ((num--)); + containsElement "${options_cr_val[num]}" "${EXISTING_PATTERN_ARR[@]}" + retVal=$? + if [ $retVal -ne 0 ]; then + [[ "${choices_pattern[num]}" ]] && choices_pattern[num]="" || choices_pattern[num]="(Selected)" + else + [[ "${choices_pattern[num]}" ]] && choices_pattern[num]="" || choices_pattern[num]="(To Be Uninstalled)" + fi + done + + # Generate list of the pattern which will be installed or To Be Uninstalled + for i in ${!options[@]}; do + array_varname=foundation_$i[@] + containsElement "${options_cr_val[i]}" "${EXISTING_PATTERN_ARR[@]}" + retVal=$? + if [ $retVal -ne 0 ]; then + [[ "${choices_pattern[i]}" ]] && { pattern_arr=( "${pattern_arr[@]}" "${options[i]}" ); pattern_cr_arr=( "${pattern_cr_arr[@]}" "${options_cr_val[i]}" ); msg=""; } + # if [[ "${options[i]}" == "Business Automation Application" && "${DEPLOYMENT_TYPE}" == "enterprise" ]] ; then + # [[ "${choices_pattern[i]}" ]] && { foundation_component_arr=( "${foundation_component_arr[@]}" "${foundation_7[@]}" ); } + # else + [[ "${choices_pattern[i]}" ]] && { foundation_component_arr=( "${foundation_component_arr[@]}" "${!array_varname}" ); } + # fi + else + if [[ "${choices_pattern[i]}" == "(To Be Uninstalled)" ]]; then + pos=`indexof "${pattern_cr_arr[i]}"` + if [[ "$pos" != "-1" ]]; then + { pattern_cr_arr=(${pattern_cr_arr[@]:0:$pos} ${pattern_cr_arr[@]:$(($pos + 1))}); pattern_arr=(${pattern_arr[@]:0:$pos} ${pattern_arr[@]:$(($pos + 1))}); } + + fi + else + { pattern_arr=( "${pattern_arr[@]}" "${options[i]}" ); pattern_cr_arr=( "${pattern_cr_arr[@]}" "${options_cr_val[i]}" ); msg=""; } + # if [[ "${options[i]}" == "Business Automation Application" && "${DEPLOYMENT_TYPE}" == "enterprise" ]] ; then + # { foundation_component_arr=( "${foundation_component_arr[@]}" "${foundation_7[@]}" ); } + # else + { foundation_component_arr=( "${foundation_component_arr[@]}" "${!array_varname}" ); } + # fi + fi + fi + done + # echo -e "$msg" + + if [ "${#pattern_arr[@]}" -eq "0" ]; then + PATTERNS_SELECTED="None" + printf "\x1B[1;31mPlease select one pattern at least, exiting... 
\n\x1B[0m" + exit 1 + else + PATTERNS_SELECTED=$( IFS=$','; echo "${pattern_arr[*]}" ) + PATTERNS_CR_SELECTED=$( IFS=$','; echo "${pattern_cr_arr[*]}" ) + + fi + + FOUNDATION_CR_SELECTED=($(echo "${foundation_component_arr[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' ')) + # FOUNDATION_CR_SELECTED_LOWCASE=( "${FOUNDATION_CR_SELECTED[@],,}" ) + + x=0;while [ ${x} -lt ${#FOUNDATION_CR_SELECTED[*]} ] ; do FOUNDATION_CR_SELECTED_LOWCASE[$x]=$(tr [A-Z] [a-z] <<< ${FOUNDATION_CR_SELECTED[$x]}); let x++; done + FOUNDATION_DELETE_LIST=($(echo "${FOUNDATION_CR_SELECTED[@]}" "${FOUNDATION_FULL_ARR[@]}" | tr ' ' '\n' | sort | uniq -u)) + + PATTERNS_CR_SELECTED=($(echo "${pattern_cr_arr[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' ')) +} + function select_optional_component(){ # This function support mutiple checkbox, if do not select anything, it will return - COMPONENTS_SELECTED="" - COLUMNS=12 - menu_content(){ - options=("None" "Content Manager Interoperability Service (CMIS)") - PS3="Enter a valid option [1 to ${#options[@]}]: " - select opt in "${options[@]}" - do - case $opt in - "None") - COMPONENTS_SELECTED="None" + OPT_COMPONENTS_CR_SELECTED=() + OPTIONAL_COMPONENT_DELETE_LIST=() + KEEP_COMPOMENTS=() + OPT_COMPONENTS_SELECTED=() + optional_component_arr=() + optional_component_cr_arr=() + BAI_SELECTED="" + show_optional_components(){ + COMPONENTS_SELECTED="" + choices_component=() + component_arr=() + + tips1="\x1B[1;31mTips\x1B[0m:\x1B[1m Press [ENTER] to accept the default (None of the components is selected)\x1B[0m" + tips2="\x1B[1;31mTips\x1B[0m:\x1B[1m Press [ENTER] when you are done\x1B[0m" + ads_tips="\x1B[1mTips:\x1B[0m Decision Designer is typically required if you are deploying a development or test environment.\nThis feature will automatically install Business Automation Studio, if not already present. \n\nDecision Runtime is typically recommended if you are deploying a test or production environment. \n\nYou should choose at least one these features to have a minimum environment configuration.\n" + if [[ $DEPLOYMENT_TYPE == "demo" ]];then + decision_tips="\x1B[1mTips:\x1B[0m Decision Center, Rule Execution Server and Decision Runner will be installed by default.\n" + else + decision_tips="\x1B[1mTips:\x1B[0m Decision Center is typically required for development and testing environments. \nRule Execution Server is typically required for testing and production environments and for using Business Automation Insights. \nYou should choose at least one these 2 features to have a minimum environment configuration. \n" + fi + application_tips="\x1B[1mTips:\x1B[0m Application Designer is typically required if you are deploying a development or test environment.\nThis feature will automatically install Business Automation Studio, if not already present. \n\nApplication Engine is automatically installed in the environment.  \n\nMake your selection or press enter to proceed. \n" + + indexof() { + i=-1 + for ((j=0;j<${#optional_component_cr_arr[@]};j++)); + do [ "${optional_component_cr_arr[$j]}" = "$1" ] && { i=$j; break; } + done + echo $i + } + menu() { + clear + echo -e "\x1B[1;31mPattern \"$item_pattern\": \x1B[0m\x1B[1mSelect optional components: \x1B[0m" + # echo -e "\x1B[1mSelect optional components: \x1B[0m" + containsElement "bai" "${EXISTING_OPT_COMPONENT_ARR[@]}" + bai_cr_retVal=$? 
+ for i in ${!optional_components_list[@]}; do + if [[ ("${choices_component[i]}" == "(Selected)" || "${choices_component[i]}" == "(Installed)") && "${optional_components_list[i]}" == "Business Automation Insights" ]];then + BAI_SELECTED="Yes" + elif [[ ( $bai_cr_retVal -ne 0 || "${choices_component[i]}" == "(To Be Uninstalled)") && "${optional_components_list[i]}" == "Business Automation Insights" ]] + then + BAI_SELECTED="No" + fi + done + + for i in ${!optional_components_list[@]}; do + containsElement "${optional_components_cr_list[i]}" "${EXISTING_OPT_COMPONENT_ARR[@]}" + retVal=$? + containsElement "${optional_components_cr_list[i]}" "${optional_component_cr_arr[@]}" + selectedVal=$? + if [ $retVal -ne 0 ]; then + if [[ "${item_pattern}" == "FileNet Content Manager" || ( "${item_pattern}" == "Operational Decision Manager" && "${DEPLOYMENT_TYPE}" == "enterprise" ) ]];then + if [[ "${optional_components_list[i]}" == "User Management Service" && "${BAI_SELECTED}" == "Yes" ]];then + printf "%1d) %s \x1B[1m%s\x1B[0m\n" $((i+1)) "${optional_components_list[i]}" "(Selected)" + elif [ $selectedVal -ne 0 ] + then + printf "%1d) %s \x1B[1m%s\x1B[0m\n" $((i+1)) "${optional_components_list[i]}" "${choices_component[i]}" + else + printf "%1d) %s \x1B[1m%s\x1B[0m\n" $((i+1)) "${optional_components_list[i]}" "(Selected)" + fi + else + if [ $selectedVal -ne 0 ]; then + printf "%1d) %s \x1B[1m%s\x1B[0m\n" $((i+1)) "${optional_components_list[i]}" "${choices_component[i]}" + else + printf "%1d) %s \x1B[1m%s\x1B[0m\n" $((i+1)) "${optional_components_list[i]}" "(Selected)" + fi + fi + else + if [[ "${optional_components_list[i]}" == "User Management Service" ]];then + printf "%1d) %s \x1B[1m%s\x1B[0m\n" $((i+1)) "${optional_components_list[i]}" "(Installed)" + elif [[ "${choices_component[i]}" == "(To Be Uninstalled)" ]] + then + printf "%1d) %s \x1B[1m%s\x1B[0m\n" $((i+1)) "${optional_components_list[i]}" "${choices_component[i]}" + else + printf "%1d) %s \x1B[1m%s\x1B[0m\n" $((i+1)) "${optional_components_list[i]}" "(Installed)" + if [[ "${optional_components_cr_list[i]}" == "bai" ]];then + BAI_SELECTED="Yes" + fi + fi + fi + done + if [[ "$msg" ]]; then echo "$msg"; fi + printf "\n" + + if [[ "${item_pattern}" == "Automation Decision Services" ]]; then + echo -e "${ads_tips}" + fi + if [[ "${item_pattern}" == "Operational Decision Manager" ]]; then + echo -e "${decision_tips}" + fi + if [[ "${item_pattern}" == "Business Automation Application" ]]; then + echo -e "${application_tips}" + fi + + + # Show different tips according to whether components are selected or unselected + containsElement "(Selected)" "${choices_component[@]}" + retVal=$? + if [ $retVal -eq 0 ]; then + echo -e "${tips2}" + elif [ $selectedVal -eq 0 ] + then + echo -e "${tips2}" + else + echo -e "${tips1}" + fi + + } + + prompt="Enter a valid option [1 to ${#optional_components_list[@]}]: " + while menu && read -rp "$prompt" num && [[ "$num" ]]; do + [[ "$num" != *[![:digit:]]* ]] && + (( num > 0 && num <= ${#optional_components_list[@]} )) || + { msg="Invalid option: $num"; continue; } + ((num--)); + + containsElement "${optional_components_cr_list[num]}" "${EXISTING_OPT_COMPONENT_ARR[@]}" + retVal=$?
+ if [ $retVal -ne 0 ]; then + [[ "${choices_component[num]}" ]] && choices_component[num]="" || choices_component[num]="(Selected)" + if [[ "${item_pattern}" == "FileNet Content Manager" || ( "${item_pattern}" == "Operational Decision Manager" && "${DEPLOYMENT_TYPE}" == "enterprise" ) ]];then + if [[ "${optional_components_cr_list[num]}" == "bai" && ${choices_component[num]} == "(Selected)" ]];then + choices_component[num-1]="(Selected)" + fi + if [[ "${optional_components_cr_list[num]}" == "ums" && ${choices_component[num+1]} == "(Selected)" ]];then + choices_component[num]="(Selected)" + fi + fi + else + containsElement "ums" "${EXISTING_OPT_COMPONENT_ARR[@]}" + ums_retVal=$? + containsElement "bai" "${EXISTING_OPT_COMPONENT_ARR[@]}" + bai_retVal=$? + if [[ "${optional_components_cr_list[num]}" == "bai" && $ums_retVal -eq 0 ]];then + ums_check_num=num-1 + if [[ "${choices_component[num]}" == "(To Be Uninstalled)" ]];then + [[ "${choices_component[num]}" ]] && choices_component[num]="" || choices_component[num]="" + [[ "${choices_component[num]}" ]] && choices_component[num]="" || choices_component[ums_check_num]="" + else + [[ "${choices_component[num]}" ]] && choices_component[num]="" || choices_component[num]="(To Be Uninstalled)" + fi + elif [[ "${optional_components_cr_list[num]}" == "ums" && $bai_retVal -eq 0 && ("${choices_component[num+1]}" == "" || "${choices_component[num+1]}" == "(Installed)") ]] + then + [[ "${choices_component[num]}" ]] && choices_component[num]="" || choices_component[num]="" + else + [[ "${choices_component[num]}" ]] && choices_component[num]="" || choices_component[num]="(To Be Uninstalled)" + fi + fi + done + + # printf "\x1B[1mCOMPONENTS selected: \x1B[0m"; msg=" None" + for i in ${!optional_components_list[@]}; do + # [[ "${choices_component[i]}" ]] && { printf " \"%s\"" "${optional_components_list[i]}"; msg=""; } + + containsElement "${optional_components_cr_list[i]}" "${EXISTING_OPT_COMPONENT_ARR[@]}" + retVal=$? 
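+ # The long elif chain below maps each display name to its internal token. A hedged alternative
+ # sketch using a bash 4+ associative array (keys taken from the chain itself, map name assumed):
+ #   declare -A token_map=( ["External Share"]="ExternalShare" ["Task Manager"]="TaskManager" )
+ #   token=${token_map[${optional_components_list[i]}]:-${optional_components_list[i]}}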
+ if [ $retVal -ne 0 ]; then + # [[ "${choices_component[i]}" ]] && { pattern_arr=( "${pattern_arr[@]}" "${options[i]}" ); pattern_cr_arr=( "${pattern_cr_arr[@]}" "${options_cr_val[i]}" ); msg=""; } + if [[ "${optional_components_list[i]}" == "External Share" ]]; then + [[ "${choices_component[i]}" ]] && { optional_component_arr=( "${optional_component_arr[@]}" "ExternalShare" ); msg=""; } + elif [[ "${optional_components_list[i]}" == "Task Manager" ]] + then + [[ "${choices_component[i]}" ]] && { optional_component_arr=( "${optional_component_arr[@]}" "TaskManager" ); msg=""; } + elif [[ "${optional_components_list[i]}" == "Content Search Services" ]] + then + [[ "${choices_component[i]}" ]] && { optional_component_arr=( "${optional_component_arr[@]}" "ContentSearchServices" ); msg=""; } + elif [[ "${optional_components_list[i]}" == "Decision Center" ]] + then + [[ "${choices_component[i]}" ]] && { optional_component_arr=( "${optional_component_arr[@]}" "DecisionCenter" ); msg=""; } + elif [[ "${optional_components_list[i]}" == "Rule Execution Server" ]] + then + [[ "${choices_component[i]}" ]] && { optional_component_arr=( "${optional_component_arr[@]}" "RuleExecutionServer" ); msg=""; } + elif [[ "${optional_components_list[i]}" == "Decision Runner" ]] + then + [[ "${choices_component[i]}" ]] && { optional_component_arr=( "${optional_component_arr[@]}" "DecisionRunner" ); msg=""; } + elif [[ "${optional_components_list[i]}" == "Decision Designer" ]] + then + [[ "${choices_component[i]}" ]] && { optional_component_arr=( "${optional_component_arr[@]}" "DecisionDesigner" ); msg=""; } + elif [[ "${optional_components_list[i]}" == "Decision Runtime" ]] + then + [[ "${choices_component[i]}" ]] && { optional_component_arr=( "${optional_component_arr[@]}" "DecisionRuntime" ); msg=""; } + elif [[ "${optional_components_list[i]}" == "Content Management Interoperability Services" ]] + then + [[ "${choices_component[i]}" ]] && { optional_component_arr=( "${optional_component_arr[@]}" "ContentManagementInteroperabilityServices" ); msg=""; } + elif [[ "${optional_components_list[i]}" == "User Management Service" ]] + then + [[ "${choices_component[i]}" ]] && { optional_component_arr=( "${optional_component_arr[@]}" "UserManagementService" ); msg=""; } + elif [[ "${optional_components_list[i]}" == "Business Automation Insights" ]] + then + [[ "${choices_component[i]}" ]] && { optional_component_arr=( "${optional_component_arr[@]}" "BusinessAutomationInsights" ); msg=""; } + elif [[ "${optional_components_list[i]}" == "Application Designer" ]] + then + [[ "${choices_component[i]}" ]] && { optional_component_arr=( "${optional_component_arr[@]}" "ApplicationDesigner" ); msg=""; } + else + [[ "${choices_component[i]}" ]] && { optional_component_arr=( "${optional_component_arr[@]}" "${optional_components_list[i]}" ); msg=""; } + fi + [[ "${choices_component[i]}" ]] && { optional_component_cr_arr=( "${optional_component_cr_arr[@]}" "${optional_components_cr_list[i]}" ); msg=""; } + else + if [[ "${choices_component[i]}" == "(To Be Uninstalled)" ]]; then + pos=`indexof "${optional_component_cr_arr[i]}"` + if [[ "$pos" != "-1" ]]; then + { optional_component_cr_arr=(${optional_component_cr_arr[@]:0:$pos} ${optional_component_cr_arr[@]:$(($pos + 1))}); optional_component_arr=(${optional_component_arr[@]:0:$pos} ${optional_component_arr[@]:$(($pos + 1))}); } + fi + else + if [[ "${optional_components_list[i]}" == "External Share" ]]; then + optional_component_arr=( "${optional_component_arr[@]}" "ExternalShare" 
) + elif [[ "${optional_components_list[i]}" == "Task Manager" ]] + then + optional_component_arr=( "${optional_component_arr[@]}" "TaskManager" ) + elif [[ "${optional_components_list[i]}" == "Content Search Services" ]] + then + optional_component_arr=( "${optional_component_arr[@]}" "ContentSearchServices" ) + elif [[ "${optional_components_list[i]}" == "Decision Center" ]] + then + optional_component_arr=( "${optional_component_arr[@]}" "DecisionCenter" ) + elif [[ "${optional_components_list[i]}" == "Rule Execution Server" ]] + then + optional_component_arr=( "${optional_component_arr[@]}" "RuleExecutionServer" ) + elif [[ "${optional_components_list[i]}" == "Decision Runner" ]] + then + optional_component_arr=( "${optional_component_arr[@]}" "DecisionRunner" ) + elif [[ "${optional_components_list[i]}" == "Decision Designer" ]] + then + optional_component_arr=( "${optional_component_arr[@]}" "DecisionDesigner" ) + elif [[ "${optional_components_list[i]}" == "Decision Runtime" ]] + then + optional_component_arr=( "${optional_component_arr[@]}" "DecisionRuntime" ) + elif [[ "${optional_components_list[i]}" == "Content Management Interoperability Services" ]] + then + optional_component_arr=( "${optional_component_arr[@]}" "ContentManagementInteroperabilityServices" ) + elif [[ "${optional_components_list[i]}" == "User Management Service" ]] + then + optional_component_arr=( "${optional_component_arr[@]}" "UserManagementService" ) + elif [[ "${optional_components_list[i]}" == "Business Automation Insights" ]] + then + optional_component_arr=( "${optional_component_arr[@]}" "BusinessAutomationInsights" ) + elif [[ "${optional_components_list[i]}" == "Application Designer" ]] + then + optional_component_arr=( "${optional_component_arr[@]}" "ApplicationDesigner" ) + else + optional_component_arr=( "${optional_component_arr[@]}" "${optional_components_list[i]}" ) + fi + optional_component_cr_arr=( "${optional_component_cr_arr[@]}" "${optional_components_cr_list[i]}" ) + fi + fi + done + # echo -e "$msg" + + if [ "${#optional_component_arr[@]}" -eq "0" ]; then + COMPONENTS_SELECTED="None" + else + OPT_COMPONENTS_CR_SELECTED=$( IFS=$','; echo "${optional_component_arr[*]}" ) + + fi + } + for item_pattern in "${pattern_arr[@]}"; do + while true; do + case $item_pattern in + "FileNet Content Manager") + # echo "select $item_pattern pattern optional components" + if [[ $DEPLOYMENT_TYPE == "demo" ]];then + optional_components_list=("Content Search Services" "Content Management Interoperability Services" "User Management Service" "Business Automation Insights") + optional_components_cr_list=("css" "cmis" "ums" "bai") + elif [[ $DEPLOYMENT_TYPE == "enterprise" ]] + then + optional_components_list=("Content Search Services" "Content Management Interoperability Services" "External Share" "User Management Service" "Business Automation Insights") + optional_components_cr_list=("css" "cmis" "es" "ums" "bai") + fi + show_optional_components + containsElement "bai" "${optional_component_cr_arr[@]}" + retVal=$? + if [[ $retVal -eq 0 ]]; then + optional_component_cr_arr=( "${optional_component_cr_arr[@]}" "ums" ) + optional_component_arr=( "${optional_component_arr[@]}" "UserManagementService" ) + fi + optional_components_list=() + optional_components_cr_list=() break ;; - "Content Manager Interoperability Service (CMIS)") - COMPONENTS_SELECTED="cmis" + "Automation Content Analyzer") + # echo "Without optional components for $item_pattern pattern." 
+ optional_components_list=() + optional_components_cr_list=() break ;; - *) echo "invalid option $REPLY";; - esac - done - } - menu_aca(){ - options=("None" "LDAP" "UMS") - PS3="Enter a valid option [1 to ${#options[@]}]: " - select opt in "${options[@]}" - do - case $opt in - "None") - COMPONENTS_SELECTED="None" + "Operational Decision Manager") + # echo "select $item_pattern pattern optional components" + if [[ "${DEPLOYMENT_TYPE}" == "demo" ]]; then + optional_component_cr_arr=( "${optional_component_cr_arr[@]}" "decisionCenter" ) + optional_component_cr_arr=( "${optional_component_cr_arr[@]}" "decisionServerRuntime" ) + optional_component_cr_arr=( "${optional_component_cr_arr[@]}" "decisionRunner" ) + optional_components_list=("Business Automation Insights") + optional_components_cr_list=("bai") + else + optional_components_list=("Decision Center" "Rule Execution Server" "Decision Runner" "User Management Service" "Business Automation Insights") + optional_components_cr_list=("decisionCenter" "decisionServerRuntime" "decisionRunner" "ums" "bai") + fi + show_optional_components + containsElement "bai" "${optional_component_cr_arr[@]}" + retVal=$? + if [[ $retVal -eq 0 ]]; then + optional_component_cr_arr=( "${optional_component_cr_arr[@]}" "ums" ) + optional_component_arr=( "${optional_component_arr[@]}" "UserManagementService" ) + fi + optional_components_list=() + optional_components_cr_list=() break ;; - "LDAP") - COMPONENTS_SELECTED="ldap" + "Automation Decision Services") + # echo "select $item_pattern pattern optional components" + if [[ "${DEPLOYMENT_TYPE}" == "demo" ]]; then + optional_component_cr_arr=( "${optional_component_cr_arr[@]}" "ads_designer" ) + optional_component_cr_arr=( "${optional_component_cr_arr[@]}" "ads_runtime" ) + optional_components_list=() + optional_components_cr_list=() + else + optional_components_list=("Decision Designer" "Decision Runtime") + optional_components_cr_list=("ads_designer" "ads_runtime") + show_optional_components + optional_components_list=() + optional_components_cr_list=() + fi + break + ;; + "Business Automation Workflow"|"Business Automation Workflow and Automation Workstream Services") + # echo "Without optional components for $item_pattern pattern." + if [[ $DEPLOYMENT_TYPE == "demo" ]]; then + optional_components_list=("Business Automation Insights") + optional_components_cr_list=("bai") + show_optional_components + fi + if [[ $DEPLOYMENT_TYPE == "enterprise" ]]; then + optional_component_cr_arr=( "${optional_component_cr_arr[@]}" "bai" ) + optional_component_arr=( "${optional_component_arr[@]}" "BusinessAutomationInsights" ) + fi + optional_component_cr_arr=( "${optional_component_cr_arr[@]}" "cmis" ) + optional_components_list=() + optional_components_cr_list=() break ;; - "UMS") - COMPONENTS_SELECTED="ums" + "Automation Workstream Services") + # echo "Without optional components for $item_pattern pattern." 
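+ # As with the Workflow branch above, CMIS is appended unconditionally in the branch below,
+ # so Automation Workstream Services always ships with Content Management Interoperability Services in this flow.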
+ optional_component_cr_arr=( "${optional_component_cr_arr[@]}" "cmis" ) + optional_components_list=() + optional_components_cr_list=() + break + ;; + "Business Automation Application") + if [[ $DEPLOYMENT_TYPE == "enterprise" ]]; then + # echo "select $item_pattern pattern optional components" + optional_components_list=("Application Designer") + optional_components_cr_list=("app_designer") + show_optional_components + optional_components_list=() + optional_components_cr_list=() + else + optional_components_list=() + optional_components_cr_list=() + fi + break + ;; + "Automation Digital Worker") + optional_components_list=("Business Automation Insights") + optional_components_cr_list=("bai") + show_optional_components + optional_components_list=() + optional_components_cr_list=() break ;; - *) echo "invalid option $REPLY";; esac done - } - while true; do - case $PATTERN_SELECTED in - "FileNet Content Manager") - echo -e "\x1B[1m$PATTERN_SELECTED: Optional component(s) to deploy: \x1B[0m" - menu_content - break - ;; - "Automation Content Analyzer") - echo -e "\x1B[1m$PATTERN_SELECTED: Optional component(s) to deploy: \x1B[0m" - menu_aca - break - ;; - *) - # printf "\x1B[1mNone optional components for \"$PATTERN_SELECTED\"\n\x1B[0m" - COMPONENTS_SELECTED="None" - break - ;; - esac done + + OPT_COMPONENTS_CR_SELECTED=($(echo "${optional_component_cr_arr[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' ')) + OPTIONAL_COMPONENT_DELETE_LIST=($(echo "${OPT_COMPONENTS_CR_SELECTED[@]}" "${OPTIONAL_COMPONENT_FULL_ARR[@]}" | tr ' ' '\n' | sort | uniq -u)) + KEEP_COMPOMENTS=($(echo ${FOUNDATION_CR_SELECTED_LOWCASE[@]} ${OPTIONAL_COMPONENT_DELETE_LIST[@]} | tr ' ' '\n' | sort | uniq -d | uniq)) + OPT_COMPONENTS_SELECTED=($(echo "${optional_component_arr[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' ')) } function get_local_registry_password(){ printf "\n" printf "\x1B[1mEnter the password for your docker registry: \x1B[0m" local_registry_password="" - while [[ $local_registry_password == "" ]]; # While confirmation is not y or n... + while [[ $local_registry_password == "" ]]; do read -rsp "" local_registry_password if [ -z "$local_registry_password" ]; then @@ -333,8 +849,15 @@ function get_entitlement_registry(){ docker_image_exists() { local image_full_name="$1"; shift local wait_time="${1:-5}" - local search_term='Pulling|is up to date|not found|no pull access' - local result=$((timeout --preserve-status "$wait_time" docker 2>&1 pull "$image_full_name" &) | grep -v 'Pulling repository' | egrep -o "$search_term") + local search_term='Pulling|Copying|is up to date|already exists|not found|unable to pull image|no pull access' + if [[ $OCP_VERSION == "3.11" ]];then + local result=$((timeout --preserve-status "$wait_time" docker 2>&1 pull "$image_full_name" &) | grep -v 'Pulling repository' | egrep -o "$search_term") + + elif [[ $OCP_VERSION == "4.1" || $OCP_VERSION == "4.2" || $OCP_VERSION == "4.3" || $OCP_VERSION == "4.4" ]] + then + local result=$((timeout --preserve-status "$wait_time" podman 2>&1 pull "$image_full_name" &) | grep -v 'Pulling repository' | egrep -o "$search_term") + + fi test "$result" || { echo "Timed out too soon. 
Try using a wait_time greater than $wait_time..."; return 1 ;} echo $result | grep -vq 'not found' } @@ -344,7 +867,7 @@ function get_entitlement_registry(){ printf "\n" printf "\n" printf "\x1B[1;31mFollow the instructions on how to get your Entitlement Registry key: \n\x1B[0m" - printf "\x1B[1;31mhttps://github.com/icp4a/cert-kubernetes/blob/20.0.1/platform/ocp/install.md\n\x1B[0m" + printf "\x1B[1;31mhttps://github.com/icp4a/cert-kubernetes/blob/20.0.2/platform/ocp/install.md\n\x1B[0m" printf "\n" printf "\x1B[1mDo you have a Cloud Pak for Automation Entitlement Registry key (Yes/No, default: No): \x1B[0m" while true; do @@ -375,31 +898,26 @@ function get_entitlement_registry(){ while [[ $entitlement_verify_passed == '' ]] do printf "\x1B[1mVerifying the Entitlement Registry key...\n\x1B[0m" - if [[ $entitlement_key == iamapikey:* ]] ; + if [[ $OCP_VERSION == "3.11" || "$machine" == "Mac" || $PLATFORM_SELECTED == "other" ]];then + if docker login -u "$DOCKER_REG_USER" -p "$DOCKER_REG_KEY" "$DOCKER_REG_SERVER"; then + printf 'Entitlement Registry key is valid.\n' + entitlement_verify_passed="passed" + else + printf '\x1B[1;31mThe Entitlement Registry key failed. Enter a valid Entitlement Registry key.\n\x1B[0m' + entitlement_key='' + entitlement_verify_passed="failed" + fi + elif [[ $PLATFORM_SELECTED == "other" || $OCP_VERSION == "4.1" || $OCP_VERSION == "4.2" || $OCP_VERSION == "4.3" || $OCP_VERSION == "4.4" ]] then - if docker login -u "$DOCKER_REG_USER" -p "$DOCKER_REG_KEY" "$DOCKER_REG_SERVER"; then - printf 'Entitlement Registry key is valid.\n' - entitlement_verify_passed="passed" - else - printf '\x1B[1;31mThe Entitlement Registry key failed. Enter a valid Entitlement Registry key.\n\x1B[0m' - entitlement_key='' - entitlement_verify_passed="" - entitlement_verify_passed="failed" - fi - else - docker login -u "$DOCKER_REG_USER" -p "$DOCKER_REG_KEY" "$DOCKER_REG_SERVER" >/dev/null 2>&1 - docker_image_exists "${OPERATOR_IMAGE}" - retVal=$? - if [ $retVal -ne 0 ]; then - printf '\x1B[1;31mThe Entitlement Registry key failed. Enter a valid Entitlement Registry key.\n\x1B[0m' - entitlement_key='' - entitlement_verify_passed="" - entitlement_verify_passed="failed" - else - printf 'Entitlement Registry key is valid.\n' - entitlement_verify_passed="passed" - fi - fi + if podman login -u "$DOCKER_REG_USER" -p "$DOCKER_REG_KEY" "$DOCKER_REG_SERVER" --tls-verify=false; then + printf 'Entitlement Registry key is valid.\n' + entitlement_verify_passed="passed" + else + printf '\x1B[1;31mThe Entitlement Registry key failed. 
Enter a valid Entitlement Registry key.\n\x1B[0m' + entitlement_key='' + entitlement_verify_passed="failed" + fi + fi done fi done @@ -419,37 +937,57 @@ function get_entitlement_registry(){ done } - function create_secret_entitlement_registry(){ printf "\x1B[1mCreating docker-registry secret for Entitlement Registry key...\n\x1B[0m" # Create docker-registry secret for Entitlement Registry Key - oc delete secret "$DOCKER_RES_SECRET_NAME" >/dev/null 2>&1 - CREATE_SECRET_CMD="oc create secret docker-registry $DOCKER_RES_SECRET_NAME --docker-server=$DOCKER_REG_SERVER --docker-username=$DOCKER_REG_USER --docker-password=$DOCKER_REG_KEY --docker-email=ecmtest@ibm.com" + ${CLI_CMD} delete secret "$DOCKER_RES_SECRET_NAME" >/dev/null 2>&1 + CREATE_SECRET_CMD="${CLI_CMD} create secret docker-registry $DOCKER_RES_SECRET_NAME --docker-server=$DOCKER_REG_SERVER --docker-username=$DOCKER_REG_USER --docker-password=$DOCKER_REG_KEY --docker-email=ecmtest@ibm.com" if $CREATE_SECRET_CMD ; then echo -e "\x1B[1mDone\x1B[0m" else echo -e "\x1B[1mFailed\x1B[0m" fi - } function get_local_registry_server(){ - # For Local Registry Server + # For internal/external Registry Server printf "\n" - - printf "\x1B[1mEnter the OCP docker registry service name, for example: docker-registry.default.svc:5000/\n\x1B[0m" - printf "\x1B[1mor the URL to the docker registry, for example: abc.xyz.com: \x1B[0m" - local_registry_server="" - while [[ $local_registry_server == "" ]] # While confirmation is not y or n... - do - read -rp "" local_registry_server - if [ -z "$local_registry_server" ]; then - echo -e "\x1B[1;31mEnter a valid service name or the URL for the docker registry.\x1B[0m" - fi - done - LOCAL_REGISTRY_SERVER=${local_registry_server} - # convert docker-registry.default.svc:5000/project-name - # to docker-registry.default.svc:5000\/project-name + if [[ "${REGISTRY_TYPE}" == "internal" && ("${OCP_VERSION}" == "4.1" || "${OCP_VERSION}" == "4.2"|| "${OCP_VERSION}" == "4.3" || "${OCP_VERSION}" == "4.4") ]];then + #This is required for docker/podman login validation. + printf "\x1B[1mEnter the public image registry or route (e.g., default-route-openshift-image-registry.apps.). 
\n\x1B[0m" + printf "\x1B[1mThis is required for docker/podman login validation: \x1B[0m" + local_public_registry_server="" + while [[ $local_public_registry_server == "" ]] + do + read -rp "" local_public_registry_server + if [ -z "$local_public_registry_server" ]; then + echo -e "\x1B[1;31mEnter a valid service name or the URL for the docker registry.\x1B[0m" + fi + done + fi + + if [[ "${OCP_VERSION}" == "3.11" && "${REGISTRY_TYPE}" == "internal" ]];then + printf "\x1B[1mEnter the OCP docker registry service name, for example: docker-registry.default.svc:5000/: \x1B[0m" + elif [[ "${REGISTRY_TYPE}" == "internal" && ("${OCP_VERSION}" == "4.1" || "${OCP_VERSION}" == "4.2"|| "${OCP_VERSION}" == "4.3" || "${OCP_VERSION}" == "4.4") ]] + then + printf "\n" + printf "\x1B[1mEnter the local image registry (e.g., image-registry.openshift-image-registry.svc:5000/)\n\x1B[0m" + printf "\x1B[1mThis is required to pull container images and Kubernetes secret creation: \x1B[0m" + elif [[ "${REGISTRY_TYPE}" == "external" || $PLATFORM_SELECTED == "other" ]] + then + printf "\x1B[1mEnter the URL to the docker registry, for example: abc.xyz.com: \x1B[0m" + fi + local_registry_server="" + while [[ $local_registry_server == "" ]] + do + read -rp "" local_registry_server + if [ -z "$local_registry_server" ]; then + echo -e "\x1B[1;31mEnter a valid service name or the URL for the docker registry.\x1B[0m" + fi + done + LOCAL_REGISTRY_SERVER=${local_registry_server} + # convert docker-registry.default.svc:5000/project-name + # to docker-registry.default.svc:5000\/project-name OIFS=$IFS IFS='/' read -r -a docker_reg_url_array <<< "$local_registry_server" delim="" @@ -467,7 +1005,7 @@ function get_local_registry_user(){ printf "\n" printf "\x1B[1mEnter the user name for your docker registry: \x1B[0m" local_registry_user="" - while [[ $local_registry_user == "" ]] # While confirmation is not y or n... + while [[ $local_registry_user == "" ]] do read -rp "" local_registry_user if [ -z "$local_registry_user" ]; then @@ -483,11 +1021,17 @@ function get_infra_name(){ # For Infrastructure Node printf "\n" printf "\x1B[1mIn order for the deployment to create routes for the Cloud Pak services,\n\x1B[0m" - printf "\x1B[1menter the host name of your Infrastructure Node from\n\x1B[0m" - printf "\x1B[1myour OpenShift Clould Platform environment: \x1B[0m" - + printf "\x1B[1mYou can get the host name by running the following command: \n\x1B[0m" + if [[ $OCP_VERSION == "3.11" ]];then + printf "\x1B[1;31moc get nodes --selector node-role.kubernetes.io/infra=true -o custom-columns=\":metadata.name\"\n\x1B[0m" + elif [[ $OCP_VERSION == "4.1" || $OCP_VERSION == "4.2" || $OCP_VERSION == "4.3" || $OCP_VERSION == "4.4" ]] + then + printf "\x1B[1;31moc get route console -n openshift-console -o yaml|grep routerCanonicalHostname\n\x1B[0m" + fi + printf "\x1B[1mInput the host name: \x1B[0m" + infra_name="" - while [[ $infra_name == "" ]] # While confirmation is not y or n... 
+ while [[ $infra_name == "" ]] do read -rp "" infra_name if [ -z "$infra_name" ]; then @@ -501,31 +1045,77 @@ function get_infra_name(){ function get_storage_class_name(){ # For dynamic storage classname + storage_class_name="" + sc_slow_file_storage_classname="" + sc_medium_file_storage_classname="" + sc_fast_file_storage_classname="" printf "\n" - printf "\x1B[1mTo provision the persistent volumes and volume claims, enter the dynamic storage classname: \x1B[0m" + if [[ $DEPLOYMENT_TYPE == "demo" && ($PLATFORM_SELECTED == "OCP" || $PLATFORM_SELECTED == "other")]] ; + then + printf "\x1B[1mTo provision the persistent volumes and volume claims, enter the dynamic storage classname: \x1B[0m" + + while [[ $storage_class_name == "" ]] + do + read -rp "" storage_class_name + if [ -z "$storage_class_name" ]; then + echo -e "\x1B[1;31mEnter a valid dynamic storage classname\x1B[0m" + fi + done + elif [[ ($DEPLOYMENT_TYPE == "enterprise" && ($PLATFORM_SELECTED == "OCP" || $PLATFORM_SELECTED == "other")) || $PLATFORM_SELECTED == "ROKS" ]] + then + printf "\x1B[1mTo provision the persistent volumes and volume claims\n\x1B[0m" + while [[ $sc_slow_file_storage_classname == "" ]] # While getting the slow storage class name + do + printf "\x1B[1mplease enter the dynamic storage classname for slow storage: \x1B[0m" + read -rp "" sc_slow_file_storage_classname + if [ -z "$sc_slow_file_storage_classname" ]; then + echo -e "\x1B[1;31mEnter a valid dynamic storage classname\x1B[0m" + fi + done - storage_class_name="" - while [[ $storage_class_name == "" ]] # While confirmation is not y or n... - do - read -rp "" storage_class_name - if [ -z "$storage_class_name" ]; then - echo -e "\x1B[1;31mEnter a valid dynamic storage classname\x1B[0m" - fi - done - export STORAGE_CLASS_NAME=${storage_class_name} + while [[ $sc_medium_file_storage_classname == "" ]] # While getting the medium storage class name + do + printf "\x1B[1mplease enter the dynamic storage classname for medium storage: \x1B[0m" + read -rp "" sc_medium_file_storage_classname + if [ -z "$sc_medium_file_storage_classname" ]; then + echo -e "\x1B[1;31mEnter a valid dynamic storage classname\x1B[0m" + fi + done + while [[ $sc_fast_file_storage_classname == "" ]] # While getting the fast storage class name + do + printf "\x1B[1mplease enter the dynamic storage classname for fast storage: \x1B[0m" + read -rp "" sc_fast_file_storage_classname + if [ -z "$sc_fast_file_storage_classname" ]; then + echo -e "\x1B[1;31mEnter a valid dynamic storage classname\x1B[0m" + fi + done + fi + STORAGE_CLASS_NAME=${storage_class_name} + SLOW_STORAGE_CLASS_NAME=${sc_slow_file_storage_classname} + MEDIUM_STORAGE_CLASS_NAME=${sc_medium_file_storage_classname} + FAST_STORAGE_CLASS_NAME=${sc_fast_file_storage_classname} } function allocate_operator_pvc(){ + # For dynamic storage classname printf "\n" echo -e "\x1B[1mApplying the persistent volumes for the Cloud Pak operator by using the storage classname: ${STORAGE_CLASS_NAME}...\x1B[0m" - sed "s/<StorageClassName>/$STORAGE_CLASS_NAME/g" ${OPERATOR_PVC_FILE_BAK} > ${OPERATOR_PVC_FILE_TMP} # &> /dev/null + printf "\n" + if [[ $DEPLOYMENT_TYPE == "demo" && ($PLATFORM_SELECTED == "OCP" || $PLATFORM_SELECTED == "other") ]] ; + then + sed "s/<StorageClassName>/$STORAGE_CLASS_NAME/g" ${OPERATOR_PVC_FILE_BAK} > ${OPERATOR_PVC_FILE_TMP} # &> /dev/null + elif [[ ($DEPLOYMENT_TYPE == "enterprise" && ($PLATFORM_SELECTED == "OCP" || $PLATFORM_SELECTED == "other")) || $PLATFORM_SELECTED == "ROKS" ]]; + then + sed "s/<StorageClassName>/$SLOW_STORAGE_CLASS_NAME/g"
${OPERATOR_PVC_FILE_BAK} > ${OPERATOR_PVC_FILE_TMP} # &> /dev/null + fi + cp -rf ${OPERATOR_PVC_FILE_TMP} ${OPERATOR_PVC_FILE_BAK} # Create Operator Persistent Volume. - CREATE_PVC_CMD="oc apply -f ${OPERATOR_PVC_FILE_TMP}" + CREATE_PVC_CMD="${CLI_CMD} apply -f ${OPERATOR_PVC_FILE_TMP}" if $CREATE_PVC_CMD ; then echo -e "\x1B[1mDone\x1B[0m" else @@ -536,13 +1126,13 @@ function allocate_operator_pvc(){ TIMEOUT=60 printf "\n" echo -e "\x1B[1mWaiting for the persistent volumes to be ready...\x1B[0m" - until oc get pvc | grep operator-shared-pvc | grep -q -m 1 "Bound" || [ $ATTEMPTS -eq $TIMEOUT ]; do + until ${CLI_CMD} get pvc | grep operator-shared-pvc | grep -q -m 1 "Bound" || [ $ATTEMPTS -eq $TIMEOUT ]; do ATTEMPTS=$((ATTEMPTS + 1)) echo -e "......" sleep 10 if [ $ATTEMPTS -eq $TIMEOUT ] ; then echo -e "\x1B[1;31mFailed to allocate the persistent volumes!\x1B[0m" - echo -e "\x1B[1;31mRun the following command to check the claim 'oc describe pvc operator-shared-pvc'\x1B[0m" + echo -e "\x1B[1;31mRun the following command to check the claim '${CLI_CMD} describe pvc operator-shared-pvc'\x1B[0m" exit 1 fi done @@ -551,34 +1141,16 @@ function allocate_operator_pvc(){ fi } -function show_summary(){ - - printf "\n" - echo -e "\x1B[1m*******************************************************\x1B[0m" - echo -e "\x1B[1m Summary of input \x1B[0m" - echo -e "\x1B[1m*******************************************************\x1B[0m" - echo -e "\x1B[1;31m1. Cloud Pak capability to deploy: ${PATTERN_SELECTED}\x1B[0m" - echo -e "\x1B[1;31m2. Optional components to deploy: ${COMPONENTS_SELECTED}\x1B[0m" - echo -e "\x1B[1;31m3. Entitlement Registry key: ${DOCKER_REG_KEY}\x1B[0m" - echo -e "\x1B[1;31m4. Docker registry service name or URL: ${LOCAL_REGISTRY_SERVER}\x1B[0m" - echo -e "\x1B[1;31m5. Docker registry user name: ${LOCAL_REGISTRY_USER}\x1B[0m" - # echo -e "\x1B[1;31m5. Docker registry password: ${LOCAL_REGISTRY_PWD}\x1B[0m" - echo -e "\x1B[1;31m6. Docker registry password: \x1B[0m" # not show plaintext password - echo -e "\x1B[1;31m7. OCP Infrastructure Node: ${INFRA_NAME}\x1B[0m" - echo -e "\x1B[1;31m8. 
Dynamic storage classname: ${STORAGE_CLASS_NAME}\x1B[0m" - echo -e "\x1B[1m*******************************************************\x1B[0m" -} - function create_secret_local_registry(){ echo -e "\x1B[1mCreating the secret based on the local docker registry information...\x1B[0m" # Create docker-registry secret for local Registry Key # echo -e "Create docker-registry secret for Local Registry...\n" - oc delete secret "$DOCKER_RES_SECRET_NAME" >/dev/null 2>&1 - if [[ $LOCAL_REGISTRY_SERVER == docker-registry* ]] ; + ${CLI_CMD} delete secret "$DOCKER_RES_SECRET_NAME" >/dev/null 2>&1 + if [[ $LOCAL_REGISTRY_SERVER == docker-registry* || $LOCAL_REGISTRY_SERVER == image-registry.openshift-image-registry* ]] ; then - CREATE_SECRET_CMD="oc create secret docker-registry $DOCKER_RES_SECRET_NAME --docker-server=$LOCAL_REGISTRY_SERVER --docker-username=$LOCAL_REGISTRY_USER --docker-password=$(oc whoami -t) --docker-email=ecmtest@ibm.com" + CREATE_SECRET_CMD="${CLI_CMD} create secret docker-registry $DOCKER_RES_SECRET_NAME --docker-server=$LOCAL_REGISTRY_SERVER --docker-username=$LOCAL_REGISTRY_USER --docker-password=$(${CLI_CMD} whoami -t) --docker-email=ecmtest@ibm.com" else - CREATE_SECRET_CMD="oc create secret docker-registry $DOCKER_RES_SECRET_NAME --docker-server=$LOCAL_REGISTRY_SERVER --docker-username=$LOCAL_REGISTRY_USER --docker-password=$LOCAL_REGISTRY_PWD --docker-email=ecmtest@ibm.com" + CREATE_SECRET_CMD="${CLI_CMD} create secret docker-registry $DOCKER_RES_SECRET_NAME --docker-server=$LOCAL_REGISTRY_SERVER --docker-username=$LOCAL_REGISTRY_USER --docker-password=$LOCAL_REGISTRY_PWD --docker-email=ecmtest@ibm.com" fi if $CREATE_SECRET_CMD ; then echo -e "\x1B[1mDone\x1B[0m" @@ -588,56 +1160,563 @@ function create_secret_local_registry(){ } function verify_local_registry_password(){ - while [[ $verify_passed == "" ]] + # Require the CP4A images (and the Db2/OpenLDAP prerequisite images for demo) to be preloaded + printf "\n" + while true; do + printf "\x1B[1mHave you pushed the images to the local registry using 'loadimages.sh' (CP4A images)\n\x1B[0m" + printf "\x1B[1mand 'loadPrereqImages.sh' (Db2 and OpenLDAP for demo) scripts (Yes/No)? \x1B[0m" + read -rp "" ans + case "$ans" in + "y"|"Y"|"yes"|"Yes"|"YES") + PRE_LOADED_IMAGE="Yes" + break + ;; + "n"|"N"|"no"|"No"|"NO") + echo -e "\x1B[1;31mPlease push the images to the local registry before proceeding.\n\x1B[0m" + exit 1 + ;; + *) + echo -e "Answer must be \"Yes\" or \"No\"\n" + ;; + esac + done + + # Select which type of image registry to use.
+ if [[ "${PLATFORM_SELECTED}" == "OCP" ]]; then + printf "\n" + echo -e "\x1B[1mSelect the type of image registry to use:: \x1B[0m" + COLUMNS=12 + options=("Openshift Container Platform (OCP) - Internal image registry" "Other ( External image registry: abc.xyz.com )") + + PS3='Enter a valid option [1 to 2]: ' + select opt in "${options[@]}" + do + case $opt in + "Openshift Container Platform (OCP) - Internal image registry") + REGISTRY_TYPE="internal" + break + ;; + "Other ( External image registry: abc.xyz.com )") + REGISTRY_TYPE="external" + break + ;; + *) echo "invalid option $REPLY";; + esac + done + else + REGISTRY_TYPE="external" + fi + + while [[ $verify_passed == "" && $PRE_LOADED_IMAGE == "Yes" ]] do get_local_registry_server get_local_registry_user get_local_registry_password - - if [[ $LOCAL_REGISTRY_SERVER == docker-registry* ]] ; + + if [[ $LOCAL_REGISTRY_SERVER == docker-registry* || $LOCAL_REGISTRY_SERVER == image-registry* || $LOCAL_REGISTRY_SERVER == default-route-openshift-image-registry* ]] ; then - if docker login -u "$LOCAL_REGISTRY_USER" -p $(oc whoami -t) "$LOCAL_REGISTRY_SERVER"; then - printf 'Verifying Local Registry passed...\n' - verify_passed="passed" - else - printf '\x1B[1;31mLogin failed...\n\x1B[0m' - verify_passed="" - local_registry_user="" - local_registry_server="" - echo -e "\x1B[1;31mCheck the local docker registry information and try again.\x1B[0m" + if [[ $OCP_VERSION == "3.11" || "$machine" == "Mac" ]];then + if docker login -u "$LOCAL_REGISTRY_USER" -p $(${CLI_CMD} whoami -t) "$LOCAL_REGISTRY_SERVER"; then + printf 'Verifying Local Registry passed...\n' + verify_passed="passed" + else + printf '\x1B[1;31mLogin failed...\n\x1B[0m' + verify_passed="" + local_registry_user="" + local_registry_server="" + echo -e "\x1B[1;31mCheck the local docker registry information and try again.\x1B[0m" + fi + elif [[ $OCP_VERSION == "4.1" || $OCP_VERSION == "4.2" || $OCP_VERSION == "4.3" || $OCP_VERSION == "4.4" ]] + then + which podman &>/dev/null + if [[ $? -eq 0 ]];then + if podman login "$local_public_registry_server" -u "$LOCAL_REGISTRY_USER" -p $(${CLI_CMD} whoami -t) --tls-verify=false; then + printf 'Verifying Local Registry passed...\n' + verify_passed="passed" + else + printf '\x1B[1;31mLogin failed...\n\x1B[0m' + verify_passed="" + local_registry_user="" + local_registry_server="" + local_public_registry_server="" + echo -e "\x1B[1;31mCheck the local docker registry information and try again.\x1B[0m" + fi + else + if docker login "$local_public_registry_server" -u "$LOCAL_REGISTRY_USER" -p $(${CLI_CMD} whoami -t); then + printf 'Verifying Local Registry passed...\n' + verify_passed="passed" + else + printf '\x1B[1;31mLogin failed...\n\x1B[0m' + verify_passed="" + local_registry_user="" + local_registry_server="" + local_public_registry_server="" + echo -e "\x1B[1;31mCheck the local docker registry information and try again.\x1B[0m" + fi + fi fi else - if docker login -u "$LOCAL_REGISTRY_USER" -p "$LOCAL_REGISTRY_PWD" "$LOCAL_REGISTRY_SERVER"; then - printf 'Verifying the information for the local docker registry...\n' - verify_passed="passed" + which podman &>/dev/null + if [[ $? 
-eq 0 ]];then + if podman login -u "$LOCAL_REGISTRY_USER" -p "$LOCAL_REGISTRY_PWD" "$LOCAL_REGISTRY_SERVER" --tls-verify=false; then + printf 'Verifying the information for the local docker registry...\n' + verify_passed="passed" + else + printf '\x1B[1;31mLogin failed...\n\x1B[0m' + verify_passed="" + local_registry_user="" + local_registry_server="" + echo -e "\x1B[1;31mCheck the local docker registry information and try again.\x1B[0m" + fi else - if docker login -u "$LOCAL_REGISTRY_USER" -p "$LOCAL_REGISTRY_PWD" "$LOCAL_REGISTRY_SERVER"; then - printf 'Verifying the information for the local docker registry...\n' - verify_passed="passed" + else + if docker login -u "$LOCAL_REGISTRY_USER" -p "$LOCAL_REGISTRY_PWD" "$LOCAL_REGISTRY_SERVER"; then + printf 'Verifying the information for the local docker registry...\n' + verify_passed="passed" + else + printf '\x1B[1;31mLogin failed...\n\x1B[0m' + verify_passed="" + local_registry_user="" + local_registry_server="" + echo -e "\x1B[1;31mCheck the local docker registry information and try again.\x1B[0m" + fi + fi fi done } +function select_installation_type(){ + COLUMNS=12 + echo -e "\x1B[1mIs this a new install or an existing install?\x1B[0m" + options=("New" "Existing") + PS3='Enter a valid option [1 to 2]: ' + select opt in "${options[@]}" + do + case $opt in + "New") + INSTALLATION_TYPE="new" + break + ;; + "Existing") + INSTALLATION_TYPE="existing" + get_existing_pattern_name + break + ;; + *) echo "invalid option $REPLY";; + esac + done +} + +function select_deployment_type(){ + printf "\n" + echo -e "\x1B[1mWhat type of deployment is being performed?\x1B[0m" + if [[ $PLATFORM_SELECTED == "ROKS" ]]; + then + DEPLOYMENT_TYPE="demo" + printf "\n" + + echo -e "\x1B[1;31mOnly \"Demo\" deployment is currently supported on Red Hat OpenShift Kubernetes Service (ROKS) - Public Cloud.\n\x1B[0m" + read -rsn1 -p"Press any key to continue ...";echo + # options=("Demo") + # PS3='Enter a valid option [1 to 1]: ' + # select opt in "${options[@]}" + # do + # case $opt in + # "Demo") + # DEPLOYMENT_TYPE="demo" + # break + # ;; + # *) echo "invalid option $REPLY";; + # esac + # done + else + COLUMNS=12 + options=("Demo" "Enterprise") + if [ -z "$existing_deployment_type" ]; then + PS3='Enter a valid option [1 to 2]: ' + select opt in "${options[@]}" + do + case $opt in + "Demo") + DEPLOYMENT_TYPE="demo" + break + ;; + "Enterprise") + DEPLOYMENT_TYPE="enterprise" + break + ;; + *) echo "invalid option $REPLY";; + esac + done + else + options_var=("demo" "enterprise") + for i in ${!options_var[@]}; do + if [[ "${options_var[i]}" == "$existing_deployment_type" ]]; then + printf "%1d) %s \x1B[1m%s\x1B[0m\n" $((i+1)) "${options[i]}" "(Selected)" + else + printf "%1d) %s\n" $((i+1)) "${options[i]}" + fi + done + echo -e "\x1B[1;31mExisting deployment type found in CR: \"$existing_deployment_type\"\x1B[0m" + echo -e "\x1B[1;31mYou do not need to select it again.\n\x1B[0m" + read -rsn1 -p"Press any key to continue ...";echo + fi + fi +} + +function select_ldap_type(){ + COLUMNS=12 + echo -e "\x1B[1mWhat is the LDAP type used for this deployment?
\x1B[0m" + options=("Microsoft Active Directory" "Tivoli Directory Server / Security Directory Server") + PS3='Enter a valid option [1 to 2]: ' + select opt in "${options[@]}" + do + case $opt in + "Microsoft Active Directory") + LDAP_TYPE="AD" + break + ;; + Tivoli*) + LDAP_TYPE="TDS" + break + ;; + *) echo "invalid option $REPLY";; + esac + done + +} +function set_ldap_type_foundation(){ + if [[ $DEPLOYMENT_TYPE == "enterprise" ]] ; + then + cp -r ${FOUNDATION_PATTERN_FILE_BAK} ${FOUNDATION_PATTERN_FILE_TMP} + + if [[ "$LDAP_TYPE" == "AD" ]]; then + content_start="$(grep -n "ad:" ${FOUNDATION_PATTERN_FILE_TMP} | head -n 1 | cut -d: -f1)" + else + content_start="$(grep -n "tds:" ${FOUNDATION_PATTERN_FILE_TMP} | head -n 1 | cut -d: -f1)" + fi + content_stop="$(tail -n +$content_start < ${FOUNDATION_PATTERN_FILE_TMP} | grep -n "lc_group_filter:" | head -n1 | cut -d: -f1)" + content_stop=$(( $content_stop + $content_start - 1)) + vi ${FOUNDATION_PATTERN_FILE_TMP} -c ':'"${content_start}"','"${content_stop}"'s/ # / ' -c ':wq' >/dev/null 2>&1 + + cp -r ${FOUNDATION_PATTERN_FILE_TMP} ${FOUNDATION_PATTERN_FILE_BAK} + fi +} + +function set_ldap_type_content_pattern(){ + if [[ $DEPLOYMENT_TYPE == "enterprise" ]] ; + then + cp -r ${CONTENT_PATTERN_FILE_BAK} ${CONTENT_PATTERN_FILE_TMP} + + if [[ "$LDAP_TYPE" == "AD" ]]; then + content_start="$(grep -n "ad:" ${CONTENT_PATTERN_FILE_TMP} | head -n 1 | cut -d: -f1)" + else + content_start="$(grep -n "tds:" ${CONTENT_PATTERN_FILE_TMP} | head -n 1 | cut -d: -f1)" + fi + content_stop="$(tail -n +$content_start < ${CONTENT_PATTERN_FILE_TMP} | grep -n "lc_group_filter:" | head -n1 | cut -d: -f1)" + content_stop=$(( $content_stop + $content_start - 1)) + vi ${CONTENT_PATTERN_FILE_TMP} -c ':'"${content_start}"','"${content_stop}"'s/ # / ' -c ':wq' >/dev/null 2>&1 + + cp -r ${CONTENT_PATTERN_FILE_TMP} ${CONTENT_PATTERN_FILE_BAK} + fi +} + +function set_ldap_type_workstreams_pattern(){ + if [[ $DEPLOYMENT_TYPE == "enterprise" ]] ; + then + cp -r ${WORKSTREAMS_PATTERN_FILE_BAK} ${WORKSTREAMS_PATTERN_FILE_TMP} + + if [[ "$LDAP_TYPE" == "AD" ]]; then + content_start="$(grep -n "ad:" ${WORKSTREAMS_PATTERN_FILE_TMP} | head -n 1 | cut -d: -f1)" + else + content_start="$(grep -n "tds:" ${WORKSTREAMS_PATTERN_FILE_TMP} | head -n 1 | cut -d: -f1)" + fi + content_stop="$(tail -n +$content_start < ${WORKSTREAMS_PATTERN_FILE_TMP} | grep -n "lc_group_filter:" | head -n1 | cut -d: -f1)" + content_stop=$(( $content_stop + $content_start - 1)) + vi ${WORKSTREAMS_PATTERN_FILE_TMP} -c ':'"${content_start}"','"${content_stop}"'s/ # / ' -c ':wq' >/dev/null 2>&1 + + cp -r ${WORKSTREAMS_PATTERN_FILE_TMP} ${WORKSTREAMS_PATTERN_FILE_BAK} + fi +} + +function set_ldap_type_workflow_pattern(){ + if [[ $DEPLOYMENT_TYPE == "enterprise" ]] ; + then + cp -r ${WORKFLOW_PATTERN_FILE_BAK} ${WORKFLOW_PATTERN_FILE_TMP} + + if [[ "$LDAP_TYPE" == "AD" ]]; then + content_start="$(grep -n "ad:" ${WORKFLOW_PATTERN_FILE_TMP} | head -n 1 | cut -d: -f1)" + else + content_start="$(grep -n "tds:" ${WORKFLOW_PATTERN_FILE_TMP} | head -n 1 | cut -d: -f1)" + fi + content_stop="$(tail -n +$content_start < ${WORKFLOW_PATTERN_FILE_TMP} | grep -n "lc_group_filter:" | head -n1 | cut -d: -f1)" + content_stop=$(( $content_stop + $content_start - 1)) + vi ${WORKFLOW_PATTERN_FILE_TMP} -c ':'"${content_start}"','"${content_stop}"'s/ # / ' -c ':wq' >/dev/null 2>&1 + + cp -r ${WORKFLOW_PATTERN_FILE_TMP} ${WORKFLOW_PATTERN_FILE_BAK} + fi +} + +function set_ldap_type_ww_pattern(){ + if [[ $DEPLOYMENT_TYPE == "enterprise" ]] ; 
+ then + cp -r ${WW_PATTERN_FILE_BAK} ${WW_PATTERN_FILE_TMP} + + if [[ "$LDAP_TYPE" == "AD" ]]; then + content_start="$(grep -n "ad:" ${WW_PATTERN_FILE_TMP} | head -n 1 | cut -d: -f1)" + else + content_start="$(grep -n "tds:" ${WW_PATTERN_FILE_TMP} | head -n 1 | cut -d: -f1)" + fi + content_stop="$(tail -n +$content_start < ${WW_PATTERN_FILE_TMP} | grep -n "lc_group_filter:" | head -n1 | cut -d: -f1)" + content_stop=$(( $content_stop + $content_start - 1)) + vi ${WW_PATTERN_FILE_TMP} -c ':'"${content_start}"','"${content_stop}"'s/ # / ' -c ':wq' >/dev/null 2>&1 + + cp -r ${WW_PATTERN_FILE_TMP} ${WW_PATTERN_FILE_BAK} + fi +} + +function set_external_share_content_pattern(){ + if [[ $DEPLOYMENT_TYPE == "enterprise" ]] ; + then + containsElement "es" "${OPT_COMPONENTS_CR_SELECTED[@]}" + retVal=$? + if [[ $retVal -eq 0 ]]; then + cp -r ${CONTENT_PATTERN_FILE_BAK} ${CONTENT_PATTERN_FILE_TMP} + # un-comment ext_ldap_configuration + content_start="$(grep -n "ext_ldap_configuration:" ${CONTENT_PATTERN_FILE_TMP} | head -n 1 | cut -d: -f1)" + content_stop="$(tail -n +$content_start < ${CONTENT_PATTERN_FILE_TMP} | grep -n "lc_ldap_group_member_id_map:" | head -n1 | cut -d: -f1)" + content_stop=$(( $content_stop + $content_start - 1)) + vi ${CONTENT_PATTERN_FILE_TMP} -c ':'"${content_start}"','"${content_stop}"'s/ # / ' -c ':wq' >/dev/null 2>&1 + + # un-comment LDAP + if [[ "$LDAP_TYPE" == "AD" ]]; then + content_start="$(grep -n "ad:" ${CONTENT_PATTERN_FILE_TMP} | awk 'NR==2{print $1}' | cut -d: -f1)" + else + content_start="$(grep -n "tds:" ${CONTENT_PATTERN_FILE_TMP} | awk 'NR==2{print $1}' | cut -d: -f1)" + fi + content_stop="$(tail -n +$content_start < ${CONTENT_PATTERN_FILE_TMP} | grep -n "lc_group_filter:" | head -n1 | cut -d: -f1)" + content_stop=$(( $content_stop + $content_start - 1)) + vi ${CONTENT_PATTERN_FILE_TMP} -c ':'"${content_start}"','"${content_stop}"'s/ # / ' -c ':wq' + + cp -r ${CONTENT_PATTERN_FILE_TMP} ${CONTENT_PATTERN_FILE_BAK} + fi + fi +} + +function set_object_store_content_pattern(){ + if [[ $DEPLOYMENT_TYPE == "enterprise" ]] ; + then + cp -r ${CONTENT_PATTERN_FILE_BAK} ${CONTENT_PATTERN_FILE_TMP} + content_start="$(grep -n "datasource_configuration:" ${CONTENT_PATTERN_FILE_TMP} | head -n 1 | cut -d: -f1)" + content_tmp="$(tail -n +$content_start < ${CONTENT_PATTERN_FILE_TMP} | grep -n "dc_os_datasources:" | head -n1 | cut -d: -f1)" + content_tmp=$(( content_tmp + $content_start - 1)) + content_stop="$(tail -n +$content_tmp < ${CONTENT_PATTERN_FILE_TMP} | grep -n "dc_database_type:" | head -n1 | cut -d: -f1)" + content_start=$(( $content_stop + $content_tmp - 1)) + content_tmp="$(tail -n +$content_start < ${CONTENT_PATTERN_FILE_TMP} | grep -n "dc_hadr_max_retries_for_client_reroute:" | head -n1 | cut -d: -f1)" + content_stop=$(( $content_start + $content_tmp - 1)) + # 2nd object store + if [[ "$content_os_number" == 2 ]]; then + vi ${CONTENT_PATTERN_FILE_TMP} -c ':'"${content_start}"','"${content_stop}"' copy '"${content_stop}"'' -c ':wq' >/dev/null 2>&1 + ${YQ_CMD} w -i ${CONTENT_PATTERN_FILE_TMP} spec.datasource_configuration.dc_os_datasources.[1].dc_common_os_datasource_name "FNOS2DS" + ${YQ_CMD} w -i ${CONTENT_PATTERN_FILE_TMP} spec.datasource_configuration.dc_os_datasources.[1].dc_common_os_xa_datasource_name "FNOS2DSXA" + fi + # 3rd object store + if [[ "$content_os_number" == 3 ]]; then + vi ${CONTENT_PATTERN_FILE_TMP} -c ':'"${content_start}"','"${content_stop}"' copy '"${content_stop}"'' -c ':wq' >/dev/null 2>&1 + vi ${CONTENT_PATTERN_FILE_TMP} -c 
':'"${content_start}"','"${content_stop}"' copy '"${content_stop}"'' -c ':wq' >/dev/null 2>&1 + ${YQ_CMD} w -i ${CONTENT_PATTERN_FILE_TMP} spec.datasource_configuration.dc_os_datasources.[1].dc_common_os_datasource_name "FNOS2DS" + ${YQ_CMD} w -i ${CONTENT_PATTERN_FILE_TMP} spec.datasource_configuration.dc_os_datasources.[1].dc_common_os_xa_datasource_name "FNOS2DSXA" + ${YQ_CMD} w -i ${CONTENT_PATTERN_FILE_TMP} spec.datasource_configuration.dc_os_datasources.[2].dc_common_os_datasource_name "FNOS3DS" + ${YQ_CMD} w -i ${CONTENT_PATTERN_FILE_TMP} spec.datasource_configuration.dc_os_datasources.[2].dc_common_os_xa_datasource_name "FNOS3DSXA" + fi + cp -r ${CONTENT_PATTERN_FILE_TMP} ${CONTENT_PATTERN_FILE_BAK} + fi +} + +function set_aca_tenant_pattern(){ + if [[ $DEPLOYMENT_TYPE == "enterprise" ]] ; + then + cp -r ${ACA_PATTERN_FILE_BAK} ${ACA_PATTERN_FILE_TMP} + # ${YQ_CMD} d -i ${ACA_PATTERN_FILE_TMP} spec.datasource_configuration.dc_ca_datasource.tenant_databases + if [ ${#aca_tenant_arr[@]} -eq 0 ]; then + echo -e "\x1B[1;31mNot any element in ACA tenant list found\x1B[0m:\x1B[1m" + else + for i in ${!aca_tenant_arr[@]}; do + ${YQ_CMD} w -i ${ACA_PATTERN_FILE_TMP} spec.datasource_configuration.dc_ca_datasource.tenant_databases.[${i}] "${aca_tenant_arr[i]}" + done + fi + cp -r ${ACA_PATTERN_FILE_TMP} ${ACA_PATTERN_FILE_BAK} + fi +} + +function select_aca_tenant(){ + printf "\n" + printf "\x1B[1mHow many tenants do you want to create initially with Automation Content Analyzer? \x1B[0m" + aca_tenant_number="" + aca_tenant_arr=() + while [[ $aca_tenant_number == "" ]]; + do + read -rp "" aca_tenant_number + if ! [[ "$aca_tenant_number" =~ ^[0-9]+$ ]]; then + echo -e "\x1B[1;31mEnter a valid tenant number\x1B[0m" + aca_tenant_number="" + fi + done + + order_number=1 + while (( ${#aca_tenant_arr[@]} < $aca_tenant_number )); + do + printf "\x1B[1mWhat is the name of tenant ${order_number}? \x1B[0m" + read -rp "" aca_tenant_name + if [ -z "$aca_tenant_number" ]; then + echo -e "\x1B[1;31mEnter a valid tenant name\x1B[0m" + else + aca_tenant_arr=( "${aca_tenant_arr[@]}" "${aca_tenant_name}" ) + fi + ((order_number++)) + printf "\n" + done + printf "\n" +} + +function select_baw_iaws(){ + pattern_arr=() + pattern_cr_arr=() + printf "\n" + echo -e "\x1B[1mTips\x1B[0m: You may only choose one option for the entire installation as these choices are mutually exclusive. " + echo -e "\x1B[1mSelect the Cloud Pak for Automation capability to install: \x1B[0m" + COLUMNS=12 + get_baw_mode + retVal_baw=$? 
+ + if [[ "${DEPLOYMENT_TYPE}" == "demo" && $retVal_baw -eq 0 ]]; + then + options=("Business Automation Workflow") + PS3='Enter a valid option [1 to 1]: ' + elif [[ "${DEPLOYMENT_TYPE}" == "demo" && $retVal_baw -eq 1 ]]; + then + options=("Business Automation Workflow and Automation Workstream Services") + PS3='Enter a valid option [1 to 1]: ' + elif [[ "${DEPLOYMENT_TYPE}" == "enterprise" && $retVal_baw -eq 0 ]] + then + options=("Business Automation Workflow") + PS3='Enter a valid option [1 to 1]: ' + elif [[ "${DEPLOYMENT_TYPE}" == "enterprise" && $retVal_baw -eq 1 ]] + then + options=("Business Automation Workflow" "Automation Workstream Services" "Business Automation Workflow and Automation Workstream Services") + PS3='Enter a valid option [1 to 3]: ' + fi + + select opt in "${options[@]}" + do + case $opt in + "Business Automation Workflow") + pattern_arr=("Business Automation Workflow") + pattern_cr_arr=("workflow") + break + ;; + "Automation Workstream Services") + pattern_arr=("Automation Workstream Services") + pattern_cr_arr=("workstreams") + break + ;; + "Business Automation Workflow and Automation Workstream Services") + pattern_arr=("Business Automation Workflow and Automation Workstream Services") + pattern_cr_arr=("workflow-workstreams") + break + ;; + *) echo "invalid option $REPLY";; + esac + done + foundation_ww=("BAN" "RR" "UMS") + foundation_component_arr=( "${foundation_component_arr[@]}" "${foundation_ww[@]}" ) + PATTERNS_CR_SELECTED=$( IFS=$','; echo "${pattern_cr_arr[*]}" ) + + FOUNDATION_CR_SELECTED=($(echo "${foundation_component_arr[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' ')) + # FOUNDATION_CR_SELECTED_LOWCASE=( "${FOUNDATION_CR_SELECTED[@],,}" ) + + x=0;while [ ${x} -lt ${#FOUNDATION_CR_SELECTED[*]} ] ; do FOUNDATION_CR_SELECTED_LOWCASE[$x]=$(tr [A-Z] [a-z] <<< ${FOUNDATION_CR_SELECTED[$x]}); let x++; done + FOUNDATION_DELETE_LIST=($(echo "${FOUNDATION_CR_SELECTED[@]}" "${FOUNDATION_FULL_ARR[@]}" | tr ' ' '\n' | sort | uniq -u)) + + PATTERNS_CR_SELECTED=($(echo "${pattern_cr_arr[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' ')) +} function input_information(){ + select_installation_type + + if [[ ${INSTALLATION_TYPE} == "existing" ]]; then + INSTALL_BAW_IAWS="No" + prepare_pattern_file + select_platform + check_ocp_version + validate_docker_podman_cli + select_deployment_type + elif [[ ${INSTALLATION_TYPE} == "new" ]] + then + select_platform + check_ocp_version + validate_docker_podman_cli + select_deployment_type + prepare_pattern_file + select_baw_iaws_installation + fi + + if [[ "${INSTALLATION_TYPE}" == "existing" ]] && (( ${#EXISTING_PATTERN_ARR[@]} == 0 )); then + # get_existing_pattern_name + # if (( ${#EXISTING_PATTERN_ARR[@]} == 0 )); then + echo -e "\x1B[1;31mNO EXISTING PATTERN WAS FOUND!\x1B[0m" + read -rsn1 -p"Press any key to continue installing a new pattern...";echo + fi - select_pattern + if [[ "${INSTALL_BAW_IAWS}" = "No" ]]; + then + select_pattern + else + select_baw_iaws + fi select_optional_component get_entitlement_registry - if [ "$use_entitlement" = "no" ]; then verify_local_registry_password fi - get_infra_name + + if [[ $PLATFORM_SELECTED == "OCP" ]]; + then + get_infra_name + fi get_storage_class_name + + if [[ "$DEPLOYMENT_TYPE" == "enterprise" ]]; then + select_ldap_type + fi + + containsElement "content" "${PATTERNS_CR_SELECTED[@]}" + retVal=$? + if [[ ( $retVal -eq 0 ) && "$DEPLOYMENT_TYPE" == "enterprise" ]]; then + select_objectstore_number + fi + + containsElement "contentanalyzer" "${PATTERNS_CR_SELECTED[@]}" + retVal=$?
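+    # Editorial note: containsElement is assumed to be a helper defined elsewhere in this script that returns 0 when its first argument occurs among the remaining arguments; a minimal sketch of such a helper: +    #   function containsElement () { +    #     local e match="$1"; shift +    #     for e; do [[ "$e" == "$match" ]] && return 0; done +    #     return 1 +    #   }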
+ if [[ ( $retVal -eq 0 ) && "$DEPLOYMENT_TYPE" == "enterprise" ]]; then + select_aca_tenant + fi + + # containsElement "decisions_ads" "${PATTERNS_CR_SELECTED[@]}" + # retVal=$? + # if [[ ( $retVal -eq 0 ) && ( "$DEPLOYMENT_TYPE" == "non-production" || "$DEPLOYMENT_TYPE" == "production") ]]; then + # select_ads_designer + # fi } function apply_cp4a_operator(){ + cp -r ${OPERATOR_FILE_BAK} ${OPERATOR_FILE_TMP} + printf "\n" echo -e "\x1B[1mInstalling the Cloud Pak for Automation operator...\x1B[0m" + + # set db2_license + ${SED_COMMAND} '/dba_license/{n;s/value:/value: accept/;}' ${OPERATOR_FILE_TMP} + ${SED_COMMAND} '/baw_license/{n;s/value:/value: accept/;}' ${OPERATOR_FILE_TMP} # Set operator image pull secret ${SED_COMMAND} "s|admin.registrykey|$DOCKER_RES_SECRET_NAME|g" ${OPERATOR_FILE_TMP} # Set operator image registry @@ -649,23 +1728,27 @@ function apply_cp4a_operator(){ else ${SED_COMMAND} "s/$new_operator/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${OPERATOR_FILE_TMP} fi - cp -rf ${OPERATOR_FILE_TMP} ${OPERATOR_FILE_BAK} - oc delete -f ${OPERATOR_FILE_TMP} >/dev/null 2>&1 + if [[ "${OCP_VERSION}" == "3.11" ]];then + ${SED_COMMAND} "s/\# runAsUser\: 1001/runAsUser\: 1001/g" ${OPERATOR_FILE_TMP} + fi + + ${CLI_CMD} delete -f ${OPERATOR_FILE_TMP} >/dev/null 2>&1 sleep 5 - INSTALL_OPERATOR_CMD="oc apply -f ${OPERATOR_FILE_TMP}" + INSTALL_OPERATOR_CMD="${CLI_CMD} apply -f ${OPERATOR_FILE_TMP}" if $INSTALL_OPERATOR_CMD ; then echo -e "\x1B[1mDone\x1B[0m" else echo -e "\x1B[1;31mFailed\x1B[0m" fi + cp -rf ${OPERATOR_FILE_TMP} ${OPERATOR_FILE_BAK} printf "\n" # Check deployment rollout status every 5 seconds (max 10 minutes) until complete. echo -e "\x1B[1mWaiting for the Cloud Pak operator to be ready. This might take a few minutes... \x1B[0m" ATTEMPTS=0 - ROLLOUT_STATUS_CMD="oc rollout status deployment/ibm-cp4a-operator" + ROLLOUT_STATUS_CMD="${CLI_CMD} rollout status deployment/ibm-cp4a-operator" until $ROLLOUT_STATUS_CMD || [ $ATTEMPTS -eq 120 ]; do $ROLLOUT_STATUS_CMD ATTEMPTS=$((ATTEMPTS + 1)) @@ -679,11 +1762,11 @@ function apply_cp4a_operator(){ printf "\n" } -function copy_db2_jdbc(){ +function copy_jdbc_driver(){ # Get pod name - echo -e "\x1B[1mCopying the Db2 JDBC driver for the operator...\x1B[0m" - operator_podname=$(oc get pod|grep ibm-cp4a-operator|grep Running|awk '{print $1}') - COPY_JDBC_CMD="oc cp ${DB2_JDBC_DRIVER_DIR} ${operator_podname}:/opt/ansible/share/jdbc -c ansible" + echo -e "\x1B[1mCopying the JDBC driver for the operator...\x1B[0m" + operator_podname=$(${CLI_CMD} get pod|grep ibm-cp4a-operator|grep Running|awk '{print $1}') + COPY_JDBC_CMD="${CLI_CMD} cp ${JDBC_DRIVER_DIR} ${operator_podname}:/opt/ansible/share/jdbc -c ansible" if $COPY_JDBC_CMD ; then echo -e "\x1B[1mDone\x1B[0m" @@ -691,307 +1774,806 @@ function copy_db2_jdbc(){ echo -e "\x1B[1;31mFailed\x1B[0m" fi } -# Begin - Modify CONTENT pattern yaml according pattent/components selected -function apply_content_pattern_cr(){ - cp -r ${CONTENT_PATTERN_FILE_BAK} ${CONTENT_PATTERN_FILE_TMP} - # Set sc_optional_components='' when none optional component selected - if [ "$COMPONENTS_SELECTED" = "None" ]; then - ${SED_COMMAND} "s|sc_optional_components:.*|sc_optional_components: \"\"|g" ${CONTENT_PATTERN_FILE_TMP} - else - ${SED_COMMAND} "s|sc_optional_components:.*|sc_optional_components: \"$COMPONENTS_SELECTED\"|g" ${CONTENT_PATTERN_FILE_TMP} - content_start="$(grep -n "cmis:" ${CONTENT_PATTERN_FILE_TMP} | head -n 1 | cut -d: -f1)" - content_stop="$(tail -n +$content_start < ${CONTENT_PATTERN_FILE_TMP} | grep 
-n "tag:" | head -n1 | cut -d: -f1)" - content_stop=$(( $content_stop + $content_start - 1)) - vi ${CONTENT_PATTERN_FILE_TMP} -c ':'"${content_start}"','"${content_stop}"'s/^#/' -c ':wq' +function set_foundation_components(){ + if (( ${#FOUNDATION_DELETE_LIST[@]} > 0 ));then + cp -r ${FOUNDATION_PATTERN_FILE_BAK} ${FOUNDATION_PATTERN_FILE_TMP} + if (( ${#OPT_COMPONENTS_CR_SELECTED[@]} > 0 ));then + # OPT_COMPONENTS_CR_SELECTED + OPT_COMPONENTS_CR_SELECTED_UPPERCASE=() + x=0;while [ ${x} -lt ${#OPT_COMPONENTS_CR_SELECTED[*]} ] ; do OPT_COMPONENTS_CR_SELECTED_UPPERCASE[$x]=$(tr [a-z] [A-Z] <<< ${OPT_COMPONENTS_CR_SELECTED[$x]}); let x++; done + + for host in ${OPT_COMPONENTS_CR_SELECTED_UPPERCASE[@]}; do + FOUNDATION_DELETE_LIST=( "${FOUNDATION_DELETE_LIST[@]/$host}" ) + done + fi + + for item in "${FOUNDATION_DELETE_LIST[@]}"; do + if [[ "$item" == "BAS" ]];then + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.bastudio_configuration + fi + if [[ "$item" == "UMS" ]];then + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.ums_configuration + fi + if [[ "$item" == "BAN" ]];then + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.navigator_configuration + fi + if [[ "$item" == "RR" ]];then + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.resource_registry_configuration + fi + done + cp -r ${FOUNDATION_PATTERN_FILE_TMP} ${FOUNDATION_PATTERN_FILE_BAK} fi +} - # Set sc_deployment_patterns=content - ${SED_COMMAND} "s|sc_deployment_patterns:.*|sc_deployment_patterns: content|g" ${CONTENT_PATTERN_FILE_TMP} +function merge_pattern(){ + cp -r ${FOUNDATION_PATTERN_FILE_BAK} ${FOUNDATION_PATTERN_FILE_TMP} + set_ldap_type_foundation + for item in "${PATTERNS_CR_SELECTED[@]}"; do + while true; do + case $item in + "content") + set_ldap_type_content_pattern + set_external_share_content_pattern + set_object_store_content_pattern + ${YQ_CMD} m -a -i -M ${FOUNDATION_PATTERN_FILE_TMP} ${CONTENT_PATTERN_FILE_BAK} + break + ;; + "contentanalyzer") + set_aca_tenant_pattern + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.datasource_configuration.dc_ca_datasource.tenant_databases + ${YQ_CMD} m -a -i -M ${FOUNDATION_PATTERN_FILE_TMP} ${ACA_PATTERN_FILE_BAK} + break + ;; + "decisions") + set_decision_feature + ${YQ_CMD} m -a -i -M ${FOUNDATION_PATTERN_FILE_TMP} ${DECISIONS_PATTERN_FILE_BAK} + break + ;; + "workflow") + set_ldap_type_workflow_pattern + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.baw_configuration.[*] + if [[ $DEPLOYMENT_TYPE == "enterprise" ]];then + cp -rf ${WORKFLOW_PATTERN_FILE_BAK} ${FOUNDATION_PATTERN_FILE_TMP} + elif [[ $DEPLOYMENT_TYPE == "demo" ]] + then + ${YQ_CMD} m -a -i -M ${FOUNDATION_PATTERN_FILE_TMP} ${WORKFLOW_PATTERN_FILE_BAK} + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.bastudio_configuration + fi + break + ;; + "workstreams") + set_ldap_type_workstreams_pattern + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.baw_configuration.[*] + if [[ $DEPLOYMENT_TYPE == "enterprise" ]];then + cp -rf ${WORKSTREAMS_PATTERN_FILE_BAK} ${FOUNDATION_PATTERN_FILE_TMP} + elif [[ $DEPLOYMENT_TYPE == "demo" ]] + then + ${YQ_CMD} m -a -i -M ${FOUNDATION_PATTERN_FILE_TMP} ${WORKSTREAMS_PATTERN_FILE_BAK} + fi + break + ;; + "workflow-workstreams") + set_ldap_type_ww_pattern + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.baw_configuration.[*] + if [[ $DEPLOYMENT_TYPE == "enterprise" ]];then + cp -rf ${WW_PATTERN_FILE_BAK} ${FOUNDATION_PATTERN_FILE_TMP} + elif [[ $DEPLOYMENT_TYPE == "demo" ]] + then + ${YQ_CMD} m -a -i -M ${FOUNDATION_PATTERN_FILE_TMP} 
${WW_PATTERN_FILE_BAK} + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.bastudio_configuration + fi + break + ;; + "application") + set_baa_app_designer + ${YQ_CMD} m -a -i -M ${FOUNDATION_PATTERN_FILE_TMP} ${APPLICATION_PATTERN_FILE_BAK} + break + ;; + "digitalworker") + ${YQ_CMD} m -a -i -M ${FOUNDATION_PATTERN_FILE_TMP} ${ADW_PATTERN_FILE_BAK} + break + ;; + "decisions_ads") + set_ads_designer_runtime + ${YQ_CMD} m -a -i -M ${FOUNDATION_PATTERN_FILE_TMP} ${ADS_PATTERN_FILE_BAK} + break + ;; + esac + done + done + # ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.shared_configuration.image_pull_secrets + # ${YQ_CMD} w -i ${FOUNDATION_PATTERN_FILE_TMP} spec.shared_configuration.image_pull_secrets.[0] "image-pull-secret" + cp -r ${FOUNDATION_PATTERN_FILE_TMP} ${FOUNDATION_PATTERN_FILE_BAK} +} - # Set sc_deployment_hostname_suffix - ${SED_COMMAND} "s|sc_deployment_hostname_suffix:.*|sc_deployment_hostname_suffix: \"{{ meta.namespace }}.${INFRA_NAME}\"|g" ${CONTENT_PATTERN_FILE_TMP} +function merge_optional_components(){ + cp -r ${FOUNDATION_PATTERN_FILE_BAK} ${FOUNDATION_PATTERN_FILE_TMP} - # Set sc_dynamic_storage_classname - ${SED_COMMAND} "s|sc_dynamic_storage_classname:.*|sc_dynamic_storage_classname: ${storage_class_name}|g" ${CONTENT_PATTERN_FILE_TMP} + for item in "${OPTIONAL_COMPONENT_DELETE_LIST[@]}"; do + while true; do + case $item in + "bas") + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.bastudio_configuration + break + ;; + "ums") + containsElement "bai" "${optional_component_cr_arr[@]}" + retVal=$? + if [[ $retVal -eq 1 ]]; then + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.ums_configuration + fi + break + ;; + "cmis") + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.ecm_configuration.cmis + break + ;; + "css") + break + ;; + "es") + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.ecm_configuration.es + break + ;; + "tm") + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.ecm_configuration.tm + break + ;; + "bai") + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.bai_configuration + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.shared_configuration.kafka_configuration + break + ;; + "ads_designer") + # ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.bai_configuration + break + ;; + "ads_runtime") + # ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.bai_configuration + break + ;; + "decisionCenter") + # ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.bai_configuration + break + ;; + "decisionRunner") + # ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.bai_configuration + break + ;; + "decisionServerRuntime") + # ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.bai_configuration + break + ;; + "app_designer") + # foundation_app=("BAS") + # foundation_component_arr=( "${foundation_component_arr[@]}" "${foundation_app[@]}" ) + # ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.bastudio_configuration + break + ;; + esac + done + done + FOUNDATION_CR_SELECTED=($(echo "${foundation_component_arr[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' ')) + # FOUNDATION_CR_SELECTED_LOWCASE=( "${FOUNDATION_CR_SELECTED[@],,}" ) - old_fmcn="$REGISTRY_IN_FILE\/cp\/cp4a\/fncm" - old_ban="$REGISTRY_IN_FILE\/cp\/cp4a\/ban" + x=0;while [ ${x} -lt ${#FOUNDATION_CR_SELECTED[*]} ] ; do FOUNDATION_CR_SELECTED_LOWCASE[$x]=$(tr [A-Z] [a-z] <<< ${FOUNDATION_CR_SELECTED[$x]}); let x++; done + FOUNDATION_DELETE_LIST=($(echo "${FOUNDATION_CR_SELECTED[@]}" "${FOUNDATION_FULL_ARR[@]}" | tr ' ' '\n' | sort | uniq -u)) + + cp -r ${FOUNDATION_PATTERN_FILE_TMP} 
${FOUNDATION_PATTERN_FILE_BAK} +} - +function get_existing_pattern_name(){ + existing_pattern_cr_name="" + existing_pattern_list="" + existing_opt_component_list="" + existing_platform_type="" + existing_deployment_type="" + printf "\x1B[1mProvide the path and file name of the existing custom resource (CR) file.\n\x1B[0m" + printf "\x1B[1mPress [Enter] to accept default.\n\x1B[0m" + # printf "\x1B[1mDefault is \x1B[0m(${FOUNDATION_PATTERN_FILE_BAK}): " + # existing_pattern_cr_name=`${CLI_CMD} get icp4acluster|awk '{if(NR>1){if(NR==2){ arr=$1; }else{ arr=arr" "$1; }} } END{ print arr }'` + + while [[ $existing_pattern_cr_name == "" ]]; + do + read -p "[Default=$FOUNDATION_PATTERN_FILE_BAK]: " existing_pattern_cr_name + : ${existing_pattern_cr_name:=$FOUNDATION_PATTERN_FILE_BAK} + if [ -f "$existing_pattern_cr_name" ]; then + printf "\n" + else + echo -e "\x1B[1;31m\"$existing_pattern_cr_name\" file does not exist!
\n\x1B[0m" + existing_pattern_cr_name="" + fi + done + # existing_pattern_list=`${CLI_CMD} get icp4acluster $existing_pattern_cr_name -o yaml | yq r - spec.shared_configuration.sc_deployment_patterns` + # existing_pattern_deploy_type=`${CLI_CMD} get icp4acluster $existing_pattern_cr_name -o yaml | yq r - spec.shared_configuration.sc_deployment_type` + existing_pattern_list=`cat $existing_pattern_cr_name | ${YQ_CMD} r - spec.shared_configuration.sc_deployment_patterns` + existing_opt_component_list=`cat $existing_pattern_cr_name | ${YQ_CMD} r - spec.shared_configuration.sc_optional_components` + + existing_platform_type=`cat $existing_pattern_cr_name | ${YQ_CMD} r - spec.shared_configuration.sc_deployment_platform` + existing_deployment_type=`cat $existing_pattern_cr_name | ${YQ_CMD} r - spec.shared_configuration.sc_deployment_type` - oc delete -f ${CONTENT_PATTERN_FILE_BAK} >/dev/null 2>&1 - sleep 5 - printf "\n" - echo -e "\x1B[1mInstalling the selected Cloud Pak capability...\x1B[0m" - APPLY_CONTENT_CMD="oc apply -f ${CONTENT_PATTERN_FILE_BAK}" + case "${existing_deployment_type}" in + demo*) DEPLOYMENT_TYPE="demo";; + enterprise*) DEPLOYMENT_TYPE="enterprise";; + *) + echo -e "\x1B[1;31mNot valid deployment type found in CR, exiting....\n\x1B[0m" + exit 0 + ;; + esac - if $APPLY_CONTENT_CMD ; then - echo -e "\x1B[1mDone\x1B[0m" - else - echo -e "\x1B[1;31mFailed\x1B[0m" - fi + case "${existing_platform_type}" in + ROKS*) PLATFORM_SELECTED="ROKS";; + OCP*) PLATFORM_SELECTED="OCP";; + other*) PLATFORM_SELECTED="other";; + *) + echo -e "\x1B[1;31mNot valid platform type found in CR, exiting....\n\x1B[0m" + exit 0 + ;; + esac + OIFS=$IFS + IFS=',' read -r -a EXISTING_PATTERN_ARR <<< "$existing_pattern_list" + IFS=$OIFS - printf "\n" - echo -e "\x1B[1mThe custom resource file used is: \"${CONTENT_PATTERN_FILE_BAK}\"\x1B[0m" + OIFS=$IFS + IFS=',' read -r -a EXISTING_OPT_COMPONENT_ARR <<< "$existing_opt_component_list" + IFS=$OIFS +} +function select_objectstore_number(){ printf "\n" - echo -e "\x1B[1mTo monitor the deployment status, follow the Operator logs. For details, refer to the troubleshooting section in Knowledge Center here: \x1B[0m" - echo -e "\x1B[1mhttps://www.ibm.com/support/knowledgecenter/en/SSYHZ8_19.0.x/com.ibm.dba.install/op_topics/tsk_ca_troubleshoot.html\x1B[0m" - + printf "\x1B[1mHow many object stores is being deployed? \x1B[0m" + content_os_number="" + while [[ $content_os_number == "" ]]; + do + read -rp "" content_os_number + if ! 
[[ "$content_os_number" =~ ^[1-3]$ ]]; then + echo -e "\x1B[1;31mEnter a valid number [1 to 3]\x1B[0m" + content_os_number="" + fi + done } -# End - Modify CONTENT pattern yaml according pattent/components selected - -# Begin - Modify APPLICATION pattern yaml according pattent/components selected -function apply_application_pattern_cr(){ - cp -r ${APPLICATION_PATTERN_FILE_BAK} ${APPLICATION_PATTERN_FILE_TMP} - # # Set sc_deployment_patterns=application - # ${SED_COMMAND} "s|sc_deployment_patterns:.*|sc_deployment_patterns: application|g" ${APPLICATION_PATTERN_FILE_TMP} - # Set sc_deployment_hostname_suffix - ${SED_COMMAND} "s|sc_deployment_hostname_suffix:.*|sc_deployment_hostname_suffix: \"{{ meta.namespace }}.${INFRA_NAME}\"|g" ${APPLICATION_PATTERN_FILE_TMP} +# function select_ads_designer(){ +# INSTALL_ADS_DESIGNER="" +# ads_designer_install="" +# printf "\n" +# printf "(Note: if you are deploying a development environment where you want to design\n" +# printf "and manage your decision projects, then you would want this option)\n" +# printf "\x1B[1mDo you want ADS Decision Designer to be installed? \x1B[0m" + +# while [[ $ads_designer_install == "" ]]; +# do +# read -rp "" ads_designer_install +# case "$ads_designer_install" in +# "y"|"Y"|"yes"|"Yes"|"YES") +# INSTALL_ADS_DESIGNER="Yes" +# break +# ;; +# "n"|"N"|"no"|"No"|"NO") +# INSTALL_ADS_DESIGNER="No" +# break +# ;; +# *) +# printf "\x1B[1mDo you want ADS Decision Designer to be installed (Yes/No)? \x1B[0m" +# ads_designer_install="" +# ;; +# esac +# done +# } + + +function set_baa_app_designer(){ + cp -r ${APPLICATION_PATTERN_FILE_BAK} ${APPLICATION_PATTERN_FILE_TMP} + if [[ $DEPLOYMENT_TYPE == "demo" ]] ; + then + foundation_baa=("BAS") + foundation_component_arr=( "${foundation_component_arr[@]}" "${foundation_baa[@]}" ) - # Set sc_dynamic_storage_classname - ${SED_COMMAND} "s|sc_dynamic_storage_classname:.*|sc_dynamic_storage_classname: ${storage_class_name}|g" ${APPLICATION_PATTERN_FILE_TMP} + elif [[ $DEPLOYMENT_TYPE == "enterprise" ]] + then + containsElement "app_designer" "${OPT_COMPONENTS_CR_SELECTED[@]}" + retVal=$? 
+ if [[ $retVal -eq 0 ]]; then + foundation_baa=("BAS") + foundation_component_arr=( "${foundation_component_arr[@]}" "${foundation_baa[@]}" ) + fi + fi + cp -r ${APPLICATION_PATTERN_FILE_TMP} ${APPLICATION_PATTERN_FILE_BAK} +} - # Set image_pull_secrets - ${SED_COMMAND} "s|image-pull-secret|$DOCKER_RES_SECRET_NAME|g" ${APPLICATION_PATTERN_FILE_TMP} +function set_ads_designer_runtime(){ + cp -r ${ADS_PATTERN_FILE_BAK} ${ADS_PATTERN_FILE_TMP} + if [[ $DEPLOYMENT_TYPE == "demo" ]] ; + then + ${YQ_CMD} w -i ${ADS_PATTERN_FILE_TMP} spec.ads_configuration.decision_designer.enabled "true" + ${YQ_CMD} w -i ${ADS_PATTERN_FILE_TMP} spec.ads_configuration.decision_runtime.enabled "true" + foundation_ads=("BAS") + foundation_component_arr=( "${foundation_component_arr[@]}" "${foundation_ads[@]}" ) - if [ "$use_entitlement" = "yes" ] ; then - # new_docker_reg_server="$DOCKER_REG_SERVER\/cp\/cp4a\/fncm" - ${SED_COMMAND} "s/cp.icr.io/$DOCKER_REG_SERVER/g" ${APPLICATION_PATTERN_FILE_TMP} - else - old_ums="cp.icr.io\/cp\/cp4a\/ums" - old_aae="cp.icr.io\/cp\/cp4a\/aae" - old_ban="cp.icr.io\/cp\/cp4a\/ban" - old_bas="cp.icr.io\/cp\/cp4a\/bas" - - ${SED_COMMAND} "s/$old_ums/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${APPLICATION_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_aae/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${APPLICATION_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_ban/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${APPLICATION_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_bas/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${APPLICATION_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_db2/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${APPLICATION_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_ldap/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${APPLICATION_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_db2_etcd/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${APPLICATION_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_busybox/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${APPLICATION_PATTERN_FILE_TMP} + elif [[ $DEPLOYMENT_TYPE == "enterprise" ]] + then + # yq d -i ${ACA_PATTERN_FILE_TMP} spec.datasource_configuration.dc_ca_datasource.tenant_databases + containsElement "ads_designer" "${OPT_COMPONENTS_CR_SELECTED[@]}" + retVal=$? + if [[ $retVal -eq 0 ]]; then + ${YQ_CMD} w -i ${ADS_PATTERN_FILE_TMP} spec.ads_configuration.decision_designer.enabled "true" + foundation_ads=("BAS") + foundation_component_arr=( "${foundation_component_arr[@]}" "${foundation_ads[@]}" ) + else + ${YQ_CMD} w -i ${ADS_PATTERN_FILE_TMP} spec.ads_configuration.decision_designer.enabled "false" + fi + containsElement "ads_runtime" "${OPT_COMPONENTS_CR_SELECTED[@]}" + retVal=$? 
+ if [[ $retVal -eq 0 ]]; then + ${YQ_CMD} w -i ${ADS_PATTERN_FILE_TMP} spec.ads_configuration.decision_runtime.enabled "true" + else + ${YQ_CMD} w -i ${ADS_PATTERN_FILE_TMP} spec.ads_configuration.decision_runtime.enabled "false" + fi + fi + cp -r ${ADS_PATTERN_FILE_TMP} ${ADS_PATTERN_FILE_BAK} +} - ${SED_COMMAND_FORMAT} ${APPLICATION_PATTERN_FILE_TMP} - cp -rf ${APPLICATION_PATTERN_FILE_TMP} ${APPLICATION_PATTERN_FILE_BAK} - oc delete -f ${APPLICATION_PATTERN_FILE_BAK} >/dev/null 2>&1 - sleep 5 - printf "\n" - echo -e "\x1B[1mInstalling the selected Cloud Pak capability...\x1B[0m" - # printf "\n" - APPLY_APPLICATION_CMD="oc apply -f ${APPLICATION_PATTERN_FILE_BAK}" - if $APPLY_APPLICATION_CMD ; then - echo -e "\x1B[1mDone\x1B[0m" - else - echo -e "\x1B[1;31mFailed\x1B[0m" +function set_decision_feature(){ + cp -r ${DECISIONS_PATTERN_FILE_BAK} ${DECISIONS_PATTERN_FILE_TMP} + if [[ $DEPLOYMENT_TYPE == "demo" ]] ; + then + ${YQ_CMD} w -i ${DECISIONS_PATTERN_FILE_TMP} spec.odm_configuration.decisionCenter.enabled "true" + ${YQ_CMD} w -i ${DECISIONS_PATTERN_FILE_TMP} spec.odm_configuration.decisionServerRuntime.enabled "true" + ${YQ_CMD} w -i ${DECISIONS_PATTERN_FILE_TMP} spec.odm_configuration.decisionRunner.enabled "true" + elif [[ $DEPLOYMENT_TYPE == "enterprise" ]] + then + # yq d -i ${ACA_PATTERN_FILE_TMP} spec.datasource_configuration.dc_ca_datasource.tenant_databases + containsElement "decisionCenter" "${OPT_COMPONENTS_CR_SELECTED[@]}" + retVal=$? + if [[ $retVal -eq 0 ]]; then + ${YQ_CMD} w -i ${DECISIONS_PATTERN_FILE_TMP} spec.odm_configuration.decisionCenter.enabled "true" + else + ${YQ_CMD} w -i ${DECISIONS_PATTERN_FILE_TMP} spec.odm_configuration.decisionCenter.enabled "false" + fi + containsElement "decisionServerRuntime" "${OPT_COMPONENTS_CR_SELECTED[@]}" + retVal=$? + if [[ $retVal -eq 0 ]]; then + ${YQ_CMD} w -i ${DECISIONS_PATTERN_FILE_TMP} spec.odm_configuration.decisionServerRuntime.enabled "true" + else + ${YQ_CMD} w -i ${DECISIONS_PATTERN_FILE_TMP} spec.odm_configuration.decisionServerRuntime.enabled "false" + fi + containsElement "decisionRunner" "${OPT_COMPONENTS_CR_SELECTED[@]}" + retVal=$? 
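+        # Editorial note: the "${YQ_CMD} w -i <file> <path> <value>" calls used throughout these set_* functions follow the yq 2.x/3.x write syntax, which edits the YAML file in place, for example: +        #   yq w -i cr.yaml spec.odm_configuration.decisionRunner.enabled "true" +        # (This assumes YQ_CMD points at a yq 2.x/3.x binary; yq 4.x replaced "w" with "yq eval '<path> = <value>' -i <file>".)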
+ if [[ $retVal -eq 0 ]]; then + ${YQ_CMD} w -i ${DECISIONS_PATTERN_FILE_TMP} spec.odm_configuration.decisionRunner.enabled "true" + else + ${YQ_CMD} w -i ${DECISIONS_PATTERN_FILE_TMP} spec.odm_configuration.decisionRunner.enabled "false" + fi fi + cp -r ${DECISIONS_PATTERN_FILE_TMP} ${DECISIONS_PATTERN_FILE_BAK} } # Begin - Modify FOUNDATION pattern yaml according to the patterns/components selected +function apply_pattern_cr(){ + # echo "length of optional_component_cr_arr:${#optional_component_cr_arr[@]}" + # echo "!!optional_component_cr_arr!!!${optional_component_cr_arr[*]}" + # echo "PATTERNS_CR_SELECTED: ${PATTERNS_CR_SELECTED[*]}" + # echo "OPT_COMPONENTS_CR_SELECTED: ${OPT_COMPONENTS_CR_SELECTED[*]}" + # echo "FOUNDATION_CR_SELECTED_LOWCASE: ${FOUNDATION_CR_SELECTED_LOWCASE[*]}" + # echo "FOUNDATION_DELETE_LIST: ${FOUNDATION_DELETE_LIST[*]}" + # echo "OPTIONAL_COMPONENT_DELETE_LIST: ${OPTIONAL_COMPONENT_DELETE_LIST[*]}" + # echo "KEEP_COMPOMENTS: ${KEEP_COMPOMENTS[*]}" + # echo "REMOVED FOUNDATION_CR_SELECTED FROM OPTIONAL_COMPONENT_DELETE_LIST: ${OPTIONAL_COMPONENT_DELETE_LIST[*]}" + # echo "pattern list in CR: ${pattern_joined}" + # echo "optional components list in CR: ${opt_components_joined}" + # echo "length of optional_component_arr:${#optional_component_arr[@]}" + + # read -rsn1 -p"Press any key to continue (DEBUG MODE)";echo + + cp -r ${FOUNDATION_PATTERN_FILE_BAK} ${FOUNDATION_PATTERN_FILE_TMP} + + tps=" ${OPTIONAL_COMPONENT_DELETE_LIST[*]} " + for item in ${KEEP_COMPOMENTS[@]}; do + tps=${tps/ ${item} / } + done + OPTIONAL_COMPONENT_DELETE_LIST=( $tps ) + # Convert the pattern array to a comma-separated list + delim="" + pattern_joined="" + for item in "${PATTERNS_CR_SELECTED[@]}"; do + pattern_joined="$pattern_joined$delim$item" + delim="," + done + if [[ $INSTALL_BAW_IAWS == "No" ]];then + pattern_joined="foundation$delim$pattern_joined" + fi + # Convert the optional components array to a comma-separated list + delim="" + opt_components_joined="" + for item in "${OPT_COMPONENTS_CR_SELECTED[@]}"; do + opt_components_joined="$opt_components_joined$delim$item" + delim="," + done + + + merge_pattern + merge_optional_components + set_foundation_components + + # Set sc_deployment_patterns + ${SED_COMMAND} "s|sc_deployment_patterns:.*|sc_deployment_patterns: \"$pattern_joined\"|g" ${FOUNDATION_PATTERN_FILE_TMP} + + # Set sc_optional_components='' when none optional component selected - if [ "$COMPONENTS_SELECTED" = "None" ]; then - ${SED_COMMAND} "s|sc_optional_components:.*|sc_optional_components: \"\"|g" ${ACA_PATTERN_FILE_TMP} + if [ "${#optional_component_cr_arr[@]}" -eq "0" ]; then + ${SED_COMMAND} "s|sc_optional_components:.*|sc_optional_components: \"\"|g" ${FOUNDATION_PATTERN_FILE_TMP} else - ${SED_COMMAND} "s|sc_optional_components:.*|sc_optional_components: \"$COMPONENTS_SELECTED\"|g" ${ACA_PATTERN_FILE_TMP} + ${SED_COMMAND} "s|sc_optional_components:.*|sc_optional_components: \"$opt_components_joined\"|g" ${FOUNDATION_PATTERN_FILE_TMP} fi - if [[ $COMPONENTS_SELECTED == *"ums"* ]]; then - aca_start="$(grep -n "ums_configuration:" ${ACA_PATTERN_FILE_TMP} | head -n 1 | cut -d: -f1)" - aca_stop="$(tail -n +$aca_start
< ${ACA_PATTERN_FILE_TMP} | grep -n "tag:" | head -n1 | cut -d: -f1)" - aca_stop=$(( $aca_stop + $aca_start - 1)) + # Set sc_deployment_platform + ${SED_COMMAND} "s|sc_deployment_platform:.*|sc_deployment_platform: \"$PLATFORM_SELECTED\"|g" ${FOUNDATION_PATTERN_FILE_TMP} + + # Set sc_deployment_type + ${SED_COMMAND} "s|sc_deployment_type:.*|sc_deployment_type: \"$DEPLOYMENT_TYPE\"|g" ${FOUNDATION_PATTERN_FILE_TMP} - vi ${ACA_PATTERN_FILE_TMP} -c ':'"${aca_start}"','"${aca_stop}"'s/^#/' -c ':wq' - fi # Set sc_deployment_hostname_suffix - ${SED_COMMAND} "s|sc_deployment_hostname_suffix:.*|sc_deployment_hostname_suffix: \"{{ meta.namespace }}.${INFRA_NAME}\"|g" ${ACA_PATTERN_FILE_TMP} + if [[ $PLATFORM_SELECTED == "OCP" ]]; + then + ${SED_COMMAND} "s|sc_deployment_hostname_suffix:.*|sc_deployment_hostname_suffix: \"{{ meta.namespace }}.${INFRA_NAME}\"|g" ${FOUNDATION_PATTERN_FILE_TMP} + else + ${SED_COMMAND} "s|sc_deployment_hostname_suffix:.*|sc_deployment_hostname_suffix: \"{{ meta.namespace }}\"|g" ${FOUNDATION_PATTERN_FILE_TMP} + fi + + # Set lc_selected_ldap_type + if [[ $DEPLOYMENT_TYPE == "enterprise" ]];then + if [[ $LDAP_TYPE == "AD" ]];then + # ${YQ_CMD} w -i ${FOUNDATION_PATTERN_FILE_TMP} spec.ldap_configuration.lc_selected_ldap_type "\"Microsoft Active Directory\"" + ${SED_COMMAND} "s|lc_selected_ldap_type:.*|lc_selected_ldap_type: \"Microsoft Active Directory\"|g" ${FOUNDATION_PATTERN_FILE_TMP} + + elif [[ $LDAP_TYPE == "TDS" ]] + then + # ${YQ_CMD} w -i ${FOUNDATION_PATTERN_FILE_TMP} spec.ldap_configuration.lc_selected_ldap_type "IBM Security Directory Server" + ${SED_COMMAND} "s|lc_selected_ldap_type:.*|lc_selected_ldap_type: \"IBM Security Directory Server\"|g" ${FOUNDATION_PATTERN_FILE_TMP} + fi + fi # Set sc_dynamic_storage_classname - ${SED_COMMAND} "s|sc_dynamic_storage_classname:.*|sc_dynamic_storage_classname: ${storage_class_name}|g" ${ACA_PATTERN_FILE_TMP} + ${SED_COMMAND} "s|sc_dynamic_storage_classname:.*|sc_dynamic_storage_classname: ${STORAGE_CLASS_NAME}|g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s|sc_slow_file_storage_classname:.*|sc_slow_file_storage_classname: ${SLOW_STORAGE_CLASS_NAME}|g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s|sc_medium_file_storage_classname:.*|sc_medium_file_storage_classname: ${MEDIUM_STORAGE_CLASS_NAME}|g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s|sc_fast_file_storage_classname:.*|sc_fast_file_storage_classname: ${FAST_STORAGE_CLASS_NAME}|g" ${FOUNDATION_PATTERN_FILE_TMP} + # Set image_pull_secrets + # ${SED_COMMAND} "s|image-pull-secret|$DOCKER_RES_SECRET_NAME|g" ${FOUNDATION_PATTERN_FILE_TMP} + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} spec.shared_configuration.image_pull_secrets + ${YQ_CMD} w -i ${FOUNDATION_PATTERN_FILE_TMP} spec.shared_configuration.image_pull_secrets.[0] "$DOCKER_RES_SECRET_NAME" + # set sc_image_repository if [ "$use_entitlement" = "yes" ] ; then - ${SED_COMMAND} "s/cp.icr.io/$DOCKER_REG_SERVER/g" ${ACA_PATTERN_FILE_TMP} + ${SED_COMMAND} "s|sc_image_repository:.*|sc_image_repository: ${DOCKER_REG_SERVER}|g" ${FOUNDATION_PATTERN_FILE_TMP} else - old_ums="cp.icr.io\/cp\/cp4a\/ums" - old_aca="cp.icr.io\/cp\/cp4a\/baca" - - ${SED_COMMAND} "s/$old_ums/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${ACA_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_aca/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${ACA_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_db2/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${ACA_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_ldap/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${ACA_PATTERN_FILE_TMP} - ${SED_COMMAND} 
"s/$old_db2_etcd/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${ACA_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_busybox/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${ACA_PATTERN_FILE_TMP} + ${SED_COMMAND} "s|sc_image_repository:.*|sc_image_repository: ${CONVERT_LOCAL_REGISTRY_SERVER}|g" ${FOUNDATION_PATTERN_FILE_TMP} fi - ${SED_COMMAND_FORMAT} ${ACA_PATTERN_FILE_TMP} - cp -rf ${ACA_PATTERN_FILE_TMP} ${ACA_PATTERN_FILE_BAK} - - oc delete -f ${ACA_PATTERN_FILE_BAK} >/dev/null 2>&1 - sleep 5 - printf "\n" - echo -e "\x1B[1mInstalling the selected Cloud Pak capability...\x1B[0m" + # Replace image URL + old_fmcn="$REGISTRY_IN_FILE\/cp\/cp4a\/fncm" + old_ban="$REGISTRY_IN_FILE\/cp\/cp4a\/ban" + old_ums="$REGISTRY_IN_FILE\/cp\/cp4a\/ums" + old_bas="$REGISTRY_IN_FILE\/cp\/cp4a\/bas" + old_aae="$REGISTRY_IN_FILE\/cp\/cp4a\/aae" + old_baca="$REGISTRY_IN_FILE\/cp\/cp4a\/baca" + old_odm="$REGISTRY_IN_FILE\/cp\/cp4a\/odm" + old_baw="$REGISTRY_IN_FILE\/cp\/cp4a\/baw" + old_iaws="$REGISTRY_IN_FILE\/cp\/cp4a\/iaws" + old_ads="$REGISTRY_IN_FILE\/cp\/cp4a\/ads" + old_bai="$REGISTRY_IN_FILE\/cp\/cp4a" + old_workflow="$REGISTRY_IN_FILE\/cp\/cp4a\/workflow" - APPLY_ACA_CMD="oc apply -f ${ACA_PATTERN_FILE_BAK}" - if $APPLY_ACA_CMD ; then - echo -e "\x1B[1mDone\x1B[0m" + if [ "$use_entitlement" = "yes" ] ; then + ${SED_COMMAND} "s/$REGISTRY_IN_FILE/$DOCKER_REG_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} else - echo -e "\x1B[1;31mFailed\x1B[0m" + ${SED_COMMAND} "s/$old_fmcn/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s/$old_ban/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s/$old_db2/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s/$old_ldap/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s/$old_db2_etcd/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s/$old_busybox/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s/$old_ums/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s/$old_bas/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s/$old_aae/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s/$old_baca/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s/$old_odm/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s/$old_baw/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s/$old_iaws/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s/$old_ads/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "s/$old_workflow/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${FOUNDATION_PATTERN_FILE_TMP} + ${SED_COMMAND} "/imageCredentials:/{n;s/registry:.*/registry: "${CONVERT_LOCAL_REGISTRY_SERVER}"/;}" ${FOUNDATION_PATTERN_FILE_TMP} + fi - printf "\n" - echo -e "\x1B[1mThe custom resource file used is: \"${ACA_PATTERN_FILE_BAK}\"...\x1B[0m" -} -# End - Modify Automation Content Analyzer pattern yaml according pattent/components selected + # If BAI is selected as an optional component in a demo deployment, the installation of IBM Event Streams + # 10.0.0+ in the namespace targeted by the ICP4A deployment is a prerequisite. The connection + # information for Kafka clients is automatically extracted from the Event Streams instance + # and stored in shared_configuration.kafka_configuration. 
-# Begin - Modify WORKSTREAMS pattern yaml according pattent/components selected -function apply_workstreams_pattern_cr(){ - cp -rf ${WORKSTREAMS_PATTERN_FILE_BAK} ${WORKSTREAMS_PATTERN_FILE_TMP} + if [[ $DEPLOYMENT_TYPE == "demo" ]];then + containsElement "BusinessAutomationInsights" "${OPT_COMPONENTS_SELECTED[@]}" + retVal=$? + if [[ $retVal -eq 0 ]]; then + printf "\n" + while true; do + printf "\x1B[1mHas IBM Event Streams already been deployed to the same namespace for CP4A (a prerequisite for Business Automation Insights)?\x1B[0m" + printf "\n" + printf "\x1B[1mFor more information about the IBM Event Streams supported version number and licensing restrictions, see IBM Knowledge Center\x1B[0m" + read -rp "?(Yes/No):" ans + case "$ans" in + "y"|"Y"|"yes"|"Yes"|"YES") + ${CUR_DIR}/pull-eventstreams-connection-info.sh -f ${FOUNDATION_PATTERN_FILE_TMP} || true + break + ;; + "n"|"N"|"no"|"No"|"NO") + echo -e "\x1B[1;31mPlease refer to the documentation in Knowledge Center and deploy IBM Event Streams to the same namespace before proceeding with CP4A deployment.\n\x1B[0m" + echo -e "Exiting...\n" + exit 0 + ;; + *) + echo -e "Answer must be \"Yes\" or \"No\"\n" + ;; + esac + done + fi + fi + + # remove merge issue + ${YQ_CMD} d -i ${FOUNDATION_PATTERN_FILE_TMP} metadata.labels.app.* + # Keep existing value + if [[ "${DEPLOYMENT_TYPE}" == "existing" ]]; then + ${YQ_CMD} m -x -i ${FOUNDATION_PATTERN_FILE_TMP} ${FOUNDATION_EXISTING_BAK} + fi - # Set sc_deployment_hostname_suffix - ${SED_COMMAND} "s|sc_deployment_hostname_suffix:.*|sc_deployment_hostname_suffix: \"{{ meta.namespace }}.${INFRA_NAME}\"|g" ${WORKSTREAMS_PATTERN_FILE_TMP} + ${SED_COMMAND_FORMAT} ${FOUNDATION_PATTERN_FILE_TMP} + cp -rf ${FOUNDATION_PATTERN_FILE_TMP} ${FOUNDATION_PATTERN_FILE_BAK} + if [[ "$DEPLOYMENT_TYPE" == "demo" && "$INSTALLATION_TYPE" == "new" && $1 != "review" ]];then + ${CLI_CMD} delete -f ${FOUNDATION_PATTERN_FILE_BAK} >/dev/null 2>&1 + sleep 5 + printf "\n" + echo -e "\x1B[1mInstalling the selected Cloud Pak capability...\x1B[0m" - # Set sc_dynamic_storage_classname - ${SED_COMMAND} "s|sc_dynamic_storage_classname:.*|sc_dynamic_storage_classname: ${storage_class_name}|g" ${WORKSTREAMS_PATTERN_FILE_TMP} + APPLY_CONTENT_CMD="${CLI_CMD} apply -f ${FOUNDATION_PATTERN_FILE_BAK}" - # Set image_pull_secrets - ${SED_COMMAND} "s|image-pull-secret|${DOCKER_RES_SECRET_NAME}|g" ${WORKSTREAMS_PATTERN_FILE_TMP} + if $APPLY_CONTENT_CMD ; then + echo -e "\x1B[1mDone\x1B[0m" + else + echo -e "\x1B[1;31mFailed\x1B[0m" + fi + elif [[ "$DEPLOYMENT_TYPE" == "demo" && "$INSTALLATION_TYPE" == "existing" && $1 != "review" ]] + then + echo -e "\x1B[1mInstalling the selected Cloud Pak capability...\x1B[0m" + + APPLY_CONTENT_CMD="${CLI_CMD} apply -f ${FOUNDATION_PATTERN_FILE_BAK}" + + if $APPLY_CONTENT_CMD ; then + echo -e "\x1B[1mDone\x1B[0m" + else + echo -e "\x1B[1;31mFailed\x1B[0m" + fi - if [ "$use_entitlement" = "yes" ] ; then - # new_docker_reg_server="$DOCKER_REG_SERVER\/cp\/cp4a\/fncm" - ${SED_COMMAND} "s/cp.icr.io/$DOCKER_REG_SERVER/g" ${WORKSTREAMS_PATTERN_FILE_TMP} - else - old_ums="cp.icr.io\/cp\/cp4a\/ums" - old_aae="cp.icr.io\/cp\/cp4a\/aae" - old_ban="cp.icr.io\/cp\/cp4a\/ban" - old_bas="cp.icr.io\/cp\/cp4a\/bas" - old_fncm="cp.icr.io\/cp\/cp4a\/fncm" - old_iaws="cp.icr.io\/cp\/cp4a\/iaws" - - ${SED_COMMAND} "s/$old_ums/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${WORKSTREAMS_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_aae/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${WORKSTREAMS_PATTERN_FILE_TMP} - ${SED_COMMAND} 
"s/$old_ban/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${WORKSTREAMS_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_bas/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${WORKSTREAMS_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_fncm/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${WORKSTREAMS_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_iaws/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${WORKSTREAMS_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_db2/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${WORKSTREAMS_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_ldap/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${WORKSTREAMS_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_db2_etcd/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${WORKSTREAMS_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_busybox/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${WORKSTREAMS_PATTERN_FILE_TMP} fi + printf "\n" + echo -e "\x1B[1mThe custom resource file used is: \"${FOUNDATION_PATTERN_FILE_BAK}\"\x1B[0m" - ${SED_COMMAND_FORMAT} ${WORKSTREAMS_PATTERN_FILE_TMP} - cp -rf ${WORKSTREAMS_PATTERN_FILE_TMP} ${WORKSTREAMS_PATTERN_FILE_BAK} + printf "\n" + echo -e "\x1B[1mTo monitor the deployment status, follow the Operator logs.\x1B[0m" + echo -e "\x1B[1mFor details, refer to the troubleshooting section in Knowledge Center here: \x1B[0m" + echo -e "\x1B[1mhttps://www.ibm.com/support/knowledgecenter/SSYHZ8_20.0.x/com.ibm.dba.install/op_topics/tsk_trbleshoot_operators.html\x1B[0m" +} +# End - Modify FOUNDATION pattern yaml according pattent/components selected - oc delete -f ${WORKSTREAMS_PATTERN_FILE_BAK} >/dev/null 2>&1 - sleep 5 +function show_summary(){ printf "\n" - echo -e "\x1B[1mInstalling the selected Cloud Pak capability...\x1B[0m" - # printf "\n" - APPLY_WORKSTREAMS_CMD="oc apply -f ${WORKSTREAMS_PATTERN_FILE_BAK}" - if $APPLY_WORKSTREAMS_CMD ; then - echo -e "\x1B[1mDone\x1B[0m" + echo -e "\x1B[1m*******************************************************\x1B[0m" + echo -e "\x1B[1m Summary of input \x1B[0m" + echo -e "\x1B[1m*******************************************************\x1B[0m" + + echo -e "\x1B[1;31m1. Cloud Pak capability to deploy: \x1B[0m" + if [ "${#pattern_arr[@]}" -eq "0" ]; then + printf ' * %s\n' "None" else - echo -e "\x1B[1;31mFailed\x1B[0m" + printf ' * %s\n' "${pattern_arr[@]}" fi - printf "\n" - echo -e "\x1B[1mThe custom resource file used is: \"${WORKSTREAMS_PATTERN_FILE_BAK}\"...\x1B[0m" + echo -e "\x1B[1;31m2. 
Optional components to deploy: \x1B[0m" + if [ "${#OPT_COMPONENTS_SELECTED[@]}" -eq "0" ]; then + printf ' * %s\n' "None" + else + # printf ' * %s\n' "${OPT_COMPONENTS_SELECTED[@]}" + for each_opt_component in "${OPT_COMPONENTS_SELECTED[@]}" + do + if [[ ${each_opt_component} == "ExternalShare" ]]; then + printf ' * %s\n' "External Share" + elif [[ ${each_opt_component} == "TaskManager" ]] + then + printf ' * %s\n' "Task Manager" + elif [[ ${each_opt_component} == "ContentSearchServices" ]] + then + printf ' * %s\n' "Content Search Services" + elif [[ ${each_opt_component} == "DecisionCenter" ]] + then + printf ' * %s\n' "Decision Center" + elif [[ ${each_opt_component} == "RuleExecutionServer" ]] + then + printf ' * %s\n' "Rule Execution Server" + elif [[ ${each_opt_component} == "DecisionRunner" ]] + then + printf ' * %s\n' "Decision Runner" + elif [[ ${each_opt_component} == "DecisionDesigner" ]] + then + printf ' * %s\n' "Decision Designer" + elif [[ ${each_opt_component} == "DecisionRuntime" ]] + then + printf ' * %s\n' "Decision Runtime" + elif [[ "${each_opt_component}" == "ContentManagementInteroperabilityServices" ]] + then + printf ' * %s\n' "Content Management Interoperability Services" + elif [[ "${each_opt_component}" == "UserManagementService" ]] + then + printf ' * %s\n' "User Management Service" + elif [[ "${each_opt_component}" == "BusinessAutomationInsights" ]] + then + printf ' * %s\n' "Business Automation Insights" + elif [[ "${each_opt_component}" == "ApplicationDesigner" ]] + then + printf ' * %s\n' "Application Designer" + else + printf ' * %s\n' "${each_opt_component}" + fi + done + fi + + echo -e "\x1B[1;31m3. Entitlement Registry key:\x1B[0m ${DOCKER_REG_KEY}" + echo -e "\x1B[1;31m4. Docker registry service name or URL:\x1B[0m ${LOCAL_REGISTRY_SERVER}" + echo -e "\x1B[1;31m5. Docker registry user name:\x1B[0m ${LOCAL_REGISTRY_USER}" + # echo -e "\x1B[1;31m5. Docker registry password: ${LOCAL_REGISTRY_PWD}\x1B[0m" + echo -e "\x1B[1;31m6. Docker registry password:\x1B[0m" # do not show the plaintext password + if [[ $PLATFORM_SELECTED == "OCP" ]]; + then + echo -e "\x1B[1;31m7. OCP Infrastructure Node:\x1B[0m ${INFRA_NAME}" + if [[ $DEPLOYMENT_TYPE == "demo" ]]; + then + echo -e "\x1B[1;31m8. Dynamic storage classname:\x1B[0m ${STORAGE_CLASS_NAME}" + else + echo -e "\x1B[1;31m8. Dynamic storage classname:\x1B[0m" + printf ' * \x1B[1;31m%s\x1B[0m %s\n' "Slow:" "${SLOW_STORAGE_CLASS_NAME}" + printf ' * \x1B[1;31m%s\x1B[0m %s\n' "Medium:" "${MEDIUM_STORAGE_CLASS_NAME}" + printf ' * \x1B[1;31m%s\x1B[0m %s\n' "Fast:" "${FAST_STORAGE_CLASS_NAME}" + fi + else + if [[ $DEPLOYMENT_TYPE == "demo" ]]; + then + echo -e "\x1B[1;31m7. Dynamic storage classname:\x1B[0m ${STORAGE_CLASS_NAME}" + else + echo -e "\x1B[1;31m7. 
Dynamic storage classname:\x1B[0m" + printf ' * \x1B[1;31m%s\x1B[0m %s\n' "Slow:" "${SLOW_STORAGE_CLASS_NAME}" + printf ' * \x1B[1;31m%s\x1B[0m %s\n' "Medium:" "${MEDIUM_STORAGE_CLASS_NAME}" + printf ' * \x1B[1;31m%s\x1B[0m %s\n' "Fast:" "${FAST_STORAGE_CLASS_NAME}" + fi + fi + + echo -e "\x1B[1m*******************************************************\x1B[0m" } -# End - Modify WORKSTREAMS pattern yaml according pattent/components selected -# Begin - Modify DECISIONS pattern yaml according pattent/components selected -function apply_decisions_pattern_cr(){ - cp -rf ${DECISIONS_PATTERN_FILE_BAK} ${DECISIONS_PATTERN_FILE_TMP} - # Set dba_license=accept - ${SED_COMMAND} "s|dba_license:.*|dba_license: accept|g" ${DECISIONS_PATTERN_FILE_TMP} - # Set sc_optional_components - ${SED_COMMAND} "s|sc_optional_components:.*|sc_optional_components: \"\"|g" ${DECISIONS_PATTERN_FILE_TMP} +function prepare_pattern_file(){ + if [[ "${INSTALLATION_TYPE}" == "new" ]]; then + rm -rf $TEMP_FOLDER >/dev/null 2>&1 + rm -rf $BAK_FOLDER >/dev/null 2>&1 + rm -rf $FINAL_CR_FOLDER >/dev/null 2>&1 + fi + mkdir -p $TEMP_FOLDER >/dev/null 2>&1 + mkdir -p $BAK_FOLDER >/dev/null 2>&1 + mkdir -p $FINAL_CR_FOLDER >/dev/null 2>&1 - # Set sc_deployment_hostname_suffix - ${SED_COMMAND} "s|sc_deployment_hostname_suffix:.*|sc_deployment_hostname_suffix: \"{{ meta.namespace }}.${INFRA_NAME}\"|g" ${DECISIONS_PATTERN_FILE_TMP} + cp -rf "${OPERATOR_FILE}" "${OPERATOR_FILE_BAK}" + cp -rf "${OPERATOR_PVC_FILE}" "${OPERATOR_PVC_FILE_BAK}" - # Set sc_dynamic_storage_classname - ${SED_COMMAND} "s|sc_dynamic_storage_classname:.*|sc_dynamic_storage_classname: ${storage_class_name}|g" ${DECISIONS_PATTERN_FILE_TMP} + if [[ "$DEPLOYMENT_TYPE" == "enterprise" ]];then + DEPLOY_TYPE_IN_FILE_NAME="enterprise" + else + DEPLOY_TYPE_IN_FILE_NAME="demo" + fi - # Set image_pull_secrets - ${SED_COMMAND} "s|admin.registrykey|${DOCKER_RES_SECRET_NAME}|g" ${DECISIONS_PATTERN_FILE_TMP} + FOUNDATION_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_foundation.yaml - if [ "$use_entitlement" = "yes" ] ; then - ${SED_COMMAND} "s/cp.icr.io/$DOCKER_REG_SERVER/g" ${DECISIONS_PATTERN_FILE_TMP} + CONTENT_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_content.yaml + CONTENT_PATTERN_FILE_TMP=$TEMP_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_content_tmp.yaml + CONTENT_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_content.yaml - else - old_odm="cp.icr.io\/cp\/cp4a\/odm" + APPLICATION_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_application.yaml + APPLICATION_PATTERN_FILE_TMP=$TEMP_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_application_tmp.yaml + APPLICATION_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_application.yaml - ${SED_COMMAND} "s/$old_odm/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${DECISIONS_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_db2/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${DECISIONS_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_ldap/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${DECISIONS_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_db2_etcd/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${DECISIONS_PATTERN_FILE_TMP} - ${SED_COMMAND} "s/$old_busybox/$CONVERT_LOCAL_REGISTRY_SERVER/g" ${DECISIONS_PATTERN_FILE_TMP} - fi + DECISIONS_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_decisions.yaml + 
DECISIONS_PATTERN_FILE_TMP=$TEMP_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_decisions_tmp.yaml + DECISIONS_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_decisions.yaml - ${SED_COMMAND_FORMAT} ${DECISIONS_PATTERN_FILE_TMP} - cp -rf ${DECISIONS_PATTERN_FILE_TMP} ${DECISIONS_PATTERN_FILE_BAK} + ADS_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_decisions_ads.yaml + ADS_PATTERN_FILE_TMP=$TEMP_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_decisions_ads_tmp.yaml + ADS_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_decisions_ads.yaml - oc delete -f ${DECISIONS_PATTERN_FILE_BAK} >/dev/null 2>&1 - sleep 5 - printf "\n" - echo -e "\x1B[1mInstalling the selected Cloud Pak capability...\x1B[0m" - # printf "\n" - APPLY_DECISIONS_CMD="oc apply -f ${DECISIONS_PATTERN_FILE_BAK}" - if $APPLY_DECISIONS_CMD ; then - echo -e "\x1B[1mDone\x1B[0m" + ACA_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_aca.yaml + ACA_PATTERN_FILE_TMP=$TEMP_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_aca_tmp.yaml + ACA_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_aca.yaml + + # Temporarily use the ADS template. + ADW_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_digitalworker.yaml + ADW_PATTERN_FILE_TMP=$TEMP_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_digitalworker_tmp.yaml + ADW_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_digitalworker.yaml + + + cp -rf "${CONTENT_PATTERN_FILE}" "${CONTENT_PATTERN_FILE_BAK}" + cp -rf "${APPLICATION_PATTERN_FILE}" "${APPLICATION_PATTERN_FILE_BAK}" + cp -rf "${ADS_PATTERN_FILE}" "${ADS_PATTERN_FILE_BAK}" + cp -rf "${DECISIONS_PATTERN_FILE}" "${DECISIONS_PATTERN_FILE_BAK}" + cp -rf "${ACA_PATTERN_FILE}" "${ACA_PATTERN_FILE_BAK}" + cp -rf "${ADW_PATTERN_FILE}" "${ADW_PATTERN_FILE_BAK}" + # support existing installation + if [ -f "$FOUNDATION_PATTERN_FILE_BAK" ]; then + cp -rf "${FOUNDATION_PATTERN_FILE_BAK}" "${FOUNDATION_EXISTING_BAK}" + ${YQ_CMD} d -i ${FOUNDATION_EXISTING_BAK} spec.shared_configuration else - echo -e "\x1B[1;31mFailed\x1B[0m" + cp -rf "${FOUNDATION_PATTERN_FILE}" "${FOUNDATION_PATTERN_FILE_BAK}" fi - printf "\n" - echo -e "\x1B[1mThe custom resource file used is: \"${DECISIONS_PATTERN_FILE_BAK}\"...\x1B[0m" - + if [[ "$DEPLOYMENT_TYPE" == "demo" ]];then + WORKFLOW_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_workflow.yaml + WORKFLOW_PATTERN_FILE_TMP=$TEMP_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_workflow_tmp.yaml + WORKFLOW_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_workflow.yaml + + # WORKSTREAMS_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_workstreams.yaml + # WORKSTREAMS_PATTERN_FILE_TMP=$TEMP_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_workstreams_tmp.yaml + # WORKSTREAMS_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_workstreams.yaml + + WW_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_workflow-workstreams.yaml + WW_PATTERN_FILE_TMP=$TEMP_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_workflow-workstreams_tmp.yaml + WW_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_workflow-workstreams.yaml + cp -rf "${WORKFLOW_PATTERN_FILE}" "${WORKFLOW_PATTERN_FILE_BAK}" + cp -rf "${WW_PATTERN_FILE}" "${WW_PATTERN_FILE_BAK}" + # get_baw_mode + # 
retVal_baw=$? + # if [ $retVal_baw -eq 0 ]; then + # WORKFLOW_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_workflow.yaml + # WORKFLOW_PATTERN_FILE_TMP=$TEMP_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_workflow_tmp.yaml + # WORKFLOW_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_workflow.yaml + # else + # WORKFLOW_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_workflow-workstreams.yaml + # WORKFLOW_PATTERN_FILE_TMP=$TEMP_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_workflow-workstreams_tmp.yaml + # WORKFLOW_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_workflow-workstreams.yaml + # fi + elif [[ "$DEPLOYMENT_TYPE" == "enterprise" ]] + then + WORKFLOW_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_FC_workflow.yaml + WORKFLOW_PATTERN_FILE_TMP=$TEMP_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_FC_workflow_tmp.yaml + WORKFLOW_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_FC_workflow.yaml + + WORKSTREAMS_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_FC_workstreams.yaml + WORKSTREAMS_PATTERN_FILE_TMP=$TEMP_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_FC_workstreams_tmp.yaml + WORKSTREAMS_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_FC_workstreams.yaml + + WW_PATTERN_FILE=${PARENT_DIR}/descriptors/patterns/ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_FC_workflow-workstreams.yaml + WW_PATTERN_FILE_TMP=$TEMP_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_FC_workflow-workstreams_tmp.yaml + WW_PATTERN_FILE_BAK=$BAK_FOLDER/.ibm_cp4a_cr_${DEPLOY_TYPE_IN_FILE_NAME}_FC_workflow-workstreams.yaml + cp -rf "${WORKFLOW_PATTERN_FILE}" "${WORKFLOW_PATTERN_FILE_BAK}" + cp -rf "${WORKSTREAMS_PATTERN_FILE}" "${WORKSTREAMS_PATTERN_FILE_BAK}" + cp -rf "${WW_PATTERN_FILE}" "${WW_PATTERN_FILE_BAK}" + fi } -# End - Modify DECISIONS pattern yaml according pattent/components selected - ################################################ #### Begin - Main step for install operator #### ################################################ -rm -rf $TMEP_FOLDER >/dev/null 2>&1 -rm -rf $BAK_FOLDER >/dev/null 2>&1 - -mkdir -p $TMEP_FOLDER >/dev/null 2>&1 -mkdir -p $BAK_FOLDER >/dev/null 2>&1 -cp -rf "${OPERATOR_FILE}" "${OPERATOR_FILE_BAK}" -cp -rf "${OPERATOR_PVC_FILE}" "${OPERATOR_PVC_FILE_BAK}" -cp -rf "${CONTENT_PATTERN_FILE}" "${CONTENT_PATTERN_FILE_BAK}" -cp -rf "${APPLICATION_PATTERN_FILE}" "${APPLICATION_PATTERN_FILE_BAK}" -cp -rf "${ACA_PATTERN_FILE}" "${ACA_PATTERN_FILE_BAK}" -cp -rf "${WORKSTREAMS_PATTERN_FILE}" "${WORKSTREAMS_PATTERN_FILE_BAK}" -cp -rf "${DECISIONS_PATTERN_FILE}" "${DECISIONS_PATTERN_FILE_BAK}" prompt_license + input_information + show_summary while true; do @@ -1006,37 +2588,22 @@ while true; do printf "\n" echo -e "\x1B[1mInstalling the Cloud Pak for Automation operator...\x1B[0m" printf "\n" - - if [ "$use_entitlement" = "no" ] ; then - create_secret_local_registry - else - create_secret_entitlement_registry + if [[ "${INSTALLATION_TYPE}" == "new" ]]; then + if [ "$use_entitlement" = "no" ] ; then + create_secret_local_registry + else + create_secret_entitlement_registry + fi + if [[ $1 == "review" ]]; then + echo -e "\x1B[1mReview mode: only the final CR will be generated; the operator will not be deployed\x1B[0m" + read -rsn1 -p"Press any key to continue";echo + else + allocate_operator_pvc + apply_cp4a_operator + copy_jdbc_driver + fi fi - 
allocate_operator_pvc - apply_cp4a_operator - copy_db2_jdbc - case $PATTERN_SELECTED in - "FileNet Content Manager") - apply_content_pattern_cr - break - ;; - "Automation Content Analyzer") - apply_aca_pattern_cr - break - ;; - "Operational Decision Manager") - apply_decisions_pattern_cr - break - ;; - "Automation Applications") - apply_application_pattern_cr - break - ;; - "Automation Workstream Services") - apply_workstreams_pattern_cr - break - ;; - esac + apply_pattern_cr break ;; "n"|"N"|"no"|"No"|"NO"|*) @@ -1044,47 +2611,91 @@ while true; do printf "\n" show_summary printf "\n" - printf "\x1B[1mEnter the number from 1 to 8 that you want to change: \x1B[0m" + if [[ $PLATFORM_SELECTED == "OCP" ]]; + then + printf "\x1B[1mEnter the number from 1 to 8 that you want to change: \x1B[0m" + else + printf "\x1B[1mEnter the number from 1 to 7 that you want to change: \x1B[0m" + fi + read -rp "" ans - case "$ans" in - "1") - select_pattern + if [[ $PLATFORM_SELECTED == "OCP" ]]; + then + case "$ans" in + "1") + select_pattern + select_optional_component + break + ;; + "2") + select_optional_component + break + ;; + "3") + get_entitlement_registry + break + ;; + "4") + get_local_registry_server + break + ;; + "5") + get_local_registry_user + break + ;; + "6") + get_local_registry_password + break + ;; + "7") + get_infra_name + break + ;; + "8") + get_storage_class_name + break + ;; + *) + echo -e "\x1B[1mEnter a valid number [1 to 8] \x1B[0m" + ;; + esac + else + case "$ans" in + "1") + select_pattern - select_optional_component - break - ;; - "2") - select_optional_component - break - ;; - "3") - get_entitlement_registry - break - ;; - "4") - get_local_registry_server - break - ;; - "5") - get_local_registry_user - break - ;; - "6") - get_local_registry_password - break - ;; - "7") - get_infra_name - break - ;; - "8") - get_storage_class_name - break - ;; - *) - echo -e "\x1B[1mEnter a valid number [1 to 8] \x1B[0m" - ;; - esac + select_optional_component + break + ;; + "2") + select_optional_component + break + ;; + "3") + get_entitlement_registry + break + ;; + "4") + get_local_registry_server + break + ;; + "5") + get_local_registry_user + break + ;; + "6") + get_local_registry_password + break + ;; + "7") + get_storage_class_name + break + ;; + *) + echo -e "\x1B[1mEnter a valid number [1 to 7] \x1B[0m" + ;; + esac + fi done show_summary ;; diff --git a/scripts/cp4a-post-deployment.sh b/scripts/cp4a-post-deployment.sh index 589795b3..ce53ac92 100755 --- a/scripts/cp4a-post-deployment.sh +++ b/scripts/cp4a-post-deployment.sh @@ -12,17 +12,47 @@ ############################################################################### CUR_DIR=$(cd $(dirname $0); pwd) PARENT_DIR=$(dirname "$PWD") -TMEP_FOLDER=${CUR_DIR}/.tmp + +FINAL_CR_FOLDER=${CUR_DIR}/generated-cr +PATTERN_ARR=() +OPT_COMPONENT_ARR=() +function set_global_env_vars() { + readonly unameOut="$(uname -s)" + case "${unameOut}" in + Linux*) readonly machine="Linux";; + Darwin*) readonly machine="Mac";; + *) readonly machine="UNKNOWN:${unameOut}" + esac + + if [[ "$machine" == "Mac" ]]; then + YQ_CMD=${CUR_DIR}/helper/yq/yq_darwin_amd64 + else + YQ_CMD=${CUR_DIR}/helper/yq/yq_linux_amd64 + fi +} + +set_global_env_vars # 1Q only supports single pattern, 2Q will search dedicated file to support multiple pattern -CMD="find $TMEP_FOLDER -maxdepth 1 -name \"*ibm_cp4a_cr_demo_*\" -print" +CMD="find $FINAL_CR_FOLDER -maxdepth 1 -name \"*ibm_cp4a_cr_final*\" -print" if $CMD ; then echo -e "\x1B[1mShowing the access information and User 
credentials\x1B[0m" - - pattern_file=$(find $TMEP_FOLDER -maxdepth 1 -name "*ibm_cp4a_cr_demo_*" -print) - pattern_name=$(grep -A1 'shared_configuration:' $pattern_file | tail -n1); pattern_name=${pattern_name//*sc_deployment_patterns: /} - metadata_name=$(grep -A1 'metadata:' $pattern_file | tail -n1); metadata_name=${metadata_name//*name: /} - optional_components=$(grep -A2 'shared_configuration:' $pattern_file | tail -n1); optional_components=${optional_components//*sc_optional_components: /}; temp="${optional_components%\"}"; temp="${temp#\"}"; optional_components="$temp" + + pattern_file=$(find $FINAL_CR_FOLDER -maxdepth 1 -name "*ibm_cp4a_cr_final*" -print) + # pattern_name=$(grep -A1 'shared_configuration:' $pattern_file | tail -n1); pattern_name=${pattern_name//*sc_deployment_patterns: /} + pattern_name=$(${YQ_CMD} r $pattern_file spec.shared_configuration.sc_deployment_patterns) + OIFS=$IFS + IFS=',' read -r -a PATTERN_ARR <<< "$pattern_name" + IFS=$OIFS + + # metadata_name=$(grep -A1 'metadata:' $pattern_file | tail -n1); metadata_name=${metadata_name//*name: /} + metadata_name=$(${YQ_CMD} r $pattern_file metadata.name) + optional_components=$(${YQ_CMD} r $pattern_file spec.shared_configuration.sc_optional_components) + deployment_type=$(${YQ_CMD} r $pattern_file spec.shared_configuration.sc_deployment_type) + OIFS=$IFS + IFS=',' read -r -a OPT_COMPONENT_ARR <<< "$optional_components" + IFS=$OIFS + else echo -e "\x1B[1;31mPlease run cp4a-deployment.sh script to deploy pattern firstly\x1B[0m" exit 1 @@ -35,12 +65,19 @@ function validate_cli(){ exit 1 } +function containsElement () { + local e match="$1" + shift + for e; do [[ "$e" == "$match" ]] && return 0; done + return 1 +} + function display_content_routes_credentials() { echo - echo "Below are the available routes for FileNet Content Manager:"    + echo "Below are the available routes for FileNet Content Manager:"    echo "==========================================================:" echo - oc get routes + oc get routes echo echo echo -e "\x1B[1mYou can access ACCE and Navigator via the following URLs:\x1B[0m" @@ -52,118 +89,335 @@ function display_content_routes_credentials() { echo echo -n "ACCE usename: "; oc get secret ibm-fncm-secret -o jsonpath='{ .data.appLoginUsername}' | base64 -d; echo echo -n "ACCE user password: "; oc get secret ibm-fncm-secret -o jsonpath='{ .data.appLoginPassword}' | base64 -d; echo - echo + echo echo -n "Navigator usename: "; oc get secret ibm-ban-secret -o jsonpath='{ .data.appLoginUsername}' | base64 -d; echo echo -n "Navigator user password: "; oc get secret ibm-ban-secret -o jsonpath='{ .data.appLoginPassword}' | base64 -d; echo } -function display_workstreams_routes_credentials() { +function display_workflow_workstreams_routes_credentials() { echo - echo "Below are the available routes for Automation Workstream Services:"    + if [[ $item == "workflow-workstreams" ]]; then + echo "Below are the available routes for Business Automation Workflow & Workstreams:" + fi + if [[ $item == "workflow" ]]; then + echo "Below are the available routes for Business Automation Workflow:" + fi + if [[ $item == "workstreams" ]]; then + echo "Below are the available routes for Business Automation Workstreams:" + fi echo "=================================================================:" echo - oc get routes + oc get routes echo echo - echo -e "\x1B[1mYou can access Automation Workstream Services via the following URL:\x1B[0m" - echo -e "https://$(oc get routes --no-headers | grep navigator-route | awk 
{'print $2'})/navigator" + echo -e "\x1B[1mYou can access Process Federated Server to see federated workflow servers via the following URL:\x1B[0m" + echo -e "https://$(oc get routes --no-headers | grep pfs-route | awk {'print $2'})/rest/bpm/federated/v1/systems" echo - echo "User credentials:" - echo "================" + if [[ $item == "workflow-workstreams" ]]; then + echo -e "\x1B[1mYou can access Business Automation Workflow Portal, Case Client, and Workstreams via the following URLs:\x1B[0m" + fi + if [[ $item == "workflow" ]]; then + echo -e "\x1B[1mYou can access Business Automation Workflow Portal, Case Client via the following URLs:\x1B[0m" + fi + if [[ $item == "workstreams" ]]; then + echo -e "\x1B[1mYou can access Business Automation Workstreams via the following URLs:\x1B[0m" + fi + if [[ $item == "workflow" || $item == "workflow-workstreams" ]]; then + echo -e "https://$(oc get routes --no-headers | grep baw-server | awk {'print $2'})/ProcessPortal" + echo -e "https://$(oc get routes --no-headers | grep navigator-route | awk {'print $2'})/navigator?desktop=baw" + fi + if [[ $item == "workstreams" || $item == "workflow-workstreams" ]]; then + echo -e "https://$(oc get routes --no-headers | grep navigator-route | awk {'print $2'})/navigator?desktop=IBMWorkplace1" + fi echo - echo -n "Navigator usename: "; oc get secret ibm-fncm-secret -o jsonpath='{ .data.appLoginUsername}' | base64 -d; echo - echo -n "Navigator user password: "; oc get secret ibm-fncm-secret -o jsonpath='{ .data.appLoginPassword}' | base64 -d; echo + if [[ $deployment_type == "demo" ]]; then + echo "User credentials:" + echo "================" + echo + echo -n "Default administrator username: "; echo "cp4admin" + echo -n "Default administrator password: "; pwd=$(oc get cm "${metadata_name}-openldap-customldif" -o yaml |grep "userpassword: " | head -n1); pwd=${pwd//*userpassword: /}; echo "$pwd" + fi } function display_application_routes_credentials() { echo - echo "Below are the available routes for Automation Applications:"    + echo "Below are the available routes for Business Automation Application:"    echo "==========================================================:" echo - oc get routes + oc get routes echo echo - echo -e "\x1B[1mYou can access Business Automation Studio and Navigator via the following URLs:\x1B[0m" - echo -e "https://$(oc get routes --no-headers | grep bastudio-route | awk {'print $2'})/BAStudio" + echo -e "\x1B[1mYou can access Navigator via the following URLs:\x1B[0m" echo -e "https://$(oc get routes --no-headers | grep navigator-route | awk {'print $2'})/navigator" + bastudio_install=$(${YQ_CMD} r $pattern_file spec.bastudio_configuration) + if [[ " ${OPT_COMPONENT_ARR[@]} " =~ "app_designer" || (-n "$bastudio_install") ]]; then + echo -e "\x1B[1mYou can access Business Automation Studio via the following URLs:\x1B[0m" + echo -e "https://$(oc get routes --no-headers | grep bastudio-route | awk {'print $2'})/BAStudio" + fi echo - echo "User credentials:" - echo "================" - echo - echo -n "Default administrator username: "; echo "cp4admin" - echo -n "Default administrator password: "; pwd=$(oc get cm "${metadata_name}-openldap-customldif" -o yaml |grep "userpassword: " | head -n1); pwd=${pwd//*userpassword: /}; echo "$pwd" + if [[ $deployment_type == "demo" ]]; then + echo "User credentials:" + echo "================" + echo + echo -n "Default administrator username: "; echo "cp4admin" + echo -n "Default administrator password: "; pwd=$(oc get cm 
"${metadata_name}-openldap-customldif" -o yaml |grep "userpassword: " | head -n1); pwd=${pwd//*userpassword: /}; echo "$pwd" + fi } function display_contentanalyzer_routes_credentials() { echo - echo "Below are the available routes for Automation Content Analyzer:"    + echo "Below are the available routes for Automation Content Analyzer:"    echo "==============================================================:" echo oc get routes echo echo - echo -e "\x1B[1mYou can access Automation Content Analyzer via the following URLs:\x1B[0m" + echo -e "\x1B[1mYou can access Automation Content Analyzer via the following URLs:\x1B[0m" echo -e "https://$(oc get routes --no-headers | grep spbackend | awk {'print $2'})" - echo -e "https://$(oc get routes --no-headers | grep spfrontend | awk {'print $2'})/?tid=ont1&ont=ONT1" - echo - echo "User credentials:" - echo "================" echo - echo -n "Default administrator username: "; echo "cp4admin" - if [ "$optional_components" == "ums" ] || [ "$optional_components" == "ldap" ]; then - echo -n "Default administrator password: "; pwd=$(oc get cm "${metadata_name}-openldap-customldif" -o yaml |grep "userpassword: " | head -n1); pwd=${pwd//*userpassword: /}; echo "$pwd" - else - echo -n "Default administrator password: "; echo "" + if [[ $deployment_type == "demo" ]]; then + echo -e "https://$(oc get routes --no-headers | grep spfrontend | awk {'print $2'})/?tid=ont1&ont=ONT1" + echo "User credentials:" + echo "================" + echo + echo -n "Default administrator username: "; echo "cp4admin" + echo -n "Default administrator password: "; pwd=$(oc get cm "${metadata_name}-openldap-customldif" -o yaml |grep "userpassword: " | head -n1); pwd=${pwd//*userpassword: /}; echo "$pwd" + elif [[ $deployment_type == "enterprise" ]]; then + echo -e "https://$(oc get routes --no-headers | grep spfrontend | awk {'print $2'})/?tid=&ont=" + echo -e "NOTE: You must replace the with tenant ID and the ontology values created during the Tenant DB initialization steps" + echo -e "You can logon to Content Analyzer with the user used when creating the Content Analyzer's tenant database" + echo fi + echo -e "\x1B[1mYou can access Business Automation Studio via the following URLs:\x1B[0m" + echo -e "https://$(oc get routes --no-headers | grep bastudio-route | awk {'print $2'})/BAStudio" } function display_decisions_routes_credentials() { echo - echo "Below are the available routes for Operational Decision Manager:"    + echo "Below are the available routes for Operational Decision Manager:"    echo "===============================================================:" echo oc get routes -l app=ibm-odm-prod echo echo - echo -e "\x1B[1mYou can access the Business Console and the Decision Server Console via the following URLs::\x1B[0m" - echo -e "Use https://$(oc get routes --no-headers | grep odm-dc-route | awk {'print $2'} ) to access, the Business Console" - echo -e "Use https://$(oc get routes --no-headers | grep odm-ds-console-route | awk {'print $2'}) to access the Decision Server Console" - echo - echo -e "In order to access these routes from your workstation/laptop, update your local 'host' file (e.g., /private/etc/hosts on Mac or" - echo -e "c:/windows/system32/drivers/etc/hosts on Windows) with the IP address of the OCP infrastructure node and the name of the route." 
- echo -e "For example:" - echo -e "xxx.xxx.xxx.xxx - decisions-odm-dc-route-odm-p.router.default.svc.cluster.local" - echo -e "xxx.xxx.xxx.xxx - decisions-odm-ds-console-route-odm-p.router.default.svc.cluster.local" - echo -e "where xxx.xxx.xxx.xxx is the IP address of the OCP infrastructure node" + isDsrEnabled=$(${YQ_CMD} r $pattern_file spec.odm_configuration.decisionServerRuntime.enabled) + isDrEnabled=$(${YQ_CMD} r $pattern_file spec.odm_configuration.decisionRunner.enabled) + if [ $(${YQ_CMD} r $pattern_file spec.odm_configuration.decisionCenter.enabled) == true ]; then + echo -e "Use \x1B[1mhttps://$(oc get routes --field-selector metadata.name=${metadata_name}-odm-dc-route --no-headers | awk {'print $2'})/decisioncenter \x1B[0m to access, the Decision Center console" + fi + if [[ $isDsrEnabled == true || $isDrEnabled == true ]]; then + echo -e "Use \x1B[1m https://$(oc get routes --field-selector metadata.name=${metadata_name}-odm-ds-console-route --no-headers | awk {'print $2'})\x1B[0m to access the Decision Server Console" + fi + if [[ $isDsrEnabled == true ]]; then + echo -e "Use \x1B[1m https://$(oc get routes --field-selector metadata.name=${metadata_name}-odm-ds-runtime-route --no-headers | awk {'print $2'})\x1B[0m endpoint to invoke the Decision Server Runtime" + fi + if [[ $isDrEnabled == true ]]; then + echo -e "Use \x1B[1m https://$(oc get routes --field-selector metadata.name=${metadata_name}-odm-dr-route --no-headers | awk {'print $2'})\x1B[0m endpoint to invoke the Decision Runner" + fi + + + if [[ ! "$optional_components" =~ "ums" ]]; then echo echo "User credentials:" echo "================" echo echo -n "Default administrator username: "; echo "odmAdmin" echo -n "Default administrator password: "; echo "odmAdmin" + fi +} + +function display_decisions_ads_routes_credentials() { + echo + echo "Below are the available routes for Automation Decision Services:" + echo "================================================================" + echo + oc get routes -l app.kubernetes.io/component=ads + echo + if [[ " ${OPT_COMPONENT_ARR[@]} " =~ "ads_designer" ]]; then + echo -e "\x1B[1mYou can access ADS Designer via the Business Automation Studio URL:\x1B[0m" + + echo -e "https://$(oc get routes --no-headers | grep bastudio-route | awk {'print $2'})/BAStudio" + echo + if [[ ${deployment_type} == "demo" ]]; then + echo "User credentials:" + echo "================" + echo + echo -n "Default administrator username: "; echo "cp4admin" + echo -n "Default administrator password: "; pwd=$(oc get cm "${metadata_name}-openldap-customldif" -o yaml |grep "userpassword: " | head -n1); pwd=${pwd//*userpassword: /}; echo "$pwd" + echo + fi + fi + if [[ " ${OPT_COMPONENT_ARR[@]} " =~ "ads_runtime" ]]; then + echo -e "\x1B[1mYou can access ADS Runtime swagger URL:\x1B[0m" + + echo -e "https://$(oc get routes --no-headers | grep runtime-service | awk {'print $2'})/api/swagger-ui" + echo + echo "User credentials (for execution):" + echo "=================================" + echo + echo -n "username: "; oc get secret ibm-dba-ads-runtime-secret -o jsonpath='{ .data.decisionRuntimeUser}' | base64 -d; echo + echo -n "password: "; oc get secret ibm-dba-ads-runtime-secret -o jsonpath='{ .data.decisionRuntimePassword}' | base64 -d; echo + echo + fi } +function display_digitalworker_routes_credentials() { + echo + echo "Below are the available routes for Automation Digital Worker:" + echo "================================================================" + echo + + oc get routes -l 
app=ibm-automation-digital-worker-prod + echo + + echo -e "\x1B[1mYou can access ADW Designer via the Business Automation Studio URL:\x1B[0m" + + echo -e "https://$(oc get routes --no-headers | grep bastudio-route | awk {'print $2'})/BAStudio" + echo + if [[ ${deployment_type} == "demo" ]]; then + echo "User credentials:" + echo "================" + echo + echo -n "Default administrator username: "; echo "cp4admin" + echo -n "Default administrator password: "; pwd=$(oc get cm "${metadata_name}-openldap-customldif" -o yaml |grep "userpassword: " | head -n1); pwd=${pwd//*userpassword: /}; echo "$pwd" + echo + fi + echo -e "\x1B[1mYou can access the ADW Runtime via the following URL:\x1B[0m" + + echo -e "https://$(oc get routes --no-headers | grep ${metadata_name}-adw-runtime-route | awk {'print $2'})" + echo + if [[ ${deployment_type} == "demo" ]]; then + echo "User credentials (for execution):" + echo "=================================" + echo + echo -n "Default administrator username: "; echo "cp4admin" + echo -n "Default administrator password: "; pwd=$(oc get cm "${metadata_name}-openldap-customldif" -o yaml |grep "userpassword: " | head -n1); pwd=${pwd//*userpassword: /}; echo "$pwd" + echo + fi +} + +function display_bai_routes_credentials() { + echo + echo "Below are the available routes for Business Automation Insights:"    + echo "===============================================================:" + echo + oc get routes -l app=ibm-business-automation-insights + + echo -e "\x1B[1mYou can access Business Performance Center via the following URL:\x1B[0m" + echo -e "https://$(oc get routes --no-headers | grep ${metadata_name}-bai-business-performance-center-route | awk {'print $2'})" + echo + if [[ $deployment_type == "demo" ]]; then + echo "User credentials:" + echo "================" + echo + echo -n "Default username: "; echo "user1" + echo -n "Default password: "; pwd=$( oc get cm "${metadata_name}-openldap-customldif" -o yaml | grep "userpassword: " | head -n2); pwd=${pwd//*userpassword: /}; echo "$pwd" + echo + fi + + echo -e "\x1B[1mYou can access Kibana via the following URL:\x1B[0m" + echo -e "https://$(oc get routes --no-headers | grep ${metadata_name}-bai-kibana-route | awk {'print $2'})" + echo + echo "User credentials:" + echo "================" + echo + ek_secret=$(${YQ_CMD} r $pattern_file spec.bai_configuration.ekSecret) + if [[ -z $ek_secret || $ek_secret == null ]]; then + echo "Default username: admin"; + echo "Default password: passw0rd"; + else + echo -n "Username: "; oc get secret $ek_secret -o jsonpath='{ .data.elasticsearch-username}' | base64 -d; echo + echo -n "Password: "; oc get secret $ek_secret -o jsonpath='{ .data.elasticsearch-password}' | base64 -d; echo + fi + echo + + echo -e "\x1B[1mYou can access Admin API via the following URL:\x1B[0m" + echo -e "https://$(oc get routes --no-headers | grep ${metadata_name}-bai-admin-route | awk {'print $2'})" + echo + echo "User credentials:" + echo "================" + echo + bai_secret=$(${YQ_CMD} r $pattern_file spec.bai_configuration.baiSecret) + if [[ -z $bai_secret || $bai_secret == null ]]; then + echo -n "Default username: "; oc get cm ${metadata_name}-bai-env -o jsonpath='{ .data.admin-username}'; echo + echo -n "Default password: "; oc get secret ${metadata_name}-bai-secrets -o jsonpath='{ .data.admin-password}' | base64 -d; echo + else + echo -n "Username: "; oc get secret $bai_secret -o jsonpath='{ .data.admin-username}' | base64 -d; echo + echo -n "Password: "; oc get secret $bai_secret -o jsonpath='{ .data.admin-password}' | base64 -d; 
echo + fi + + kafka_configuration=$(${YQ_CMD} r $pattern_file spec.shared_configuration.kafka_configuration) + if [[ -z $kafka_configuration || $kafka_configuration == null ]]; then + echo + echo -e "\x1B[1mThere is no Kafka client configuration provided.\x1B[0m" + echo + else + echo + echo -e "\x1B[1mYou can configure Kafka client with the following configuration information:\x1B[0m" + echo + echo -n "Bootstrap servers: "; ${YQ_CMD} r $pattern_file spec.shared_configuration.kafka_configuration.bootstrap_servers; + echo -n "Security protocol: "; ${YQ_CMD} r $pattern_file spec.shared_configuration.kafka_configuration.security_protocol; + echo -n "SASL mechanism: "; ${YQ_CMD} r $pattern_file spec.shared_configuration.kafka_configuration.sasl_mechanism; + kafka_connection_secret=$(${YQ_CMD} r $pattern_file spec.shared_configuration.kafka_configuration.connection_secret_name) + if [[ -z $kafka_connection_secret || $kafka_connection_secret == null ]]; then + echo "The Kafka server doesn't require authentication." + else + echo -n "Username: "; oc get secret $kafka_connection_secret -o jsonpath='{ .data.kafka-username}' | base64 -d; echo + echo -n "Password: "; oc get secret $kafka_connection_secret -o jsonpath='{ .data.kafka-password}' | base64 -d; echo + echo -n "Server certificate: "; oc get secret $kafka_connection_secret -o jsonpath='{ .data.kafka-server-certificate}' | base64 -d; echo + fi + fi +} validate_cli # The script should check the .tmp directory for the CR that is being used and determine which pattern is deployed and call the correct function. -# Or the script should just ask the user which pattern was deployed, and then call the necessary function for that pattern. + # Or the script should just ask the user which pattern was deployed, and then call the necessary function for that pattern. # 1Q only supports single pattern, 2Q will search dedicated file to support multiple pattern -case "$pattern_name" in - content) - display_content_routes_credentials - ;; - workstreams) - display_workstreams_routes_credentials - ;; - application) - display_application_routes_credentials - ;; - contentanalyzer) - display_contentanalyzer_routes_credentials - ;; - decisions) - display_decisions_routes_credentials - ;; -esac - + +for item in "${PATTERN_ARR[@]}"; do + while true; do + case "$item" in + "content") + display_content_routes_credentials + break + ;; + "workflow"|"workstreams"|"workflow-workstreams") + display_workflow_workstreams_routes_credentials + break + ;; + "application") + display_application_routes_credentials + break + ;; + "contentanalyzer") + display_contentanalyzer_routes_credentials + break + ;; + "decisions") + display_decisions_routes_credentials + break + ;; + "decisions_ads") + display_decisions_ads_routes_credentials + break + ;; + "digitalworker") + display_digitalworker_routes_credentials + break + ;; + "foundation") + break + ;; + *) + break + ;; + esac + done +done + +for item in "${OPT_COMPONENT_ARR[@]}"; do + case "$item" in + "bai") + display_bai_routes_credentials + break + ;; + esac +done diff --git a/scripts/deployOperator.sh b/scripts/deployOperator.sh index db6ded01..e78bbfe4 100755 --- a/scripts/deployOperator.sh +++ b/scripts/deployOperator.sh @@ -9,6 +9,11 @@ # disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
# ############################################################################### +# This script needs to be executed under the root path cert-kubernetes +CUR_DIR=$(pwd) +PLATFORM_VERSION="" +source ${CUR_DIR}/scripts/helper/common.sh +check_platform_version function show_help { echo -e "\nUsage: deployOperator.sh -i operator_image [-p 'secret_name']\n" @@ -17,6 +22,7 @@ function show_help { echo " -i Operator image name" echo " For example: cp.icr.io/cp/icp4a-operator:20.0.1 or registry_url/icp4a-operator:version" echo " -p Optional: Pull secret to use to connect to the registry" + echo " -n The namespace in which to deploy the operator" echo " -a Accept IBM license" } @@ -25,7 +31,7 @@ then show_help exit -1 else - while getopts "h?i:p:a:" opt; do + while getopts "h?i:p:n:a:" opt; do case "$opt" in h|\?) show_help @@ -35,6 +41,8 @@ else ;; p) PULLSECRET=$OPTARG ;; + n) NAMESPACE=$OPTARG + ;; a) LICENSE_ACCEPTED=$OPTARG ;; :) echo "Invalid option: -$OPTARG requires an argument" @@ -48,6 +56,17 @@ fi [ -f ./deployoperator.yaml ] && rm ./deployoperator.yaml cp ./descriptors/operator.yaml ./deployoperator.yaml +[ -f ./cluster_role_binding.yaml ] && rm ./cluster_role_binding.yaml +cp ./descriptors/cluster_role_binding.yaml ./cluster_role_binding.yaml + +# Uncomment runAsUser for OCP 3.11 +function ocp311_special(){ + if [[ ${PLATFORM_VERSION} == "3.11" ]]; then + oc adm policy add-scc-to-user privileged -z ibm-cp4a-operator -n ${NAMESPACE} + sed -e 's/\# runAsUser\: 1001/runAsUser\: 1001/g' ./deployoperator.yaml > ./deployoperatorsav.yaml ; mv ./deployoperatorsav.yaml ./deployoperator.yaml + fi +} + # Show license file function readLicense() { echo -e "\033[32mYou need to read the International Program License Agreement before starting\033[0m" @@ -78,6 +97,8 @@ fi if [[ $LICENSE_ACCEPTED == "accept" ]]; then sed -e '/dba_license/{n;s/value:/value: accept/;}' ./deployoperator.yaml > ./deployoperatorsav.yaml ; mv ./deployoperatorsav.yaml ./deployoperator.yaml + sed -e '/baw_license/{n;s/value:/value: accept/;}' ./deployoperator.yaml > ./deployoperatorsav.yaml ; mv ./deployoperatorsav.yaml ./deployoperator.yaml + sed -e "s|<NAMESPACE>|$NAMESPACE|g" ./cluster_role_binding.yaml > ./cluster_role_binding_temp.yaml ; mv ./cluster_role_binding_temp.yaml ./cluster_role_binding.yaml if [ ! -z ${IMAGEREGISTRY} ]; then # Change the location of the image @@ -97,6 +118,13 @@ if [[ $LICENSE_ACCEPTED == "accept" ]]; then kubectl apply -f ./descriptors/service_account.yaml --validate=false kubectl apply -f ./descriptors/role.yaml --validate=false kubectl apply -f ./descriptors/role_binding.yaml --validate=false + kubectl apply -f ./descriptors/cluster_role.yaml --validate=false + kubectl apply -f ./cluster_role_binding.yaml --validate=false + + + # Uncomment runAsUser: 1001 for OCP 3.11 + ocp311_special + kubectl apply -f ./deployoperator.yaml --validate=false echo -e "\033[32mAll descriptors have been successfully applied. Monitor the pod status with 'kubectl get pods -w'.\033[0m" else diff --git a/scripts/deploy_CS3.3.sh b/scripts/deploy_CS3.3.sh new file mode 100755 index 00000000..efb77d74 --- /dev/null +++ b/scripts/deploy_CS3.3.sh @@ -0,0 +1,367 @@ +#!/bin/bash +# set -x +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. 
+# +############################################################################### +CUR_DIR=$(pwd) +if [ -n "$(echo $CUR_DIR | grep scripts)" ]; then + PARENT_DIR=$(dirname "$PWD") +else + PARENT_DIR=$CUR_DIR +fi +TEMP_FOLDER=${CUR_DIR}/.tmp +LOG_FILE=${CUR_DIR}/CS_prepare_install33.log + +#product_install=${1:-nonbai} + +function func_operand_request_cr_bai_33() +{ + + echo "Creating Common Services V3.3 Operand Request for BAI deployments on OCP 4.2+ ..." >> ${LOG_FILE} + operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/operator_operandrequest33_cr.yaml + cat << ENDF > ${operator_source_path} +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandRequest +metadata: + name: common-service +spec: + requests: + - registry: common-service + operands: + - name: ibm-cert-manager-operator + - name: ibm-mongodb-operator + - name: ibm-iam-operator + - name: ibm-monitoring-exporters-operator + - name: ibm-monitoring-prometheusext-operator + - name: ibm-monitoring-grafana-operator + - name: ibm-management-ingress-operator + - name: ibm-licensing-operator + - name: ibm-metering-operator + - name: ibm-commonui-operator +ENDF +} + + +# Deploy CS 3.3 if OCP 4.2 or 3.11 as per requirements. +# The components for CS 3.3 in this case will only be Licensing and Metering (also CommonUI as a base requirement) + +function func_operand_request_cr_nonbai_33() +{ + +echo "$(date) Creating Common Services V3.3 Request Operand for non-BAI deployments on OCP 3.11, 4.2+" >> ${LOG_FILE} + operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/operator_operandrequest33_cr.yaml + cat << ENDF > ${operator_source_path} +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandRequest +metadata: + name: common-service +spec: + requests: + - registry: common-service + operands: + - name: ibm-cert-manager-operator + - name: ibm-mongodb-operator + - name: ibm-iam-operator + - name: ibm-management-ingress-operator + - name: ibm-licensing-operator + - name: ibm-metering-operator + - name: ibm-commonui-operator +ENDF +} + +function install_common_service_33(){ + echo + echo -e "The installation of Common Services Release 3.3 has started..." >> ${LOG_FILE} + create_project_cs + apply_cs_operator_source + func_crd_rbac + func_install_odlm + sleep 20 + func_wait + func_operand_request_cr + # func_operand_config_cr + # func_operand_registry + # func_wait2 + echo "waiting on csv's to be ready..." + # wait for the CSVs to settle (duration assumed) + sleep 60 + func_check_csv + set_mongodb_single_copy + #func_check_statefulset + #func_check_jobs + sleep 20 + echo -e "Done" +} + + +function create_project_cs() { + project_name="ibm-common-services" + isProjExists=`oc get project $project_name --ignore-not-found | wc -l` >/dev/null 2>&1 + + if [ $isProjExists -ne 2 ] ; then + oc new-project ${project_name} >> ${LOG_FILE} + returnValue=$? + if [ "$returnValue" == 1 ]; then + echo -e "\x1B[1mInvalid project name, please enter a valid name...\x1B[0m" >> ${LOG_FILE} + project_name="" + else + echo -e "\x1B[1mCreate project ${project_name}...\x1B[0m" >> ${LOG_FILE} + fi + else + echo -e "$(date) : A previous installation and project \"${project_name}\" already exists; please clean up the existing deployment and resume." >> ${LOG_FILE} + exit + fi + PROJ_NAME=${project_name} +} + + + +# create common-services operator source +function apply_cs_operator_source () { + +echo "Applying common-services operator source...." 
>> ${LOG_FILE} +operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/operator_source.yaml +oc apply -f ${operator_source_path} >> ${LOG_FILE} +sleep 10 +} + +# create Common Services crd and rbac +function func_crd_rbac () { + + echo "Creating Common-Services ODLM CRDs....." >> ${LOG_FILE} + + + oc apply -f https://raw.githubusercontent.com/IBM/operand-deployment-lifecycle-manager/release-1.1/deploy/crds/operator.ibm.com_operandregistries_crd.yaml >> ${LOG_FILE} + oc apply -f https://raw.githubusercontent.com/IBM/operand-deployment-lifecycle-manager/release-1.1/deploy/crds/operator.ibm.com_operandconfigs_crd.yaml >> ${LOG_FILE} + oc apply -f https://raw.githubusercontent.com/IBM/operand-deployment-lifecycle-manager/release-1.1/deploy/crds/operator.ibm.com_operandrequests_crd.yaml >> ${LOG_FILE} + sleep 20 + + + oc apply -f https://raw.githubusercontent.com/IBM/operand-deployment-lifecycle-manager/release-1.1/deploy/service_account.yaml >> ${LOG_FILE} + oc apply -f https://raw.githubusercontent.com/IBM/operand-deployment-lifecycle-manager/release-1.1/deploy/role.yaml >> ${LOG_FILE} + oc apply -f https://raw.githubusercontent.com/IBM/operand-deployment-lifecycle-manager/release-1.1/deploy/role_binding.yaml >> ${LOG_FILE} + sleep 20 +} + +function func_install_odlm () { + oc project ibm-common-services + echo "Creating Common-Services operator......" >> ${LOG_FILE} + operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/operator.yaml + oc apply -f ${operator_source_path} >> ${LOG_FILE} + while ! oc get deployments/operand-deployment-lifecycle-manager -n "ibm-common-services" | egrep "1/1|2/2|3/3"; do + echo "Waiting for odlm deployment to complete..." >> ${LOG_FILE} + sleep 5 + done + sleep 20 +} + +function func_wait() { + + echo "Waiting for odlm deployment to complete.." >> ${LOG_FILE} + while ! oc get deployments/operand-deployment-lifecycle-manager -n "ibm-common-services" | egrep "1/1|2/2|3/3"; do + echo "Waiting for odlm deployment to complete..." >> ${LOG_FILE} + sleep 5 + done + + +while ! oc get deployments --all-namespaces | egrep -i operand-deployment-lifecycle-manager | egrep "1/1|2/2|3/3"; do + echo "Waiting for odlm..." + sleep 10 + done + + +echo "CatalogSource:" + while ! oc project ibm-common-services 2>/dev/null; do + echo -e " wait for namespace ibm-common-services to appear" + sleep 10 + done + + echo "OperandRegistry:" +while ! oc get operandregistry | egrep -qi common-service; do + echo -e " wait for operandregistry to appear" + sleep 10 + done + + echo "OperandConfig:" + while ! oc get operandconfig | egrep -qi common-service; do + echo -e " wait for operandconfig to appear" + sleep 10 + done + +} + +#create waits after operandrequest +func_wait2 () { + echo "OperandRequest:" + NAMESPACE="ibm-common-services" + + while ! oc get operandrequest -n $NAMESPACE | egrep -qi common-service; do + echo -e " wait for operandrequest to appear" + sleep 10 + done + + echo "OperatorGroup:" + while ! oc get operatorgroup -n $NAMESPACE | egrep -qi operand; do + echo -e " wait for operatorgroup to appear" + sleep 10 + done + + echo "Subscriptions:" + while ! oc get subscriptions -n $NAMESPACE | egrep -qi ibm; do + echo -e " wait for subscription to appear" + sleep 10 + done + + echo "CSV:" + while ! 
oc get csv -n $NAMESPACE | egrep -qi ibm; do + echo -e " wait for csv to appear" + sleep 30 + done +} + + +# check that csv are good +func_check_csv () { + NAMESPACE="ibm-common-services" + ROUND=0 + #func_link_secret_delete_pod + while oc get csv -n $NAMESPACE | egrep -v "DISPLAY|Succeeded"; do + ROUND=$((ROUND+1)) + echo "Making sure csv's are succeeded status round $ROUND..." + sleep 30 + + if [[ $ROUND -gt 40 ]]; then + echo -e " !.! csvs timeout" + break + fi + done +} + + +# check statefulsets to make sure running normal +func_check_statefulsets () { + ROUND=0 + NAMESPACE="ibm-common-services" + # func_link_secret_delete_pod + while oc get statefulset -n $NAMESPACE | egrep "0/1|0/2|1/2|0/3|1/3|2/3"; do + ROUND=$((ROUND+1)) + echo "Waiting for statefulsets round $ROUND..." >> ${LOG_FILE} + sleep 30 + #func_link_secret_delete_pod + if [[ $ROUND -gt 40 ]]; then + echo -e " !.! mongodb timeout" >> ${LOG_FILE} + break + fi + done +} + +# check job status to make sure finished +func_check_jobs () { + echo "Make sure jobs are finished" + NAMESPACE="ibm-common-services" + ROUND=0 + while oc get jobs -n $NAMESPACE | egrep "0/1|0/2|1/2|0/3|1/3|2/3"; do + echo "Waiting for jobs to finish round $ROUND..." >> ${LOG_FILE} + ROUND=$((ROUND+1)) + sleep 30 + #func_link_secret_delete_pod + if [[ $ROUND -gt 30 ]]; then + echo -e " !.! jobs timeout" >> ${LOG_FILE} + break + fi + done +} + + +# create operandrequest to install + +function func_operand_request_cr () { + oc project ibm-common-services + echo "Creating Common-Services Operand Request operator..." >> ${LOG_FILE} + operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/operator_operandrequest33_cr.yaml + oc apply -f ${operator_source_path} >> ${LOG_FILE} + sleep 60 +} + +# create operandconfig to install +function func_operand_config_cr () { + oc project ibm-common-services + echo "Applying Common-Services Operand Config operator..." >> ${LOG_FILE} + operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/operator_operandconfig_cr.yaml + oc apply -f ${operator_source_path} + sleep 60 +} + +# create operandregistry to install +function func_operand_registry () { + oc project ibm-common-services + echo -e "\x1B[1mApplying Common-Services Operand Registry operator..\x1B[0m" >> ${LOG_FILE} + operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/operator_operandregistry_cr.yaml + oc apply -f ${operator_source_path} +sleep 20 +} + + +# Deploy CS 3.3 if OCP 4.2 or 3.11 as per requirements. 
+# The components for CS 3.3 in this case will only be Licensing and Metering (also CommonUI as a base requirement) + +function startdeploy_cs() +{ + + echo -e "*******************************************************" >> ${LOG_FILE} + echo "$(date) non-bai deployment as default; the components for CS 3.3 in this case will only be Licensing and Metering" >> ${LOG_FILE} + func_operand_request_cr_nonbai_33 >> ${LOG_FILE} + install_common_service_33 >> ${LOG_FILE} +} + +# +# Set config so that only one MongoDB replica comes up +# + +function set_mongodb_single_copy() +{ + +index=0 + while [ $index -lt 20 ] + do + #jsonpath="{ range .spec.services[$index]}{.name}" + item=$(oc get operandconfig common-service -n ibm-common-services --ignore-not-found -o=jsonpath="{ range .spec.services["$index"]}{.name}") + + if [ "$item" = "ibm-mongodb-operator" ]; then + + oc patch operandconfig common-service -n ibm-common-services --type json \ + -p '[{"op":"replace","path":"/spec/services/'$index'/spec/mongoDB", "value":{"replicas": 1}}]' >> ${LOG_FILE} + break; + fi + index=$(( index + 1 )) + done +} + +function show_summary(){ + + PLATFORM_SELECTED="OCP" + PLATFORM_VERSION=">=4.2+ < 4.4" + project_name="ibm-common-services" + + printf "\n" + echo -e "\x1B[1m*******************************************************\x1B[0m" + echo -e "\x1B[1m Summary of input \x1B[0m" + echo -e "\x1B[1m*******************************************************\x1B[0m" + echo -e "\x1B[1;31m1. Cloud platform to deploy: ${PLATFORM_SELECTED} ${PLATFORM_VERSION}\x1B[0m" + echo -e "\x1B[1;31m2. Project to deploy: ${project_name}\x1B[0m" + echo -e "\x1B[1;31m3. CS Operators to be Installed: Licensing and Metering + dependencies \x1B[0m" + echo -e "\x1B[1m*******************************************************\x1B[0m" +} + +#### #### +## Main Logic ## +#### #### +startdeploy_cs +show_summary >> ${LOG_FILE} + diff --git a/scripts/deploy_CS3.4.sh b/scripts/deploy_CS3.4.sh new file mode 100755 index 00000000..ded2a3c1 --- /dev/null +++ b/scripts/deploy_CS3.4.sh @@ -0,0 +1,202 @@ +#!/bin/bash +# set -x +############################################################################### +# +# Licensed Materials - Property of IBM +# +# (C) Copyright IBM Corp. 2020. All Rights Reserved. +# +# US Government Users Restricted Rights - Use, duplication or +# disclosure restricted by GSA ADP Schedule Contract with IBM Corp. +# +##IBM Common Service Operator and ODLM operator get deployed in the "common-service" namespace +#the individual operators get deployed in the "ibm-common-services" namespace +############################################################################### +CUR_DIR=$(pwd) +if [ -n "$(echo $CUR_DIR | grep scripts)" ]; then + PARENT_DIR=$(dirname "$PWD") +else + PARENT_DIR=$CUR_DIR +fi +TEMP_FOLDER=${CUR_DIR}/.tmp +LOG_FILE=${CUR_DIR}/cs_prepare_install34.log + +product_install=${1:-nonbai} +project_prev=${2:-common-service} + +function install_common_service_34(){ + echo -e "$(date) The installation of Common Services Release 3.4 has started" >> ${LOG_FILE} + + + apply_registry + sleep 60 + create_operator_Group + create_operator_subscription + echo "$(date) waiting on Operator..sleeping..." 
>> ${LOG_FILE} + sleep 120 + create_operator_request + sleep 60 + set_mongodb_single_copy + show_summary >> ${LOG_FILE} + #oc project ${project_prev} >> ${LOG_FILE} + #echo "$(date) Setting project scope back to..${project_prev}" + echo -e "Done" >> ${LOG_FILE} +} + + +function func_operand_request_cr_bai_34() +{ + + echo "$(date) Creating Common Services V3.4 Operand Request for BAI deployments on OCP 4.3+ ..." >> ${LOG_FILE} + operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/operator_operandrequest34cr.yaml + cat << ENDF > ${operator_source_path} +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandRequest +metadata: + name: common-service + namespace: ibm-common-services +spec: + requests: + - registry: common-service + registryNamespace: ibm-common-services + operands: + - name: ibm-licensing-operator + - name: ibm-iam-operator + - name: ibm-monitoring-exporters-operator + - name: ibm-monitoring-prometheusext-operator + - name: ibm-monitoring-grafana-operator + - name: ibm-metering-operator + - name: ibm-management-ingress-operator + - name: ibm-commonui-operator +ENDF +} + + +function func_operand_request_cr_nonbai_34() +{ + + echo "$(date) Creating Common-Services V3.4 Operand Request for non-BAI deployments on OCP 4.3 .." >> ${LOG_FILE} + operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/operator_operandrequest34cr.yaml + cat << ENDF > ${operator_source_path} +apiVersion: operator.ibm.com/v1alpha1 +kind: OperandRequest +metadata: + name: common-service + namespace: ibm-common-services +spec: + requests: + - registry: common-service + registryNamespace: ibm-common-services + operands: + - name: ibm-licensing-operator + - name: ibm-metering-operator +ENDF +} + +function apply_registry(){ + + echo "$(date) Defining sources for Release 3.4 of IBM Common Services." >> ${LOG_FILE} + operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/app_registry.yaml + oc apply -f ${operator_source_path} >> ${LOG_FILE} +} + +function create_project_cs() { + project_name="common-service" + isProjExists=`oc get project $project_name --ignore-not-found | wc -l` >/dev/null 2>&1 + + if [ $isProjExists -ne 2 ] ; then + oc new-project ${project_name} >> ${LOG_FILE} + returnValue=$? + if [ "$returnValue" == 1 ]; then + echo -e "$(date) Invalid project name, please enter a valid name..." >> ${LOG_FILE} + project_name="" + else + echo -e "$(date) Create project ${project_name}..." >> ${LOG_FILE} + fi + else + echo -e "$(date) : A previous installation and project \"${project_name}\" already exists; cleanup is required before deployment. Exiting." >> ${LOG_FILE} + exit + fi + PROJ_NAME=${project_name} + +} + + +function create_operator_Group() +{ + echo "$(date) Creating Operator Group for Release 3.4..." >> ${LOG_FILE} + operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/operator_group.yaml + oc apply -f ${operator_source_path} >> ${LOG_FILE} + sleep 2 +} + +function create_operator_subscription() +{ + echo "$(date) Creating Operator Subscription for Release 3.4..." >> ${LOG_FILE} + operator_source_path=${PARENT_DIR}/descriptors/common-services/crds/operator_subscription.yaml + oc apply -f ${operator_source_path} >> ${LOG_FILE} + sleep 2 +} + +function create_operator_request() +{ +echo "$(date) Creating Operator Request CR for Release 3.4..." 
+
+#
+# Set config so that only one MongoDB replica comes up
+#
+function set_mongodb_single_copy()
+{
+    index=0
+    while [ $index -lt 20 ]
+    do
+        #jsonpath="{ range .spec.services[$index]}{.name}"
+        item=$(oc get operandconfig common-service -n ibm-common-services --ignore-not-found -o=jsonpath="{ range .spec.services["$index"]}{.name}")
+
+        if [ "$item" = "ibm-mongodb-operator" ]; then
+            oc patch operandconfig common-service -n ibm-common-services --type json \
+                -p '[{"op":"replace","path":"/spec/services/'$index'/spec/mongoDB", "value":{"replicas": 1}}]' >> ${LOG_FILE}
+            break
+        fi
+        index=$(( index + 1 ))
+    done
+}
+
+function show_summary(){
+
+    PLATFORM_SELECTED="OCP"
+    PLATFORM_VERSION=">=4.4"
+    project_name="common-service, ibm-common-services"
+
+    printf "\n"
+    echo -e "\x1B[1m*******************************************************\x1B[0m"
+    echo -e "\x1B[1m                   Summary of input                    \x1B[0m"
+    echo -e "\x1B[1m*******************************************************\x1B[0m"
+    echo -e "\x1B[1;31m1. Cloud platform to deploy: ${PLATFORM_SELECTED} ${PLATFORM_VERSION}\x1B[0m"
+    echo -e "\x1B[1;31m2. Projects to deploy to: ${project_name}\x1B[0m"
+    echo -e "\x1B[1;31m3. CS operators to be installed: Licensing and Metering + dependencies\x1B[0m"
+    echo -e "\x1B[1m*******************************************************\x1B[0m"
+}
+
+if [ "$product_install" == "bai" ]; then
+    echo -e "*******************************************************" >> ${LOG_FILE}
+    echo "$(date) bai deployment was selected" >> ${LOG_FILE}
+    func_operand_request_cr_bai_34
+    install_common_service_34
+else
+    echo -e "*******************************************************" >> ${LOG_FILE}
+    echo "$(date) non-bai deployment was selected" >> ${LOG_FILE}
+    func_operand_request_cr_nonbai_34
+    install_common_service_34
+fi
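+
+# Example invocation (illustrative; "my-icp4a-project" is a placeholder): the
+# first argument selects the operand set ("bai" or the default "nonbai"); the
+# second names the project to switch back to afterwards (default
+# "common-service"; the switch itself is currently commented out above).
+#   ./deploy_CS3.4.sh bai my-icp4a-project
+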
diff --git a/scripts/helper/common.sh b/scripts/helper/common.sh
new file mode 100755
index 00000000..ae8b1eaa
--- /dev/null
+++ b/scripts/helper/common.sh
@@ -0,0 +1,138 @@
+#!/bin/bash
+
+###############################################################################
+#
+# Licensed Materials - Property of IBM
+#
+# (C) Copyright IBM Corp. 2020. All Rights Reserved.
+#
+# US Government Users Restricted Rights - Use, duplication or
+# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+#
+###############################################################################
+
+# This script contains shared utility functions and environment variables.
+
+function set_global_env_vars() {
+    readonly unameOut="$(uname -s)"
+    case "${unameOut}" in
+        Linux*)  readonly machine="Linux";;
+        Darwin*) readonly machine="Mac";;
+        *)       readonly machine="UNKNOWN:${unameOut}"
+    esac
+
+    if [[ "$machine" == "Mac" ]]; then
+        SED_COMMAND='sed -i ""'
+        SED_COMMAND_FORMAT='sed -i "" s/^M//g'
+        YQ_CMD=${CUR_DIR}/helper/yq/yq_darwin_amd64
+    else
+        SED_COMMAND='sed -i'
+        SED_COMMAND_FORMAT='sed -i s/\r//g'
+        YQ_CMD=${CUR_DIR}/helper/yq/yq_linux_amd64
+    fi
+}
+
+############################
+# CLI installation utilities
+############################
+
+function validate_cli(){
+    which oc &>/dev/null
+    [[ $? -ne 0 ]] && \
+        echo "Unable to locate the OpenShift CLI; please install it first." && \
+        exit 1
+
+    which ${YQ_CMD} &>/dev/null
+    [[ $? -ne 0 ]] && \
+        echo_bold "\"yq\" command not found\n" && \
+        echo_bold "Please download the \"yq\" binary file from the cert-kubernetes repo\n" && \
+        exit 1
+
+    which timeout &>/dev/null
+    [[ $? -ne 0 ]] && \
+        while true; do
+            echo_bold "\"timeout\" command not found\n"
+            echo_bold "\"timeout\" will be installed automatically\n"
+            echo_bold "Do you accept (Yes/No, default: No):"
+            read -rp "" ans
+            case "$ans" in
+            "y"|"Y"|"yes"|"Yes"|"YES")
+                install_timeout_cli
+                break
+                ;;
+            "n"|"N"|"no"|"No"|"NO")
+                echo -e "You did not accept, exiting...\n"
+                exit 0
+                ;;
+            *)
+                echo_red "You did not accept, exiting...\n"
+                exit 0
+                ;;
+            esac
+        done
+}
+
+function install_timeout_cli(){
+    if [[ ${machine} = "Mac" ]]; then
+        echo -n "Installing timeout..."; brew install coreutils >/dev/null 2>&1; sudo ln -s /usr/local/bin/gtimeout /usr/local/bin/timeout >/dev/null 2>&1; echo "done.";
+    fi
+    printf "\n"
+}
+
+function install_yq_cli(){
+    if [[ ${machine} = "Linux" ]]; then
+        echo -n "Downloading..."; curl -LO https://github.com/mikefarah/yq/releases/download/3.2.1/yq_linux_amd64 >/dev/null 2>&1; echo "done.";
+        echo -n "Installing yq..."; sudo chmod +x yq_linux_amd64 >/dev/null; sudo mv yq_linux_amd64 /usr/local/bin/yq >/dev/null; echo "done.";
+    else
+        echo -n "Installing yq..."; brew install yq >/dev/null; echo "done.";
+    fi
+    printf "\n"
+}
+
+###################
+# Echoing utilities
+###################
+
+function echo_bold() {
+    # Echoes a message in bold characters
+    echo_impl "${1}" "m"
+}
+
+function echo_red() {
+    # Echoes a message in red bold characters
+    echo_impl "${1}" ";31m"
+}
+
+function echo_impl() {
+    # Echoes a message prefixed and suffixed by formatting characters
+    local MSG=${1:?Missing message to echo}
+    local PREFIX=${2:?Missing message prefix}
+    #local SUFFIX=${3:?Missing message suffix}
+    echo -e "\x1B[1${PREFIX}${MSG}\x1B[0m"
+}
+
+############################
+# Check OCP version
+############################
+function check_platform_version(){
+    res=$(kubectl get nodes | awk 'NR==2{print $5}')
+    if [[ $res =~ v1.11 ]]; then
+        PLATFORM_VERSION="3.11"
+    elif [[ $res =~ v1.14.6 ]]; then
+        PLATFORM_VERSION="4.2"
+    elif [[ $res =~ v1.16.2 ]]; then
+        PLATFORM_VERSION="4.3"
+    elif [[ $res =~ v1.17.1 ]]; then
+        PLATFORM_VERSION="4.4"
+    else
+        echo -e "\x1B[1;31mUnable to determine the OCP version from the node version information: $res\x1B[0m"
+    fi
+}
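+
+# Alternative sketch (assumes OCP 4.x): read the version directly from the
+# ClusterVersion resource instead of inferring it from kubelet versions, which
+# drift with every OCP patch release. Not called by the scripts above.
+function check_platform_version_from_clusterversion(){
+    PLATFORM_VERSION=$(oc get clusterversion version -o jsonpath='{.status.desired.version}' | cut -d. -f1,2)
+}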
+
+set_global_env_vars
diff --git a/scripts/helper/yq/yq_darwin_amd64 b/scripts/helper/yq/yq_darwin_amd64
new file mode 100755
index 00000000..4436477a
Binary files /dev/null and b/scripts/helper/yq/yq_darwin_amd64 differ
diff --git a/scripts/helper/yq/yq_linux_amd64 b/scripts/helper/yq/yq_linux_amd64
new file mode 100755
index 00000000..e521ea60
Binary files /dev/null and b/scripts/helper/yq/yq_linux_amd64 differ
diff --git a/scripts/loadPrereqImages.sh b/scripts/loadPrereqImages.sh
index 27fc411b..60406276 100755
--- a/scripts/loadPrereqImages.sh
+++ b/scripts/loadPrereqImages.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-
+#set -x
 echo -e "\033[1;31mImportant! Please ensure that you have logged in to the target Docker registry in advance. \033[0m"
 echo -e "\033[1;31mImportant! The load image sample script is for x86_64, amd64, or i386 platforms only.\n \033[0m"
@@ -87,22 +87,51 @@ then
     exit -1
 fi
 
-declare -A prereqimages=(["db2u.tools:11.5.1.0-CN1"]="docker.io/ibmcom/"
-                         ["db2:11.5.1.0-CN1"]="docker.io/ibmcom/"
-                         ["db2u.auxiliary.auth:11.5.1.0-CN1"]="docker.io/ibmcom/"
-                         ["db2u.instdb:11.5.1.0-CN1"]="docker.io/ibmcom/"
-                         ["etcd:v3.3.10"]="quay.io/coreos/"
-                         ["openldap:1.3.0"]="osixia/"
-                         ["busybox:latest"]="docker.io/library/"
-                         )
+# declare -A prereqimages=(["db2u.tools:11.5.1.0-CN1"]="docker.io/ibmcom/"
+#                          ["db2:11.5.1.0-CN1"]="docker.io/ibmcom/"
+#                          ["db2u.auxiliary.auth:11.5.1.0-CN1"]="docker.io/ibmcom/"
+#                          ["db2u.instdb:11.5.1.0-CN1"]="docker.io/ibmcom/"
+#                          ["etcd:v3.3.10"]="quay.io/coreos/"
+#                          ["openldap:1.3.0"]="osixia/"
+#                          ["busybox:latest"]="docker.io/library/"
+#                          ["phpldapadmin:0.9.0"]="osixia/"
+#                          )
+prereqimages=("db2u.tools:11.5.1.0-CN1"
+              "db2:11.5.1.0-CN1"
+              "db2u.auxiliary.auth:11.5.1.0-CN1"
+              "db2u.instdb:11.5.1.0-CN1"
+              "etcd:v3.3.10"
+              "openldap:1.3.0"
+              "busybox:latest"
+              "phpldapadmin:0.9.0")
+
+function getimagerepo(){
+    if [[ $image == ${prereqimages[0]} ]]; then
+        image_repo="docker.io/ibmcom/"
+    elif [[ $image == ${prereqimages[1]} ]]; then
+        image_repo="docker.io/ibmcom/"
+    elif [[ $image == ${prereqimages[2]} ]]; then
+        image_repo="docker.io/ibmcom/"
+    elif [[ $image == ${prereqimages[3]} ]]; then
+        image_repo="docker.io/ibmcom/"
+    elif [[ $image == ${prereqimages[4]} ]]; then
+        image_repo="quay.io/coreos/"
+    elif [[ $image == ${prereqimages[5]} ]]; then
+        image_repo="osixia/"
+    elif [[ $image == ${prereqimages[6]} ]]; then
+        image_repo="docker.io/library/"
+    elif [[ $image == ${prereqimages[7]} ]]; then
+        image_repo="osixia/"
+    fi
+}
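+
+# Design note: the commented-out "declare -A" mapping above needs bash 4+,
+# which macOS does not ship (it bundles bash 3.2), hence the if/elif lookup on
+# the plain indexed array. A case-based sketch that is also bash 3.2-safe and
+# equivalent in behavior (left commented out so it does not shadow the
+# function above):
+# function getimagerepo(){
+#     case ${image} in
+#         etcd:*)                    image_repo="quay.io/coreos/" ;;
+#         openldap:*|phpldapadmin:*) image_repo="osixia/" ;;
+#         busybox:*)                 image_repo="docker.io/library/" ;;
+#         *)                         image_repo="docker.io/ibmcom/" ;;
+#     esac
+# }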
-for image in "${!prereqimages[@]}"
+for image in "${prereqimages[@]}"
 do
-    image_repo=${prereqimages[${image}]}
+    getimagerepo
     origin_image=${image_repo}${image}
-
+
+    echo -e "\x1B[1mPull image: ${origin_image}.\n\x1B[0m"
     ${cli_cmd} pull ${origin_image}
-    echo "Pull image: ${origin_image} "
+
 if [ "${cli_cmd}" = "docker" ]
 then
     echo "${cli_cmd} tag ${origin_image} ${target_docker_repo}/${image}"
@@ -119,12 +148,13 @@ do
 then
     ${cli_cmd} push ${target_docker_repo}/${image} | grep -e repository -e digest -e unauthorized
     ${cli_cmd} rmi -f ${origin_image} ${target_docker_repo}/${image} | grep -e unauthorized
-    echo "Pushed image: "${target_docker_repo}/${image}
+    echo -e "\x1B[1mPushed image: ${target_docker_repo}/${image}\n\x1B[0m"
+
 elif [ "${cli_cmd}" = "podman" ]
 then
     ${cli_cmd} push --tls-verify=false ${local_repo_prefix}${image} ${target_docker_repo}/${image} | grep -e repository -e digest -e unauthorized
     ${cli_cmd} rmi -f ${origin_image} ${local_repo_prefix}${image} | grep -e unauthorized
-    echo "Pushed image: "${target_docker_repo}/${image}
+    echo -e "\x1B[1mPushed image: ${target_docker_repo}/${image}\n\x1B[0m"
 fi
 fi
done
@@ -137,7 +167,7 @@ else
     status="push"
 fi
 echo -e "\nDocker images ${status} to ${target_docker_repo} completed. Check the following images in the Docker registry:"
-for img_load in ${!prereqimages[@]}
+for img_load in ${prereqimages[@]}
 do
     echo "   - ${target_docker_repo}/${img_load}"
 done
diff --git a/scripts/pull-eventstreams-connection-info.sh b/scripts/pull-eventstreams-connection-info.sh
new file mode 100755
index 00000000..d2b2769a
--- /dev/null
+++ b/scripts/pull-eventstreams-connection-info.sh
@@ -0,0 +1,273 @@
+#!/bin/bash
+#set -x
+###############################################################################
+#
+# Licensed Materials - Property of IBM
+#
+# (C) Copyright IBM Corp. 2020. All Rights Reserved.
+#
+# US Government Users Restricted Rights - Use, duplication or
+# disclosure restricted by GSA ADP Schedule Contract with IBM Corp.
+#
+###############################################################################
+
+# This script pulls the Kafka connection information from a preexisting
+# IBM Event Streams instance and fills the shared_configuration element of an
+# ICP4A Custom Resource (CR) so that the operator can automatically configure
+# the Kafka clients of ICP4A products.
+# The script is primarily meant to be called by the demo pattern script, but
+# you can also execute it directly, passing a CR file path with the -f option.
+# Prerequisite: oc login must already be done.
+
+readonly PULL_ES_SCRIPT_PATH=$(dirname $0)
+readonly TEMP_PULL_ES_FOLDER=${PULL_ES_SCRIPT_PATH}/.tmpPullEventstreamsConfig
+CUR_DIR=$(cd ${PULL_ES_SCRIPT_PATH}; pwd)
+
+# Import common utilities and environment variables
+source ${PULL_ES_SCRIPT_PATH}/helper/common.sh
+
+function usage() {
+    echo "Augment an ICP4A Custom Resource with Kafka connection information extracted"
+    echo "automatically from an Event Streams instance already present in the current namespace."
+    echo "Syntax:"
+    echo "  $(basename $0) -f <icp4a_cr_file>"
+    echo "Options:"
+    echo "  -f icp4a_cr_file  (required) The path of an ICP4A CR file."
+    echo "  -h                This help."
+    echo "IMPORTANT: oc login must already be done when calling this script."
+    echo
+    exit 1
+}
+
+# Initialize environment variables, initialize the CLI, and perform initial cleanup.
+function init() {
+    # Name of the EventStreams KafkaUser kubernetes object used for ICP4A.
+    readonly ES_KAFKA_USER_RESOURCE_NAME="eventstreams-for-icp4a-kafka-user"
+
+    # Name of the temporary file used for deploying an EventStreams KafkaUser resource.
+    readonly ES_KAFKA_USER_FILENAME="${TEMP_PULL_ES_FOLDER}/kafkauser.yaml"
+
+    # Name of the YAML file of the Kafka connection secret.
+    readonly KAFKA_CONNECTION_SECRET_FILENAME="${TEMP_PULL_ES_FOLDER}/kafkasecret.yaml"
+
+    # Name of the secret that contains the Kafka connection information.
+    readonly KAFKA_CONNECTION_SECRET="icp4a-kafka-connection-secret"
+
+    cleanup
+
+    mkdir -p ${TEMP_PULL_ES_FOLDER} >/dev/null 2>&1
+
+    validate_cli
+}
+
+# Removes temporary files, if any are left from a previous run.
+function cleanup() {
+    rm -f ${ES_KAFKA_USER_FILENAME}
+    rm -f ${KAFKA_CONNECTION_SECRET_FILENAME}
+    rm -rf ${TEMP_PULL_ES_FOLDER}
+}
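+
+# Login guard (sketch, not called by this script): fail fast when "oc login"
+# has not been done, since every subsequent oc call would fail anyway.
+function require_login() {
+    oc whoami >/dev/null 2>&1 || { echo_red "Not logged in to OpenShift. Aborting."; exit 1; }
+}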
+
+function get_eventstreams_connection_info() {
+    local ICP4A_CR_NAME=${1:?Missing ICP4A CR name}
+
+    # Search for an EventStreams instance in the current namespace.
+    echo "Searching for an EventStreams instance in the current namespace..."
+
+    oc get EventStreams --no-headers | grep "Ready" >/dev/null 2>&1
+    returnValue=$?
+    if [ "$returnValue" == 1 ] ; then
+        echo_bold "No instance of EventStreams found. Aborting."
+        exit 1
+    fi
+    echo_bold "EventStreams instance found."
+    # Extract the first word from the output of the oc command.
+    local ES_CR_NAME="$(oc get EventStreams --no-headers | grep "Ready" | awk '{print $1;}')"
+    echo "EventStreams CR name: ${ES_CR_NAME}"
+
+    ES_CR_STATUS_PHASE="$(oc get EventStreams ${ES_CR_NAME} -o jsonpath='{.status.phase}')"
+    echo "EventStreams CR status.phase: ${ES_CR_STATUS_PHASE}"
+
+    if [[ "${ES_CR_STATUS_PHASE}" != "Ready" ]]; then
+        echo_red "The EventStreams instance that was found does not have status.phase=Ready. Aborting."
+        exit 1
+    fi
+
+    deploy_eventstreams_kafka_user ${ES_CR_NAME} ${ICP4A_CR_NAME}
+}
+
+function deploy_eventstreams_kafka_user() {
+    local ES_CR_NAME=${1:?Missing EventStreams CR name argument}
+    local ICP4A_CR_NAME=${2:?Missing ICP4A CR name argument}
+
+    cat <<EOF > "${ES_KAFKA_USER_FILENAME}"
+apiVersion: eventstreams.ibm.com/v1beta1
+kind: KafkaUser
+metadata:
+  labels:
+    eventstreams.ibm.com/cluster: ${ES_CR_NAME}
+  name: ${ES_KAFKA_USER_RESOURCE_NAME}
+spec:
+  authentication:
+    type: scram-sha-512
+  authorization:
+    acls:
+      - host: '*'
+        operation: Read
+        resource:
+          name: '*'
+          patternType: literal
+          type: topic
+      - host: '*'
+        operation: Describe
+        resource:
+          name: '*'
+          patternType: literal
+          type: topic
+      - host: '*'
+        operation: Read
+        resource:
+          name: '*'
+          patternType: literal
+          type: group
+      - host: '*'
+        operation: Write
+        resource:
+          name: '*'
+          patternType: literal
+          type: topic
+      - host: '*'
+        operation: Create
+        resource:
+          name: '*'
+          patternType: literal
+          type: topic
+      - host: '*'
+        operation: Describe
+        resource:
+          name: '*'
+          patternType: literal
+          type: topic
+    type: simple
+EOF
+
+    echo "Deploying KafkaUser ${ES_KAFKA_USER_RESOURCE_NAME}..."
+    oc apply -f ${ES_KAFKA_USER_FILENAME}
+
+    # It typically takes about 2 minutes for the KafkaUser object to reach Ready status.
+    echo "Waiting 5 seconds for KafkaUser ${ES_KAFKA_USER_RESOURCE_NAME} to reach Ready status..."
+    sleep 5
+
+    local ES_KAFKA_USER_STATUS=$(oc get KafkaUser ${ES_KAFKA_USER_RESOURCE_NAME} -o jsonpath='{.status.conditions[0].type}')
+    if [[ "${ES_KAFKA_USER_STATUS}" != "Ready" ]]; then
+        echo_red "${ES_KAFKA_USER_RESOURCE_NAME} is not ready. Aborting."
+        exit 1
+    fi
+
+    local KAFKA_SERVER_CERTIFICATE=$(get_kafka_server_certificate_base64 ${ES_CR_NAME})
+    local KAFKA_USERNAME=$(get_kafka_username_base64)
+    local KAFKA_PASSWORD=$(get_kafka_password_base64)
+
+    local KAFKA_BOOTSTRAP_HOST="$(oc get EventStreams ${ES_CR_NAME} -o jsonpath='{.status.kafkaListeners[1].addresses[0].host}')"
+    local KAFKA_BOOTSTRAP_PORT="$(oc get EventStreams ${ES_CR_NAME} -o jsonpath='{.status.kafkaListeners[1].addresses[0].port}')"
+    local KAFKA_BOOTSTRAP_SERVERS="${KAFKA_BOOTSTRAP_HOST}:${KAFKA_BOOTSTRAP_PORT}"
+
+    # Store the confidential connection information in a secret so that the
+    # operator can configure the Kafka connection of the products. The secret
+    # name is provided in shared_configuration.kafka_configuration.connection_secret_name.
+    deploy_kafka_connection_secret ${KAFKA_USERNAME} ${KAFKA_PASSWORD} ${KAFKA_SERVER_CERTIFICATE}
+
+    # Non-confidential Kafka connection information is provided in the CR.
+    apply_changes_to_icp4a_cr ${ICP4A_CR_NAME} ${KAFKA_BOOTSTRAP_SERVERS} ${ES_CR_NAME}
+}
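+
+# Readiness sketch: instead of the fixed 5-second sleep above, "oc wait" can
+# block until the KafkaUser reports the Ready condition (the ~2 minutes the
+# object typically needs fit comfortably in the timeout below):
+#   oc wait kafkauser/${ES_KAFKA_USER_RESOURCE_NAME} --for=condition=Ready --timeout=180s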
+
+function apply_changes_to_icp4a_cr() {
+    local ICP4A_CR_NAME=${1:?Missing ICP4A CR file}
+    local KAFKA_BOOTSTRAP_SERVERS=${2:?Missing Kafka bootstrap servers}
+    local ES_CR_NAME=${3:?Missing EventStreams CR name argument}
+
+    local PARAM_BASE="spec.shared_configuration.kafka_configuration"
+    ${YQ_CMD} w -i ${ICP4A_CR_NAME} ${PARAM_BASE}.bootstrap_servers ${KAFKA_BOOTSTRAP_SERVERS}
+    ${YQ_CMD} w -i ${ICP4A_CR_NAME} ${PARAM_BASE}.security_protocol "SASL_SSL"
+    ${YQ_CMD} w -i ${ICP4A_CR_NAME} ${PARAM_BASE}.sasl_mechanism "SCRAM-SHA-512"
+    ${YQ_CMD} w -i ${ICP4A_CR_NAME} ${PARAM_BASE}.connection_secret_name ${KAFKA_CONNECTION_SECRET}
+
+    echo_bold "Filled the ICP4A CR with the following configuration information for the Event Streams instance ${ES_CR_NAME}:"
+    ${YQ_CMD} r ${ICP4A_CR_NAME} ${PARAM_BASE}
+}
+
+function deploy_kafka_connection_secret() {
+    local KAFKA_USERNAME=${1:?Missing Kafka username}
+    local KAFKA_PASSWORD=${2:?Missing Kafka password}
+    local KAFKA_SERVER_CERTIFICATE=${3:?Missing Kafka server certificate}
+
+    cat <<EOF > "${KAFKA_CONNECTION_SECRET_FILENAME}"
+apiVersion: v1
+kind: Secret
+metadata:
+  name: ${KAFKA_CONNECTION_SECRET}
+type: Opaque
+data:
+  kafka-username: ${KAFKA_USERNAME}
+  kafka-password: ${KAFKA_PASSWORD}
+  kafka-server-certificate: ${KAFKA_SERVER_CERTIFICATE}
+EOF
+
+    # Remove the existing secret, if any.
+    oc delete secret ${KAFKA_CONNECTION_SECRET} --ignore-not-found=true
+
+    echo "Deploying secret ${KAFKA_CONNECTION_SECRET}..."
+    # --validate=false in order to support keys with no value, although typically all keys do have values.
+    oc create -f ${KAFKA_CONNECTION_SECRET_FILENAME} --validate=false
+}
+
+function get_kafka_username_base64() {
+    echo -n ${ES_KAFKA_USER_RESOURCE_NAME} | base64
+}
+
+function get_kafka_password_base64() {
+    # Note this is base64-encoded.
+    local KAFKA_PASSWORD=$(oc get secret ${ES_KAFKA_USER_RESOURCE_NAME} -o=jsonpath='{.data.password}')
+    echo ${KAFKA_PASSWORD}
+}
+
+function get_kafka_server_certificate_base64() {
+    # Note this is base64-encoded.
+    local ES_CR_NAME=${1:?Missing Event Streams CR name}
+    local KAFKA_SERVER_CERTIFICATE=$(oc get secret/${ES_CR_NAME}-cluster-ca-cert -o "jsonpath={.data.ca\.crt}" | openssl enc -d -A)
+    echo ${KAFKA_SERVER_CERTIFICATE}
+}
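+
+# Verification sketch: decode one key of the generated secret to check what
+# the operator will consume ("base64 -d" is the GNU coreutils spelling; macOS
+# uses "base64 -D"):
+#   oc get secret icp4a-kafka-connection-secret -o jsonpath='{.data.kafka-username}' | base64 -d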
+
+function main() {
+    local ICP4A_CR_NAME
+
+    while getopts "f:h" option
+    do
+        case $option in
+        f)
+            ICP4A_CR_NAME=$OPTARG
+            ;;
+        h)
+            usage
+            ;;
+        \?)
+            echo_red "Unrecognized option $OPTARG"
+            usage
+            ;;
+        esac
+    done
+
+    if [[ -z "${ICP4A_CR_NAME}" ]]; then
+        echo_red "Missing ICP4A CR name argument"
+        usage
+    fi
+
+    init
+
+    # Augment the CR with the connection information for using a preexisting Event Streams 2002.2.1+ instance.
+    get_eventstreams_connection_info ${ICP4A_CR_NAME}
+
+    # Clean up temporary files created by this run of the script.
+    cleanup
+}
+
+main "$@"
diff --git a/scripts/upgradeOperator.sh b/scripts/upgradeOperator.sh
index 27e99c60..2d69f214 100755
--- a/scripts/upgradeOperator.sh
+++ b/scripts/upgradeOperator.sh
@@ -78,7 +78,10 @@ fi
 if [[ $LICENSE_ACCEPTED == "accept" ]]; then
     sed -e '/dba_license/{n;s/value:/value: accept/;}' ./upgradeOperator.yaml > ./upgradeOperatorsav.yaml ;  mv ./upgradeOperatorsav.yaml ./upgradeOperator.yaml
-
+    sed -e '/baw_license/{n;s/value:/value: accept/;}' ./upgradeOperator.yaml > ./upgradeOperatorsav.yaml ;  mv ./upgradeOperatorsav.yaml ./upgradeOperator.yaml
+    sed -e '/fncm_license/{n;s/value:/value: accept/;}' ./upgradeOperator.yaml > ./upgradeOperatorsav.yaml ;  mv ./upgradeOperatorsav.yaml ./upgradeOperator.yaml
+    sed -e '/ier_license/{n;s/value:/value: accept/;}' ./upgradeOperator.yaml > ./upgradeOperatorsav.yaml ;  mv ./upgradeOperatorsav.yaml ./upgradeOperator.yaml
+
 if [ ! -z ${IMAGEREGISTRY} ]; then
 # Change the location of the image
 echo "Using the operator image name: $IMAGEREGISTRY"