From e37522b6331c979c1d276d4d1a975449a8f5ba78 Mon Sep 17 00:00:00 2001
From: Christina Xu
Date: Mon, 12 Aug 2024 09:27:48 -0400
Subject: [PATCH] Update README and other minor edits

---
 README.md                                     | 88 ++++++++++++++-----
 .../java/org/kie/trustyai/ConfigService.java  |  2 +-
 .../payloads/SaliencyExplanationResponse.java | 10 +--
 3 files changed, 72 insertions(+), 28 deletions(-)

diff --git a/README.md b/README.md
index 0fdfb11..30d3105 100644
--- a/README.md
+++ b/README.md
@@ -12,15 +12,15 @@ The TrustyAI KServe integration provides explanations for predictions made by AI

The TrustyAI explainer can be added to KServe `InferenceServices`. Here are YAML configurations to deploy explainers with LIME and SHAP:

-### LIME Explainer `InferenceService`
+### LIME and SHAP Explainer `InferenceService`

-By default, the TrustyAI KServe explainer will use the LIME explainer. You can deploy the explainer using the following YAML configuration:
+By default, the TrustyAI KServe explainer will use **both the LIME and SHAP explainers**. You can deploy the explainers using the following YAML configuration:

```yaml
apiVersion: "serving.kserve.io/v1beta1"
kind: "InferenceService"
metadata:
-  name: "explainer-test-lime"
+  name: "explainer-test-all"
  annotations:
    sidecar.istio.io/inject: "true"
    sidecar.istio.io/rewriteAppHTTPProbers: "true"
    serving.knative.openshift.io/enablePassthrough: "true"
@@ -39,41 +39,85 @@ spec:
  predictor:
    model:
      modelFormat:
        name: sklearn
      protocolVersion: v2
      runtime: kserve-sklearnserver
      storageUri: https://github.com/trustyai-explainability/model-collection/raw/main/credit-score/model.joblib
  explainer:
    containers:
      - name: explainer
        image: quay.io/trustyai/trustyai-kserve-explainer:latest
```

-### Example: Using the LIME Explainer
+### Example: Using Both the LIME and SHAP Explainers

-You can interact with the LIME explainer using the following `curl` command:
+You can interact with the LIME and SHAP explainers using the following `curl` command:

```bash
payload='{"data": {"ndarray": [[1.0, 2.0]]}}' # Adjust payload as per your input requirements
curl -s -H "Host: ${HOST}" \
     -H "Content-Type: application/json" \
-     "http://${GATEWAY}/v1/models/explainer-test-lime:explain" -d $payload
+     "http://${GATEWAY}/v1/models/explainer-test-all:explain" -d $payload
```

-This command sends a JSON payload to the `:explain` endpoint and retrieves an explanation for the prediction. The response structure includes the saliencies of each feature contributing to the prediction, as shown below:
+This command sends a JSON payload to the `:explain` endpoint and retrieves an explanation for the prediction. The response structure includes the explainer type and the saliencies of each feature contributing to the prediction, as shown below:

```json
{
-  "saliencies": {
-    "value": {
-      "output": {"value": {"underlyingObject": 1}, "type": "NUMBER", "score": 1.0, "name": "value"},
-      "perFeatureImportance": [
-        {
-          "feature": {"name": "f", "type": "NUMBER", "value": {"underlyingObject": 0.9}},
-          "score": 0.7474712680313286
-        }
-        // Additional features...
-      ]
-    }
-  },
-  "availableCFs": [],
-  "sourceExplainer": "LIME"
+  "timestamp": "2024-05-06T21:42:45.307+00:00",
+  "LIME": {
+    "saliencies": {
+      "outputs-0": [
+        {
+          "name": "inputs-12",
+          "score": 0.8496797810357467,
+          "confidence": 0
+        },
+        {
+          "name": "inputs-5",
+          "score": 0.6830766647546147,
+          "confidence": 0
+        },
+        {
+          "name": "inputs-7",
+          "score": 0.6768475400887952,
+          "confidence": 0
+        },
+        // Additional features
+      ]
+    }
+  },
+  "SHAP": {
+    "saliencies": {
+      // Additional features
+    }
+  }
}
```
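Because the response nests saliencies per explainer and per output, a little client-side parsing is needed to pull out the feature scores. Below is a minimal sketch of one way to do that with Jackson; the `ExplanationResponseReader` class name and the abridged payload (including the SHAP score) are illustrative, and the field names follow the example above:

```java
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.ObjectMapper;

public class ExplanationResponseReader {
    public static void main(String[] args) throws Exception {
        // Abridged :explain response in the same shape as the example above; values are placeholders.
        String body = """
                {
                  "timestamp": "2024-05-06T21:42:45.307+00:00",
                  "LIME": {"saliencies": {"outputs-0": [
                    {"name": "inputs-12", "score": 0.8496797810357467, "confidence": 0}]}},
                  "SHAP": {"saliencies": {"outputs-0": [
                    {"name": "inputs-12", "score": 0.25, "confidence": 0}]}}
                }
                """;

        JsonNode root = new ObjectMapper().readTree(body);
        for (String explainer : new String[] { "LIME", "SHAP" }) {
            // Each explainer holds a "saliencies" map keyed by output name.
            root.path(explainer).path("saliencies").fields().forEachRemaining(output -> {
                System.out.println(explainer + " / " + output.getKey());
                // Each output maps to a list of {name, score, confidence} entries.
                output.getValue().forEach(feature -> System.out.println(
                        "  " + feature.path("name").asText() + " -> " + feature.path("score").asDouble()));
            });
        }
    }
}
```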
### LIME Explainer `InferenceService`

To use the **LIME explainer only**, deploy the explainer with the explainer type set as an environment variable, using the following YAML configuration (the initial part is identical to the previous `InferenceService`):

```yaml
apiVersion: "serving.kserve.io/v1beta1"
kind: "InferenceService"
metadata:
  name: "explainer-test-lime"
  annotations:
    sidecar.istio.io/inject: "true"
    sidecar.istio.io/rewriteAppHTTPProbers: "true"
    serving.knative.openshift.io/enablePassthrough: "true"
spec:
  predictor:
    model:
      modelFormat:
        name: sklearn
      protocolVersion: v2
      runtime: kserve-sklearnserver
      storageUri: https://github.com/trustyai-explainability/model-collection/raw/main/credit-score/model.joblib
  explainer:
    containers:
      - name: explainer
        image: quay.io/trustyai/trustyai-kserve-explainer:latest
        env:
          - name: EXPLAINER_TYPE # <- specify LIME here
            value: "LIME"
```

### SHAP Explainer `InferenceService`

-To use the SHAP explainer, you can deploy the explainer by specifying it as an environment variable and using the following YAML configuration (initial part will be identical to the previous `InferenceService`):
+To use the **SHAP explainer only**:

```yaml
@@ -102,7 +146,7 @@ spec:
            value: "SHAP"
```

-The explanation request will be identical to the LIME explainer case.
+The explanation request for the LIME-only or SHAP-only explainer is identical to the combined LIME and SHAP case.

## Configuration

@@ -110,7 +154,7 @@ The following environment variables can be used in the `InferenceService` to cus
| Name | Description | Default |
|--------------------------------------------------------------------------|--------------------------------------------------------------------|---------------|
-| `EXPLAINER_TYPE` | `LIME` or `SHAP`, the explainer to use. | `LIME` |
+| `EXPLAINER_TYPE` | `ALL`, `LIME`, or `SHAP`, the explainer to use. | `ALL` |
| `LIME_SAMPLES` | The number of samples to use in LIME | `200` |
| `LIME_RETRIES` | Number of LIME retries | `2` |
| `LIME_WLR` | Use LIME Weighted Linear Regression, `true` or `false` | `true` |
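Inside the explainer container these variables are read through MicroProfile Config, so `EXPLAINER_TYPE` and `LIME_SAMPLES` surface as the `explainer.type` and `lime.samples` properties consumed by `ConfigService` (see the change below). A minimal sketch of that lookup, assuming a MicroProfile Config implementation is on the classpath; the `ExplainerConfigCheck` class name is illustrative, while the property names and defaults follow the `ConfigService` diff:

```java
import org.eclipse.microprofile.config.Config;
import org.eclipse.microprofile.config.ConfigProvider;

public class ExplainerConfigCheck {
    public static void main(String[] args) {
        Config config = ConfigProvider.getConfig();

        // EXPLAINER_TYPE and LIME_SAMPLES set on the explainer container map to these
        // property names through MicroProfile Config's environment-variable naming rules.
        String explainerType = config.getOptionalValue("explainer.type", String.class).orElse("ALL");
        int limeSamples = config.getOptionalValue("lime.samples", Integer.class).orElse(200);

        System.out.println("explainer.type=" + explainerType + ", lime.samples=" + limeSamples);
    }
}
```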
diff --git a/src/main/java/org/kie/trustyai/ConfigService.java b/src/main/java/org/kie/trustyai/ConfigService.java
index d040919..7a970a0 100644
--- a/src/main/java/org/kie/trustyai/ConfigService.java
+++ b/src/main/java/org/kie/trustyai/ConfigService.java
@@ -8,7 +8,7 @@
@ApplicationScoped
public class ConfigService {

-    @ConfigProperty(name = "explainer.type")
+    @ConfigProperty(name = "explainer.type", defaultValue = "ALL")
    ExplainerType explainerType;
    @ConfigProperty(name = "lime.samples", defaultValue = "200")
    int limeSamples;
diff --git a/src/main/java/org/kie/trustyai/payloads/SaliencyExplanationResponse.java b/src/main/java/org/kie/trustyai/payloads/SaliencyExplanationResponse.java
index 544096a..c048234 100644
--- a/src/main/java/org/kie/trustyai/payloads/SaliencyExplanationResponse.java
+++ b/src/main/java/org/kie/trustyai/payloads/SaliencyExplanationResponse.java
@@ -37,12 +37,13 @@ public void setSaliencies(Map<ExplainerType, Map<String, List<FeatureSaliency>>
    @Override
    public String toString() {
        return "SaliencyExplanationResponse{" +
                "timestamp=" + timestamp +
-                ", type='" + type + '\'' +
-                ", saliencies=" + saliencies +
+                ", LIME=" + saliencies.get(ExplainerType.LIME) +
+                ", SHAP=" + saliencies.get(ExplainerType.SHAP) +
                '}';
    }

+
    public static class FeatureSaliency {

        private String name;
@@ -146,5 +147,4 @@ public static SaliencyExplanationResponse fromSaliencyResults(@Nonnull SaliencyR

        return new SaliencyExplanationResponse(combinedMap);
    }
-
}
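With this change, `toString()` reports the LIME and SHAP saliency maps separately. A rough usage sketch follows, assuming the public constructor accepts the same per-explainer map that `fromSaliencyResults` assembles; the `ExplainerType` import path, the empty placeholder lists, and the printed timestamp are assumptions made for illustration:

```java
import java.util.EnumMap;
import java.util.List;
import java.util.Map;

import org.kie.trustyai.payloads.ExplainerType; // assumed package; adjust to where ExplainerType actually lives
import org.kie.trustyai.payloads.SaliencyExplanationResponse;
import org.kie.trustyai.payloads.SaliencyExplanationResponse.FeatureSaliency;

public class ToStringSketch {
    public static void main(String[] args) {
        // Per-explainer saliency map in the shape fromSaliencyResults builds:
        // explainer -> output name -> list of feature saliencies. Empty lists are placeholders.
        Map<ExplainerType, Map<String, List<FeatureSaliency>>> combined = new EnumMap<>(ExplainerType.class);
        combined.put(ExplainerType.LIME, Map.of("outputs-0", List.<FeatureSaliency>of()));
        combined.put(ExplainerType.SHAP, Map.of("outputs-0", List.<FeatureSaliency>of()));

        SaliencyExplanationResponse response = new SaliencyExplanationResponse(combined);
        // Prints something like: SaliencyExplanationResponse{timestamp=..., LIME={outputs-0=[]}, SHAP={outputs-0=[]}}
        System.out.println(response);
    }
}
```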