diff --git a/Dockerfile b/Dockerfile index 5e5a4828..0053073c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,34 +2,31 @@ # https://hub.docker.com/_/python FROM python:3.7-slim -# Default demo app command to run. -ENV APP_COMMAND "lit_nlp.examples.lm_demo:get_wsgi_app()" - -# Copy local code to the container image. -ENV APP_HOME /app -WORKDIR $APP_HOME -COPY . ./ - # Update Ubuntu packages and install basic utils RUN apt-get update RUN apt-get install -y wget curl gnupg2 gcc g++ # Install yarn RUN curl -sS https://dl.yarnpkg.com/debian/pubkey.gpg | apt-key add - -RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | tee /etc/apt/sources.list.d/yarn.list +RUN echo "deb https://dl.yarnpkg.com/debian/ stable main" | \ + tee /etc/apt/sources.list.d/yarn.list RUN apt update && apt -y install yarn # Install Anaconda RUN wget https://repo.anaconda.com/miniconda/Miniconda3-latest-Linux-x86_64.sh \ - && bash Miniconda3-latest-Linux-x86_64.sh -b -p /opt/anaconda3 \ - && rm Miniconda3-latest-Linux-x86_64.sh + && bash Miniconda3-latest-Linux-x86_64.sh -b -p /opt/anaconda3 \ + && rm Miniconda3-latest-Linux-x86_64.sh # Set path to conda ENV PATH /opt/anaconda3/bin:$PATH +# Copy local code to the container image. +ENV APP_HOME /app +WORKDIR $APP_HOME +COPY . ./ + # Set up conda environment with production dependencies # This step is slow as it installs many packages. -COPY environment.yml . RUN conda env create -f environment.yml # Workaround for 'conda activate' depending on shell features @@ -50,5 +47,15 @@ WORKDIR lit_nlp/client RUN yarn && yarn build && rm -rf node_modules/* WORKDIR $APP_HOME +# Default demo app to run. +ARG DEFAULT_DEMO="glue_demo" +ENV DEMO_NAME $DEFAULT_DEMO + +ARG DEFAULT_PORT="5432" +ENV DEMO_PORT $DEFAULT_PORT + # Run LIT server -CMD exec gunicorn -c lit_nlp/examples/gunicorn_config.py $APP_COMMAND +ENTRYPOINT exec gunicorn \ + -c lit_nlp/examples/gunicorn_config.py \ + --bind="0.0.0.0:$DEMO_PORT" \ + "lit_nlp.examples.$DEMO_NAME:get_wsgi_app()" diff --git a/RELEASE.md b/RELEASE.md index cbadcc3a..c01393eb 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -1,12 +1,29 @@ # Language Interpretability Tool releases +## Release 0.4.1 + +This is a bug-fix release aimed at improving visual clarity and common +workflows. + +The UI has been slightly revamped, bugs have been fixed, and new capabilities +have been added. Notable changes include: + +- Adds "open in new tab" feature to LIT Notebook widget +- Adds support for `SparseMultilabelPreds` to LIME +- Improves color consistency across the UI +- Switches to NumPy instead of scikit-learn for PCA +- Ensures all built-in demos are compatible with the Docker image +- Updates the Dockerfile to support run-time `DEMO_NAME` and `DEMO_PORT` args +- Fixes a rendering bug in the Confusion Matrix related to column and row spans + when "hide empty labels" is turned on + ## Release 0.4 This release adds a lot of new features. The website and documentation have been updated accordingly. The UI has been slightly revamped, bugs have been fixed, and new capabilities -have been added. Noteable changes include: +have been added. Notable changes include: - Support for Google Cloud Vertex AI notebooks. - Preliminary support for tabular and image data, in addition to NLP models. - Addition of TCAV global interpretability method. - New counterfactual generators for text data for minimal changes to flip predictions. - New counterfactual generator for tabular data for minimal changes to flip predictions. 
-- Partial depdence plots for tabular input features. +- Partial dependence plots for tabular input features. - Ability to set binary classification thresholds separately for different facets of the dataset - Controls to find optimal thresholds across facets given different fairness @@ -26,12 +43,12 @@ This release adds the ability to use LIT directly in colab and jupyter notebooks. The website and documentation have been updated accordingly. The UI has been slightly revamped, bugs have been fixed, and new capabilities -have been added. Noteable changes include: +have been added. Notable changes include: - Notebook mode added. - New annotated text visualization module added. - Allow saving/loading of generated datapoints, and dynamic adding of new datasets by path in the UI. -- Added syncronized scrolling between duplicated modules when comparing +- Added synchronized scrolling between duplicated modules when comparing datapoints or models. - Added a focus service for visually linking focus (i.e. hover) states between components. diff --git a/docs/assets/images/actor_to_actress.png b/docs/assets/images/actor_to_actress.png index 0cc2689a..40083bf5 100644 Binary files a/docs/assets/images/actor_to_actress.png and b/docs/assets/images/actor_to_actress.png differ diff --git a/docs/assets/images/actress_relative_cav.png b/docs/assets/images/actress_relative_cav.png index 546b6b8c..a3187aaa 100644 Binary files a/docs/assets/images/actress_relative_cav.png and b/docs/assets/images/actress_relative_cav.png differ diff --git a/docs/assets/images/lit-coref-compare.png b/docs/assets/images/lit-coref-compare.png index 5c6212d0..a008c66d 100644 Binary files a/docs/assets/images/lit-coref-compare.png and b/docs/assets/images/lit-coref-compare.png differ diff --git a/docs/assets/images/lit-coref-data.png b/docs/assets/images/lit-coref-data.png index 82888513..adc2297d 100644 Binary files a/docs/assets/images/lit-coref-data.png and b/docs/assets/images/lit-coref-data.png differ diff --git a/docs/assets/images/lit-coref-metric-top.png b/docs/assets/images/lit-coref-metric-top.png index ec9862ea..0cb95772 100644 Binary files a/docs/assets/images/lit-coref-metric-top.png and b/docs/assets/images/lit-coref-metric-top.png differ diff --git a/docs/assets/images/lit-coref-metrics.png b/docs/assets/images/lit-coref-metrics.png index 03d68d40..995c4187 100644 Binary files a/docs/assets/images/lit-coref-metrics.png and b/docs/assets/images/lit-coref-metrics.png differ diff --git a/docs/assets/images/lit-coref-pred.png b/docs/assets/images/lit-coref-pred.png index d7d4a937..dfe8099b 100644 Binary files a/docs/assets/images/lit-coref-pred.png and b/docs/assets/images/lit-coref-pred.png differ diff --git a/docs/assets/images/lit-coref-select.png b/docs/assets/images/lit-coref-select.png index f0050977..ca4e7c05 100644 Binary files a/docs/assets/images/lit-coref-select.png and b/docs/assets/images/lit-coref-select.png differ diff --git a/docs/assets/images/lit-datatable-search.png b/docs/assets/images/lit-datatable-search.png index 6b8702c3..905bbdb3 100644 Binary files a/docs/assets/images/lit-datatable-search.png and b/docs/assets/images/lit-datatable-search.png differ diff --git a/docs/assets/images/lit-metrics-not.png b/docs/assets/images/lit-metrics-not.png index f89c68d6..7d074c19 100644 Binary files a/docs/assets/images/lit-metrics-not.png and b/docs/assets/images/lit-metrics-not.png differ diff --git a/docs/assets/images/lit-not-saliency.png b/docs/assets/images/lit-not-saliency.png index 1ff6d971..33d9cba6 
100644 Binary files a/docs/assets/images/lit-not-saliency.png and b/docs/assets/images/lit-not-saliency.png differ diff --git a/docs/assets/images/lit-saliency.png b/docs/assets/images/lit-saliency.png index 2cd0d27d..f9af0032 100644 Binary files a/docs/assets/images/lit-saliency.png and b/docs/assets/images/lit-saliency.png differ diff --git a/docs/assets/images/lit-sim-search.png b/docs/assets/images/lit-sim-search.png index 529c27b9..8c95a6d2 100644 Binary files a/docs/assets/images/lit-sim-search.png and b/docs/assets/images/lit-sim-search.png differ diff --git a/docs/assets/images/lit-t5.png b/docs/assets/images/lit-t5.png index b63c1a1c..8db30f4a 100644 Binary files a/docs/assets/images/lit-t5.png and b/docs/assets/images/lit-t5.png differ diff --git a/docs/assets/images/lit-toolbars.gif b/docs/assets/images/lit-toolbars.gif index e5ed75f5..d0b856ea 100644 Binary files a/docs/assets/images/lit-toolbars.gif and b/docs/assets/images/lit-toolbars.gif differ diff --git a/docs/assets/images/lit-tweet.gif b/docs/assets/images/lit-tweet.gif index 4579c026..4525c18e 100644 Binary files a/docs/assets/images/lit-tweet.gif and b/docs/assets/images/lit-tweet.gif differ diff --git a/docs/assets/images/lit-workspaces.jpg b/docs/assets/images/lit-workspaces.jpg deleted file mode 100644 index 25a6609e..00000000 Binary files a/docs/assets/images/lit-workspaces.jpg and /dev/null differ diff --git a/docs/assets/images/lit-workspaces.png b/docs/assets/images/lit-workspaces.png new file mode 100644 index 00000000..ca0b9ff7 Binary files /dev/null and b/docs/assets/images/lit-workspaces.png differ diff --git a/docs/assets/images/lit_data_table_annotated.png b/docs/assets/images/lit_data_table_annotated.png index f8efede2..830ad979 100644 Binary files a/docs/assets/images/lit_data_table_annotated.png and b/docs/assets/images/lit_data_table_annotated.png differ diff --git a/docs/assets/images/lit_slice_editor_annotated.png b/docs/assets/images/lit_slice_editor_annotated.png index 1f670341..066eaca9 100644 Binary files a/docs/assets/images/lit_slice_editor_annotated.png and b/docs/assets/images/lit_slice_editor_annotated.png differ diff --git a/docs/assets/images/lit_tcav_screen_annotated.png b/docs/assets/images/lit_tcav_screen_annotated.png index 115696da..f81b03f7 100644 Binary files a/docs/assets/images/lit_tcav_screen_annotated.png and b/docs/assets/images/lit_tcav_screen_annotated.png differ diff --git a/docs/assets/images/lit_tcav_settings_annotated.png b/docs/assets/images/lit_tcav_settings_annotated.png index d48f0eb2..54db8e31 100644 Binary files a/docs/assets/images/lit_tcav_settings_annotated.png and b/docs/assets/images/lit_tcav_settings_annotated.png differ diff --git a/docs/assets/images/tcav_result.png b/docs/assets/images/tcav_result.png index 0f76b4d9..385001f4 100644 Binary files a/docs/assets/images/tcav_result.png and b/docs/assets/images/tcav_result.png differ diff --git a/docs/assets/images/tcav_results_2.png b/docs/assets/images/tcav_results_2.png index b10ac569..ea43952e 100644 Binary files a/docs/assets/images/tcav_results_2.png and b/docs/assets/images/tcav_results_2.png differ diff --git a/docs/demos/coref.html b/docs/demos/coref.html index 1d01facd..8cd698ee 100644 --- a/docs/demos/coref.html +++ b/docs/demos/coref.html @@ -1,5 +1,5 @@ \ No newline at end of file diff --git a/docs/demos/glue.html b/docs/demos/glue.html index 07b5c8af..650e104a 100644 --- a/docs/demos/glue.html +++ b/docs/demos/glue.html @@ -1,5 +1,5 @@ \ No newline at end of file diff --git 
a/docs/demos/images.html b/docs/demos/images.html index 56ba865e..750dfbe2 100644 --- a/docs/demos/images.html +++ b/docs/demos/images.html @@ -1,5 +1,5 @@ \ No newline at end of file diff --git a/docs/demos/index.html b/docs/demos/index.html index 112b22fa..6b3b3ecc 100644 --- a/docs/demos/index.html +++ b/docs/demos/index.html @@ -122,12 +122,12 @@
Notebook usage
BERT binary classification notebooks
-
DATA SOURCES
+
DATA SOURCE
- Stanford Sentiment Treebank + Stanford Sentiment Treebank
Use LIT directly inside a Colab notebook. Explore binary classification for sentiment analysis using SST2 from the General Language Understanding Evaluation (GLUE) benchmark suite.
-
+
Gender bias in coreference systems
diff --git a/docs/demos/lm.html b/docs/demos/lm.html index d132c1be..750dfbe2 100644 --- a/docs/demos/lm.html +++ b/docs/demos/lm.html @@ -1,5 +1,5 @@ \ No newline at end of file diff --git a/docs/demos/penguins.html b/docs/demos/penguins.html index 09850b34..0d3f22be 100644 --- a/docs/demos/penguins.html +++ b/docs/demos/penguins.html @@ -1,5 +1,5 @@ \ No newline at end of file diff --git a/docs/demos/t5.html b/docs/demos/t5.html index 66562571..8d499e72 100644 --- a/docs/demos/t5.html +++ b/docs/demos/t5.html @@ -1,5 +1,5 @@ \ No newline at end of file diff --git a/docs/index.html b/docs/index.html index ab652a37..8208c065 100644 --- a/docs/index.html +++ b/docs/index.html @@ -98,7 +98,7 @@
  • Does my model behave consistently if I change things like textual style, verb tense, or pronoun gender?
  • LIT contains many built-in capabilities but is also customizable, with the ability to add custom interpretability techniques, metrics calculations, counterfactual generators, visualizations, and more.

    -

    LIT also contains preliminary support for non-language models, working with tabular and image data. For a similar tool built to explore general-purpose machine learning models, check out the What-If Tool.

    +

    In addition to language, LIT also includes preliminary support for models operating on tabular and image data. For a similar tool built to explore general-purpose machine learning models, check out the What-If Tool.

    LIT can be run as a standalone server, or inside of python notebook environments such as Colab, Jupyter, and Google Cloud Vertex AI Notebooks.

    @@ -155,7 +155,7 @@

    Version 0.4

    -
    CODE
    +
    DOCS

    Documentation

    LIT is open-source and easily extensible to new models, tasks, and more.
    View documentation
    diff --git a/docs/tutorials/generation/index.html b/docs/tutorials/generation/index.html index bf0c075d..983c3cae 100644 --- a/docs/tutorials/generation/index.html +++ b/docs/tutorials/generation/index.html @@ -90,7 +90,7 @@

    Debugging a Text Generator

    from the T5 decoder. With one click, we retrieve the 25 nearest neighbors to our datapoint of interest from the training set and add them to the LIT UI for inspection. We can see through the search capability in the data table that the words “captain” and “former” appear 34 and 16 times in these examples–along with 3 occurrences of “replaced by” and two occurrences of “by former”. This suggests a strong prior toward our erroneous phrase from the training data most related to our datapoint of interest.

    -
    Above: An example of the existance of "by former" in a similar datapoint from the training set.
    +
    Above: An example of the existence of "by former" in a similar datapoint from the training set.
    diff --git a/docs/tutorials/tcav/index.html b/docs/tutorials/tcav/index.html index d66def1a..886a04c3 100644 --- a/docs/tutorials/tcav/index.html +++ b/docs/tutorials/tcav/index.html @@ -118,9 +118,9 @@

    Create a Concept

    1: Selecting a slice as a concept and 2: running TCAV with the specified options

    Interpreting TCAV scores

    -

    Once we run TCAV, we see an entry in the table in the TCAV module for each concept tested. Each concept gets a CAV (“Concept Activation Vector”) score between 0 and 1 describing the concept’s effect on the prediction of the class in question. What you want to look at is where the blue bar (CAV score) is relative to the black line (reference point). The reference point indicates the effect that slices made of randomly-chosen datapoints outside of the concept being tested has on prediction of the class. For a well-calibrated classifier, the reference point will usually be near 0.5 (i.e. no effect).

    -

    A blue bar extending beyond or falling short of the black line means the concept is influencing the prediction. If the blue bar extends beyond the black line, the concept is positively influencing the prediction; if it falls short, it is negatively influencing.

    -

    In our example, the CAV score of ~.85 indicates that our “acting” concept has a strong positive effect on the prediction of this class. So we have found that this concept has a positive effect on predicting positive sentiment for our classifier.

    +

Once we run TCAV, we see an entry in the table in the TCAV module for each concept tested. Each concept gets a CAV (“Concept Activation Vector”) score between 0 and 1 describing the concept’s effect on the prediction of the class in question. What matters is where the blue bar (CAV score) is relative to the black line (reference point). The reference point indicates the effect that slices made of randomly-chosen datapoints outside of the concept being tested have on the prediction of the class. For a well-calibrated classifier, the reference point will usually be near 0.5 (i.e. no effect).

    +

A blue bar extending to the right or left of the black line means the concept is influencing the prediction. If the blue bar extends to the right of the black line, the concept is positively influencing the prediction. Conversely, if the bar extends to the left, it is negatively influencing the prediction. In either case, the larger the bar, the greater the influence.

    +

    In our example, the CAV score of ~0.91 indicates that our “acting” concept has a strong positive effect on the prediction of this class. So we have found that this concept has a positive effect on predicting positive sentiment for our classifier.

    TCAV results for our “acting” concept
    diff --git a/docs/tutorials/tour/index.html b/docs/tutorials/tour/index.html index 6d50b127..7aa59d3c 100644 --- a/docs/tutorials/tour/index.html +++ b/docs/tutorials/tour/index.html @@ -84,7 +84,7 @@

    A Quick Tour of the Language Interpretability Tool

    Building blocks - modules, groups, and workspaces

    Modules, groups, and workspaces form the building blocks of LIT. Modules are discrete windows in which you can perform a specific set of tasks or analyses. Workspaces display combinations of modules known as groups, so you can view different visualizations and interpretability methods side-by-side.

    - +
    Above: Building blocks of the Language Interpretability Tool: (1) Modules, (2) Groups, (3) Static workspace, (4) Group-based workspace.

    LIT is divided into two workspaces - a Main workspace in the upper half of the interface, and a Group-based workspace in the lower half.

    @@ -108,7 +108,7 @@

    Toolbars

    @@ -123,7 +123,7 @@

    Using Modules

    Visualizations that scale
    Visualizations and results within modules can scale depending on if you're looking at one data point, multiple data points, one or multiple models. For instance, turning on the “compare datapoints” toggle allows you to compare a selected datapoint (known as the reference datapoint) to others by presenting individual results side-by-side within relevant modules.
    -

    Now that you’re familiar with LIT’s interface, take LIT for a spin in our demos or explore different case studies.

    +

    Now that you are familiar with LIT’s interface, take LIT for a spin in our demos or explore different case studies.

diff --git a/documentation/api.md b/documentation/api.md index b2a319c3..53f41ec1 100644 --- a/documentation/api.md +++ b/documentation/api.md @@ -803,7 +803,13 @@ passing in a dict of models and a dict of datasets, similar to the parameter that specifies the height in pixels to render the LIT UI. Then, in its own output cell, call the `render` method on the widget object to -render the LIT UI. The LIT UI can be rendered in multiple cells if desired. +render the LIT UI. The LIT UI can be rendered in multiple cells if desired. The +LIT UI can also be rendered in its own browser tab, outside of the notebook, by +passing the parameter `open_in_new_tab=True` to the `render` method. The +`render` method can optionally take a configuration object that specifies +rendering options, such as the selected layout, current display tab, dataset, +and models. See +[notebook.py](../lit_nlp/notebook.py) for details. The widget has a `stop` method which shuts down the widget's server. This can be important for freeing up resources if you plan to create multiple LIT widget diff --git a/documentation/components.md b/documentation/components.md index 8bbf1213..4bc2927e 100644 --- a/documentation/components.md +++ b/documentation/components.md @@ -1,6 +1,6 @@ # Components and Features - + @@ -23,7 +23,7 @@ out-of-the-box support for a few modeling frameworks, described below. LIT supports Estimator and other TF1.x models, but the model wrappers can be more involved due to the need to explicitly manage the graph and sessions. (In -particular: `Estimator.predict()` cannot be used because it re-loads the model +particular: `Estimator.predict()` cannot be used because it reloads the model on every invocation.) Generally, you'll need to: * In your model's `__init__()`, build the graph, create a persistent TF @@ -31,10 +31,10 @@ on every invocation.) Generally, you'll need to: * In your `predict()` or `predict_minibatch()` function, build a feed dict and call `session.run` directly. -Alternatively, you can export to a SavedModel and load this in an eager mode +Alternatively, you can export to a `SavedModel` and load this in an eager mode runtime. This leads to much simpler code (see [this example](../lit_nlp/examples/models/t5.py?l=72&rcl=378713625)), -but may require changes to your SavedModel exporter in order to access model +but may require changes to your `SavedModel` exporter in order to access model internals like embeddings, gradients, or attention. ### HuggingFace Transformers @@ -102,7 +102,7 @@ functionality, and counterfactual generation - are available for any model. ### Classification LIT supports many features for classification tasks, including common metrics, -confusion matricies, and custom thresholding via the UI. Classification is +confusion matrices, and custom thresholding via the UI. Classification is implemented with the `MulticlassPreds` and `CategoryLabel` types. * For a basic example on a binary sentiment task, see @@ -171,7 +171,7 @@ or decoder. `TextSegment` field (for a single reference) or a `ReferenceTexts` field (for multiple references), and the model's output field should set `parent=` accordingly. -* To use a model in scoring mode over one or more pre-defined target +* To use a model in scoring mode over one or more predefined target sequences, the model can also output a `ReferenceScores` field (with values as `List[float]`) with `parent=` set to reference a `TextSegment` or `ReferenceTexts` field from the input. 
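A minimal sketch of that scoring-mode contract, for illustration only: the class and field names `MyScoringModel`, `source`, and `target_text` are hypothetical, while `ReferenceTexts`, `ReferenceScores`, and the `parent=` wiring are as described above.

```python
from lit_nlp.api import model as lit_model
from lit_nlp.api import types as lit_types


class MyScoringModel(lit_model.Model):  # hypothetical example model
  """Scores one or more predefined target sequences per input."""

  def input_spec(self):
    return {
        "source": lit_types.TextSegment(),
        "target_text": lit_types.ReferenceTexts(),  # candidate targets
    }

  def output_spec(self):
    # parent= points back at the reference field in the input spec.
    return {"scores": lit_types.ReferenceScores(parent="target_text")}

  def predict_minibatch(self, inputs):
    # Should return, per example, a List[float] with one score per reference.
    raise NotImplementedError("Model-specific scoring goes here.")
```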
@@ -230,14 +230,15 @@ images as model output. The LIT type `ImageBytes` can be used as a feature in datasets and as part of an input spec or output spec for a model. That feature's value must be a base64 encoded string for an image. -NOTE: We may transition images away from encded strings, moving to individual +NOTE: We may transition images away from encoded strings, moving to individual pixel color values. We will ensure we don't break existing checked-in code with such a change. -* See [lit_nlp/examples/datasets/open_images.py](../lit_nlp/examples/datasets/open_images.py) for a dataset - containing images, including converting images to base64 encoded strings. +* See `google3/third_party/py/lit_nlp/examples/datasets/open_images.py` for a + dataset containing images, including converting images to base64 encoded + strings. * For a demo of an image classifier, see - [lit_nlp/examples/image_demo.py](../lit_nlp/examples/image_demo.py). + `google3/third_party/py/lit_nlp/examples/image_demo.py`. ## Token-based Salience @@ -390,7 +391,7 @@ To enable pixel-based salience methods for models that take images as inputs, your model should, as part of the [output spec and `predict()` implementation](./api.md#models): -* Return a `ImageGradients` field with the `align` attriute pointing to the +* Return a `ImageGradients` field with the `align` attribute pointing to the name of the `ImageBytes` field and, optionally, the `grad_target_field_key` attribute pointing to the `CategoryLabel` field in input spec that specifies the target class for which to take gradients, if the model can process that @@ -509,7 +510,7 @@ Custom metrics can be easily defined in Python; see the ### Confusion Matrix LIT includes a powerful and flexible confusion matrix, which can be used to -compar predictions to gold labels as well as to compare between two models or +compare predictions to gold labels as well as to compare between two models or between different categorical features. You can click cells or row/column headers to select a subset of examples, which is useful for intersectional analysis. @@ -572,7 +573,7 @@ on different fairness constraints that may be of interest to the user. One such constraint is demographic parity, which attempts to have an equal percentage of positive classifications for each subset. Another is equal accuracy, which attempts to have an equal accuracy score for each subset. There -is also equal opporitunity, which attempts to equalize for each subset the +is also equal opportunity, which attempts to equalize for each subset the percentage of positive predictions among those datapoints with a positive ground truth label. @@ -631,12 +632,12 @@ Every dataset/model is different, but for images, as low as 15 data points are shown to be sufficient. Start by adding at least 3 data points and add more as needed -For this example, we select all examples containing the word 'humor' in the data -table. +For this example, we select all examples related to acting in the data table +using the selector `acting|actor|actress`. ![Data table - select examples](./images/components/tcav-search-examples.png) -2.) Next, name the slice and click 'Create slice'. +2.) Next, name the slice `acting` and click 'Create slice'. ![Slice](./images/components/tcav-create-slice.png) @@ -651,11 +652,11 @@ selected slice's examples against those in the negative slice. ![TCAV1](./images/components/tcav-select-slice.png) When the run is complete (usually after a few seconds), the results are -displayed in the table. 
In this example, the TCAV score is ~0.8, which is higher -than the baseline indicated by the black bar. (Technically, the baseline -represents 'null hypothesis', calculated with random concepts.) From this, we -gather that the humor concept positively influences the prediction class 1, or -positive sentiment. +displayed in the table. In this example, the TCAV score is ~0.9 (shown by the +blue bar in the score bar), which is higher than the baseline (shown as the +black bar in the score bar), indicating that the acting concept positively +influences the prediction class 1, or positive sentiment. (Technically, the +baseline represents the 'null hypothesis', calculated with random concepts.) ![TCAV2](./images/components/tcav-results-table.png) diff --git a/documentation/docker.md b/documentation/docker.md index a4978caa..e98382a6 100644 --- a/documentation/docker.md +++ b/documentation/docker.md @@ -1,6 +1,6 @@ # Running LIT in a Docker container - + Users might want to deploy LIT onto servers for public-facing, long-running instances. This is how we host the LIT demos found on @@ -29,3 +29,11 @@ found in Then, our container is built and deployed following the basics of the [GKE tutorial](https://cloud.google.com/kubernetes-engine/docs/tutorials/hello-app). + +You can launch any of the built-in demos from the same Docker image. First, +build the image with `docker build -t lit:latest .`. Running a container from +this image, as with `docker run --rm -p 5432:5432 lit:latest`, will start +the GLUE demo and serve it on port 5432. You can change the demo by setting the +`$DEMO_NAME` environment variable to one of the valid demo names, and you can +change the port by setting the `$DEMO_PORT` environment variable. Remember to +change the `-p` option to forward the container's port to the host. 
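For example, a minimal sketch of both overrides together (assuming `lm_demo` is one of the valid demo names, as it was the default in the previous Dockerfile; port 6006 is an arbitrary choice):

```bash
# Build the image once; every built-in demo ships in it.
docker build -t lit:latest .

# Override the default GLUE demo: run lm_demo, served on port 6006.
docker run --rm -p 6006:6006 \
  -e DEMO_NAME=lm_demo \
  -e DEMO_PORT=6006 \
  lit:latest
```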
diff --git a/documentation/images/components/attention.png b/documentation/images/components/attention.png index 906422af..c917ee70 100644 Binary files a/documentation/images/components/attention.png and b/documentation/images/components/attention.png differ diff --git a/documentation/images/components/classification-results.png b/documentation/images/components/classification-results.png index 90f21f11..991fe6ff 100644 Binary files a/documentation/images/components/classification-results.png and b/documentation/images/components/classification-results.png differ diff --git a/documentation/images/components/confusion-matrix.png b/documentation/images/components/confusion-matrix.png index ab23d3a9..5569a74b 100644 Binary files a/documentation/images/components/confusion-matrix.png and b/documentation/images/components/confusion-matrix.png differ diff --git a/documentation/images/components/embeddings.png b/documentation/images/components/embeddings.png index 86341c99..ab92d4d7 100644 Binary files a/documentation/images/components/embeddings.png and b/documentation/images/components/embeddings.png differ diff --git a/documentation/images/components/generation-results.png b/documentation/images/components/generation-results.png index 4969e2d1..4d67befa 100644 Binary files a/documentation/images/components/generation-results.png and b/documentation/images/components/generation-results.png differ diff --git a/documentation/images/components/generator-module.png b/documentation/images/components/generator-module.png index cf5716fb..46c8c672 100644 Binary files a/documentation/images/components/generator-module.png and b/documentation/images/components/generator-module.png differ diff --git a/documentation/images/components/lime-options.png b/documentation/images/components/lime-options.png index 3005632f..9cbbbd99 100644 Binary files a/documentation/images/components/lime-options.png and b/documentation/images/components/lime-options.png differ diff --git a/documentation/images/components/lit-pdps.png b/documentation/images/components/lit-pdps.png index f8d29b1a..efe9fdd7 100644 Binary files a/documentation/images/components/lit-pdps.png and b/documentation/images/components/lit-pdps.png differ diff --git a/documentation/images/components/lit-thresholder.png b/documentation/images/components/lit-thresholder.png index 7bd0ec5f..607251f7 100644 Binary files a/documentation/images/components/lit-thresholder.png and b/documentation/images/components/lit-thresholder.png differ diff --git a/documentation/images/components/manual-edit.png b/documentation/images/components/manual-edit.png index 8f00dd79..7eaf7614 100644 Binary files a/documentation/images/components/manual-edit.png and b/documentation/images/components/manual-edit.png differ diff --git a/documentation/images/components/salience-map.png b/documentation/images/components/salience-map.png index 0807767f..0b484d49 100644 Binary files a/documentation/images/components/salience-map.png and b/documentation/images/components/salience-map.png differ diff --git a/documentation/images/components/scalars-sst2.png b/documentation/images/components/scalars-sst2.png index 9daf05f2..a034fd6a 100644 Binary files a/documentation/images/components/scalars-sst2.png and b/documentation/images/components/scalars-sst2.png differ diff --git a/documentation/images/components/side-by-side-salience.png b/documentation/images/components/side-by-side-salience.png index c11c05e9..09b38d99 100644 Binary files 
a/documentation/images/components/side-by-side-salience.png and b/documentation/images/components/side-by-side-salience.png differ diff --git a/documentation/images/components/structured-preds.png b/documentation/images/components/structured-preds.png index fa573fcb..fae4aaeb 100644 Binary files a/documentation/images/components/structured-preds.png and b/documentation/images/components/structured-preds.png differ diff --git a/documentation/images/components/tcav-create-slice.png b/documentation/images/components/tcav-create-slice.png index 9f6f8f03..e0db27fc 100644 Binary files a/documentation/images/components/tcav-create-slice.png and b/documentation/images/components/tcav-create-slice.png differ diff --git a/documentation/images/components/tcav-results-table.png b/documentation/images/components/tcav-results-table.png index 339bc55b..385001f4 100644 Binary files a/documentation/images/components/tcav-results-table.png and b/documentation/images/components/tcav-results-table.png differ diff --git a/documentation/images/components/tcav-search-examples.png b/documentation/images/components/tcav-search-examples.png index 0afbb8e2..eb6e096b 100644 Binary files a/documentation/images/components/tcav-search-examples.png and b/documentation/images/components/tcav-search-examples.png differ diff --git a/documentation/images/components/tcav-select-slice.png b/documentation/images/components/tcav-select-slice.png index d678f80c..849632ae 100644 Binary files a/documentation/images/components/tcav-select-slice.png and b/documentation/images/components/tcav-select-slice.png differ diff --git a/documentation/images/figure-1.png b/documentation/images/figure-1.png index c4732000..b14d6cbe 100644 Binary files a/documentation/images/figure-1.png and b/documentation/images/figure-1.png differ diff --git a/documentation/images/lit-attention.png b/documentation/images/lit-attention.png index 69d80e52..93102b58 100644 Binary files a/documentation/images/lit-attention.png and b/documentation/images/lit-attention.png differ diff --git a/documentation/images/lit-classification-results.png b/documentation/images/lit-classification-results.png index 0a49f258..ebfdb8fe 100644 Binary files a/documentation/images/lit-classification-results.png and b/documentation/images/lit-classification-results.png differ diff --git a/documentation/images/lit-conf-matrix.png b/documentation/images/lit-conf-matrix.png index 65603711..4d17b79e 100644 Binary files a/documentation/images/lit-conf-matrix.png and b/documentation/images/lit-conf-matrix.png differ diff --git a/documentation/images/lit-datapoint-compare.png b/documentation/images/lit-datapoint-compare.png index 4fdd6401..6d42bb9a 100644 Binary files a/documentation/images/lit-datapoint-compare.png and b/documentation/images/lit-datapoint-compare.png differ diff --git a/documentation/images/lit-datapoint-generator.png b/documentation/images/lit-datapoint-generator.png index cb6c07a4..921625df 100644 Binary files a/documentation/images/lit-datapoint-generator.png and b/documentation/images/lit-datapoint-generator.png differ diff --git a/documentation/images/lit-datatable.png b/documentation/images/lit-datatable.png index 9cdd05df..52fac18f 100644 Binary files a/documentation/images/lit-datatable.png and b/documentation/images/lit-datatable.png differ diff --git a/documentation/images/lit-embeddings.png b/documentation/images/lit-embeddings.png index c983d57b..9cfc72ab 100644 Binary files a/documentation/images/lit-embeddings.png and b/documentation/images/lit-embeddings.png 
differ diff --git a/documentation/images/lit-load-data.png b/documentation/images/lit-load-data.png index 8ccb487d..41ae8a52 100644 Binary files a/documentation/images/lit-load-data.png and b/documentation/images/lit-load-data.png differ diff --git a/documentation/images/lit-metrics.png b/documentation/images/lit-metrics.png index 0e6fb858..04e612e5 100644 Binary files a/documentation/images/lit-metrics.png and b/documentation/images/lit-metrics.png differ diff --git a/documentation/images/lit-model-compare.png b/documentation/images/lit-model-compare.png index 84e822c7..6acad1be 100644 Binary files a/documentation/images/lit-model-compare.png and b/documentation/images/lit-model-compare.png differ diff --git a/documentation/images/lit-pred-score.png b/documentation/images/lit-pred-score.png index 316d92c6..052c501e 100644 Binary files a/documentation/images/lit-pred-score.png and b/documentation/images/lit-pred-score.png differ diff --git a/documentation/images/lit-salience.png b/documentation/images/lit-salience.png index 94f8b2f6..0b484d49 100644 Binary files a/documentation/images/lit-salience.png and b/documentation/images/lit-salience.png differ diff --git a/documentation/images/lit-sentiment-analysis.png b/documentation/images/lit-sentiment-analysis.png index 92013643..e6abe533 100644 Binary files a/documentation/images/lit-sentiment-analysis.png and b/documentation/images/lit-sentiment-analysis.png differ diff --git a/documentation/images/lit-settings.png b/documentation/images/lit-settings.png index 1c97d1d8..5362c6d9 100644 Binary files a/documentation/images/lit-settings.png and b/documentation/images/lit-settings.png differ diff --git a/documentation/images/lit-slices.png b/documentation/images/lit-slices.png index d6838fea..1796e7d6 100644 Binary files a/documentation/images/lit-slices.png and b/documentation/images/lit-slices.png differ diff --git a/documentation/images/lit-structured-prediction.png b/documentation/images/lit-structured-prediction.png index 3187612c..57cb18a9 100644 Binary files a/documentation/images/lit-structured-prediction.png and b/documentation/images/lit-structured-prediction.png differ diff --git a/documentation/images/lit-ui.png b/documentation/images/lit-ui.png index 4bcd1b02..e9c05770 100644 Binary files a/documentation/images/lit-ui.png and b/documentation/images/lit-ui.png differ diff --git a/documentation/images/lit-winogender-metrics.png b/documentation/images/lit-winogender-metrics.png new file mode 100644 index 00000000..be08ae1e Binary files /dev/null and b/documentation/images/lit-winogender-metrics.png differ diff --git a/documentation/images/lit-winogender.png b/documentation/images/lit-winogender.png index 2948241d..78ad2ef7 100644 Binary files a/documentation/images/lit-winogender.png and b/documentation/images/lit-winogender.png differ diff --git a/documentation/ui_guide.md b/documentation/ui_guide.md index 3fa728f3..00153397 100644 --- a/documentation/ui_guide.md +++ b/documentation/ui_guide.md @@ -1,6 +1,6 @@ # UI Guide - + This is a user guide for the Language Interpretability Tool (LIT) UI. @@ -23,7 +23,7 @@ In general, the module layout consists of two sections, a top section and a bottom section, separated by a draggable divider to control the height of each section. The top section contains a single set of modules, always shown in the tool. This section is generally used for the main modules that are critical to -tool nagivation, such as the data table and datapoint editor. 
The bottom section +tool navigation, such as the data table and datapoint editor. The bottom section is comprised of tabs that contain any number of individual modules. The tabs in this section are generally grouped into task-specific sets of modules. @@ -78,7 +78,7 @@ comparison). Once a model or models is selected, you can then select from any dataset compatible with those models. The settings dialog also contains controls switching the layout of the tool. -This can help de-clutter the UI when analysis doesn't require all of the +This can help declutter the UI when analysis doesn't require all of the compatible modules that LIT contains. ![LIT global settings](./images/lit-settings.png "LIT global settings") @@ -117,7 +117,7 @@ with a favorite button to mark this datapoint as a favorite. Favorited datapoints are stored in the automatically-created **"Favorites"** slice, accessible in the slice controls. If only a single datapoint is selected, then the left and right arrow buttons in this toolbar allow cycling of the selected -datapoint through the loaded datatset. If the current selection is a set of +datapoint through the loaded dataset. If the current selection is a set of datapoints, then the left and right arrow buttons control which of those datapoints is the primary selected datapoint, cycling through the datapoints in the current selection. A **"random"** button between the arrows allows selection @@ -195,7 +195,7 @@ passing the `--canonical_url=` flag to the server. ## Module Details -This section contains details on using and interactiving with individual modules +This section contains details on using and interacting with individual modules that are built into LIT. Note that this list may not be complete and additional modules can be created and used in LIT by clients. @@ -206,14 +206,14 @@ full-screen button in the top-right of each module. When using LIT with a model that returns embeddings (or activations), in addition to predictions, the embedding projector will show all datapoints by -their embeddings projected down to 3 dimentions. This is useful for exploring +their embeddings projected down to 3 dimensions. This is useful for exploring and understanding clusters of datapoints. ![LIT embeddings](./images/lit-embeddings.png "LIT embeddings") The specific embedding used to generate the projection can be selected in a dropdown, along with the method of projection (either UMAP or PCA). An -additional dropdown allows changing of the datapoint feature used for the label +additional drop-down allows changing of the datapoint feature used for the label of each datapoint. The labels are shown on datapoint hover or click. The visualization can be rotated through click-and-drag interaction, and panned @@ -228,7 +228,7 @@ selection toolbar. The data table shows all datapoints in a simple table. Datapoints can be selected or unselected through a click. Shift+click allows selecting a set of -consecutive datapoints, and control+click allows selecting a set of individidual +consecutive datapoints, and control+click allows selecting a set of individual datapoints one at a time. Currently selected datapoints are highlighted with a light blue background. The primary selected datapoint is highlighted with a darker blue background. If a set of datapoints is currently selected, clicking @@ -252,7 +252,7 @@ datapoints that are currently selected. The **"reset view"** button returns the data table to its standard, default view. 
-A **"columns"** dropdown allows showing/hiding of specific columns to customize +A **"columns"** drop-down allows showing/hiding of specific columns to customize what the data table shows. Model predictions can be added as columns through this dropdown, but they are not shown in the data table by default, in order to keep the table decluttered. @@ -285,12 +285,12 @@ selected) through a set of counterfactual datapoint generators. These generators are provided by the backend and all available generators will show up as buttons in the module. Clicking one of these buttons causes the creation of new datapoints that are displayed in a table inside the module and can be added to -the dataset either individually, or altogther, through the add buttons. +the dataset either individually, or altogether, through the add buttons. Generators built into LIT include: * **Scrambler**: Scrambles the words in a text feature randomly. -* **Backtranslation**: Translates a text feature into other languages and then +* **Back-translation**: Translates a text feature into other languages and then back to the source language to create paraphrases of the initial text feature. * **Hotflip**: When analyzing a classification task and the model provides @@ -386,11 +386,7 @@ class before a datapoint is classified as that class as opposed to the default class. The margin value defaults to 0, meaning the class with the highest score is the class the datapoint is inferred to be. -The below screenshot shows this module for a binary classification task where -the positive classification threshold has been changed and the datapoints near -the decision boundary have been selected for further analysis. - -![LIT prediction scores](./images/lit-pred-score.png "LIT prediction scores") +![LIT prediction scores](./images/lit-pred-score.png "LIT prediction scores") ### Model Output @@ -428,33 +424,6 @@ performed on every datapoint selection, but only when explicitly asked for. ![LIT saliency maps](./images/lit-salience.png "LIT saliency maps") -### Counterfactual Explanation - -The counterfactual explanation module, like the salience maps module, shows the -influence of different parts of input features on a model's prediction on the -primary selection. The main difference is that in this module, the influence is -calculated by looking at the model results on this datapoint of interest, -compared to the results on the rest of the selected datapoints, taking into -account where and how those other datapoints different from the primary -selection. - -In this way, the main use-case of this module is to take a single datapoint and -create a large set of counterfactual datapoints from it (through manual changes -and/or the datapoint generator module). Then, select all those counterfactuals -in addition to the primary selection of the datapoint of interest, and run -analysis in this module. Through dropdowns, you can select which model task and, -for classification tasks, which class to explain. - -The end result is a map showing the different influence values for each text -piece in the primary selected datapoint, displayed in the same manner as the -other salience maps. - -The screenshot below shows a selection of datapoint of interest along with a set -of counterfactuals created from it in the data table, along with the -counterfactual explanation results generated from this selection. 
- -![LIT counterfactual explanation](./images/lit-counterfactual-explanation.png "LIT counterfactual explanation") - ### Attention For models that return attention head information along with predictions, the @@ -487,7 +456,7 @@ depression-era gangster movie.”, while replacing “ultimate” to get “It worst depression-era gangster movie.” elicits a mildly positive score from our model. -![Sentiment analysis](./images/lit-sentiment-analysis.png "Sentiment analysis") +![Sentiment analysis](./images/lit-sentiment-analysis.png "Sentiment analysis") ### Gender Bias in Coreference @@ -495,14 +464,14 @@ Does a system encode gendered associations, which might lead to incorrect predictions? We load a coreference model trained on OntoNotes, and load the Winogender dataset into LIT for evaluation. Each Winogender example has a pronoun and two candidate referents, one a occupation term like (“technician”) -and one an “other participant” (like “customer”). Our model predicts coreference -probabilities for each candidate. We can explore the model’s sensitivity to -pronouns by comparing two examples side-by-side (see screenshot section (a).) We -can see how commonly the model makes similar errors by paging through the -dataset, or by selecting specific slices of interest. For example, we can use -the *Scalars* module (screenshot section (b)) to select datapoints -where the occupation term is associated with a high proportion of (fe)male -workers, according to the U.S. Bureau of Labor Statistics. +and one an “other participant” (like “customer”). Our model predicts +coreference probabilities for each candidate. We can explore the model’s +sensitivity to pronouns by comparing two examples side-by-side (see the +structured predictions in the screenshot). We can see how commonly the model +makes similar errors by paging through the dataset, or by selecting specific +slices of interest. + +![Winogender analysis](./images/lit-winogender.png "Winogender analysis") In the *Metrics* table, we can slice this selection by pronoun type and by the true referent. On the set of male-dominated occupations (< 25% female by BLS), @@ -511,16 +480,19 @@ stereotype - e.g. when the answer is the occupation term, male pronouns are correctly resolved 83% of the time, compared to female pronouns only 37.5% of the time (screenshot section (c)). -![Winogender analysis](./images/lit-winogender.png "Winogender analysis") +![Winogender analysis](./images/lit-winogender-metrics.png "Winogender analysis") ### Debugging Text Generation + + Does the training data explain a particular error in text generation? We analyze -a T5 model on the CNN-DM summarization task. LIT’s *Scalars* module +an older T5 model on the CNN-DM summarization task. LIT’s *Scalars* module allows us to look at per-example ROUGE scores, and quickly select an example with middling performance (screenshot section (a)). We find the generated text (screenshot section (b)) contains an erroneous constituent: “alastair cook was -replaces as captain by former captain ...”. We can dig deeper, using LIT’s +replaced as captain by former captain ...”. We can dig deeper, using LIT’s language modeling module (screenshot section (c)) to see that the token “by” is predicted with high probability (28.7%). 
diff --git a/lit_nlp/api/dataset.py b/lit_nlp/api/dataset.py index 5a3fa1ae..ae446460 100644 --- a/lit_nlp/api/dataset.py +++ b/lit_nlp/api/dataset.py @@ -80,6 +80,11 @@ def __init__(self, self._examples = self._base.examples self._spec = self._base.spec() self._description = self._base.description() + # If a user's child class overrides these conversion methods, this + # ensures the subclass implementations are preserved. We cannot do this + # below, as the default methods are static and do not require an instance. + self.lit_example_to_bytes = self._base.lit_example_to_bytes + self.bytes_to_lit_example = self._base.bytes_to_lit_example # Override from direct arguments. self._examples = examples if examples is not None else self._examples @@ -173,6 +178,16 @@ def remap(self, field_map: Dict[str, str]): new_examples = [utils.remap_dict(ex, field_map) for ex in self.examples] return Dataset(new_spec, new_examples, base=self) + @staticmethod + def bytes_to_lit_example(input_bytes: bytes) -> Optional[JsonDict]: + """Convert bytes representation to LIT example.""" + return serialize.from_json(input_bytes.decode('utf-8')) + + @staticmethod + def lit_example_to_bytes(lit_example: JsonDict) -> bytes: + """Convert LIT example to bytes representation.""" + return serialize.to_json(lit_example).encode('utf-8') + IdFnType = Callable[[types.Input], ExampleId] diff --git a/lit_nlp/api/dtypes.py b/lit_nlp/api/dtypes.py index d00e754c..3ee325a7 100644 --- a/lit_nlp/api/dtypes.py +++ b/lit_nlp/api/dtypes.py @@ -30,7 +30,7 @@ on the frontend as corresponding JavaScript objects. """ import abc -from typing import Any, Dict, List, Optional, Sequence, Text, Tuple, Union +from typing import Any, Dict, List, Mapping, Optional, Sequence, Text, Tuple, Union import attr @@ -128,7 +128,7 @@ class LayoutSettings(DataTuple): @attr.s(auto_attribs=True) class LitComponentLayout(DataTuple): - """Frontend UI layout; should match client/lib/types.ts.""" + """Frontend UI layout (legacy); should match client/lib/types.ts.""" # Keys are names of tabs; one must be called "Main". # Values are names of LitModule HTML elements, # e.g. data-table-module for the DataTableModule class.
+ return attr.asdict(self, recurse=True) + + +LitComponentLayouts = Mapping[str, Union[LitComponentLayout, + LitCanonicalLayout]] + # pylint: enable=invalid-name # LINT.ThenChange(../client/lib/types.ts) diff --git a/lit_nlp/api/types.py b/lit_nlp/api/types.py index af193f20..4aaa5ef9 100644 --- a/lit_nlp/api/types.py +++ b/lit_nlp/api/types.py @@ -105,6 +105,7 @@ def remap_spec(spec: Spec, keymap: Dict[str, str]) -> Spec: ## # Concrete type clases +# LINT.IfChange @attr.s(auto_attribs=True, frozen=True, kw_only=True) @@ -227,10 +228,10 @@ class MulticlassPreds(LitType): """Multiclass predicted probabilities, as [num_labels].""" # Vocabulary is required here for decoding model output. # Usually this will match the vocabulary in the corresponding label field. - vocab: Sequence[Text] # label names - null_idx: Optional[int] = None # vocab index of negative (null) label - parent: Optional[Text] = None # CategoryLabel field in input - autosort: Optional[bool] = False # Enable automatic sorting + vocab: Sequence[Text] # label names + null_idx: Optional[int] = None # vocab index of negative (null) label + parent: Optional[Text] = None # CategoryLabel field in input + autosort: Optional[bool] = False # Enable automatic sorting @property def num_labels(self): @@ -434,3 +435,6 @@ class SequenceSalience(LitType): class Boolean(LitType): """Boolean value.""" default: bool = False + + +# LINT.ThenChange(../client/lib/types.ts) diff --git a/lit_nlp/app.py b/lit_nlp/app.py index 9c90ddc9..e49d600c 100644 --- a/lit_nlp/app.py +++ b/lit_nlp/app.py @@ -395,7 +395,7 @@ def __init__( generators: Optional[Mapping[Text, lit_components.Generator]] = None, interpreters: Optional[Mapping[Text, lit_components.Interpreter]] = None, annotators: Optional[List[lit_components.Annotator]] = None, - layouts: Optional[Mapping[Text, dtypes.LitComponentLayout]] = None, + layouts: Optional[dtypes.LitComponentLayouts] = None, # General server config; see server_flags.py. 
data_dir: Optional[Text] = None, warm_start: float = 0.0, diff --git a/lit_nlp/client/core/app_statusbar.css b/lit_nlp/client/core/app_statusbar.css index aa0080a9..bf9a391f 100644 --- a/lit_nlp/client/core/app_statusbar.css +++ b/lit_nlp/client/core/app_statusbar.css @@ -16,10 +16,10 @@ } .toolbar { - border-top: 1px solid #78d9ec; - background-color: #e4f7fb; + border-top: 1px solid var(--lit-cyea-300); + background-color: var(--lit-majtonal-p-50); width: 100vw; - color: #007b83; + color: var(--lit-cyea-700); font-size: 10pt; } @@ -47,11 +47,11 @@ mwc-icon.icon-button { min-width: 18px; --mdc-icon-size: 18px; cursor: pointer; - color: #007b83; + color: var(--lit-cyea-800); } mwc-icon.icon-button:hover { - opacity: .7; + color: var(--lit-cyea-600); } .emoji { diff --git a/lit_nlp/client/core/app_toolbar.css b/lit_nlp/client/core/app_toolbar.css index 0eae821a..8b7fd203 100644 --- a/lit_nlp/client/core/app_toolbar.css +++ b/lit_nlp/client/core/app_toolbar.css @@ -10,7 +10,7 @@ flex-direction: row; justify-content: space-between; align-items: center; - background-color: #2f8c9b; + background-color: var(--lit-cyea-500); font-family: 'Google Sans' !important; color: white; font-size: 18px; @@ -35,21 +35,18 @@ font-family: 'Google Sans', sans; } -mwc-icon.icon-button { +.icon-button { height: 24px; width: 24px; min-width: 24px; --mdc-icon-size: 24px; cursor: pointer; margin: 0pt 2pt; + color: var(--lit-neutral-100); } -mwc-icon.icon-button:hover { - opacity: .7; -} - -mwc-icon.icon-button:active { - opacity: .4; +.icon-button:hover { + color: var(--lit-neutral-400); } .status-emoji { @@ -104,7 +101,7 @@ button { .headline-button:hover { background: rgba(26, 115, 232, 0.04); opacity: .7; - border: 1px solid #BDC1C6; + border: 1px solid var(--lit-neutral-400); } .headline-button:active { diff --git a/lit_nlp/client/core/global_settings.css b/lit_nlp/client/core/global_settings.css index 5a15be59..c3d135c5 100644 --- a/lit_nlp/client/core/global_settings.css +++ b/lit_nlp/client/core/global_settings.css @@ -212,21 +212,27 @@ a:hover { } .datapoints-line { + width: 100%; display: flex; - flex-wrap: wrap; + flex-direction: row; + align-items: center; + justify-content: space-between; } .datapoints-label-holder { + flex: 1; display: flex; - margin-top: 8px; + flex-direction: row; + align-items: center; + justify-content: space-between; margin-left: 16px; } .datapoints-file-input { + flex: 1; height: 22px; - margin-top: -3px; - margin-left: 4px; - width: 300px; + margin-left: 8px; + margin-right: 4px; } .expanded-info.open { @@ -298,8 +304,7 @@ a:hover { } .prev-next-buttons { - margin: 5pt; - margin-top: 0; + margin: 16px 12px; /* 12px accounts for .hairline-button margin */ } button mwc-icon { diff --git a/lit_nlp/client/core/global_settings.ts b/lit_nlp/client/core/global_settings.ts index a7d1c76a..99c6ee21 100644 --- a/lit_nlp/client/core/global_settings.ts +++ b/lit_nlp/client/core/global_settings.ts @@ -269,11 +269,11 @@ export class GlobalSettingsComponent extends MobxLitElement { return html`
    - - +
    @@ -398,10 +398,10 @@ export class GlobalSettingsComponent extends MobxLitElement { value=${this.pathForModel} @input=${updatePath}>
    -
    ${this.modelStatus}
    @@ -470,7 +470,7 @@ export class GlobalSettingsComponent extends MobxLitElement { return html`
    ${icon} - ${modelName} + ${modelName}
    `; })} `; @@ -534,24 +534,22 @@ export class GlobalSettingsComponent extends MobxLitElement { value=${this.pathForDatapoints} @input=${updatePath}>
    - - - -
    -
    ${this.datapointsStatus}
    -
    +
    ${this.datapointsStatus}
    `; return this.renderConfigPage( @@ -637,7 +635,7 @@ export class GlobalSettingsComponent extends MobxLitElement { const statusClasses = classMap({status: true, selected, error: disabled}); // clang-format off return html` -
    +
    ${statusIcon} @@ -709,7 +707,10 @@ export class GlobalSettingsComponent extends MobxLitElement { private nextPrevButton(tab: TabName, next = true) { const icon = next ? 'east' : 'west'; // Arrow direction. - const classes = classMap({'next': next, 'prev': !next}); + const classes = classMap({ + 'hairline-button': true, + [next ? 'next' : 'prev']: true + }); const onClick = () => this.selectedTab = tab; // clang-format off return html` diff --git a/lit_nlp/client/core/main_toolbar.css b/lit_nlp/client/core/main_toolbar.css index 9d48c8d7..eb1f9c38 100644 --- a/lit_nlp/client/core/main_toolbar.css +++ b/lit_nlp/client/core/main_toolbar.css @@ -72,37 +72,6 @@ margin-right: 4pt; } -/** - * For standalone MWC icons as buttons. - * We don't use mwc-icon-button because this adds - * a large backdrop and extra whitespace. - */ -mwc-icon.icon-button { - height: 16px; - width: 16px; - min-width: 16px; - --mdc-icon-size: 16px; - cursor: pointer; - user-select: none; -} - -mwc-icon.icon-button:hover { - opacity: .7; -} - -mwc-icon.mdi-outlined { - --mdc-icon-font: "Material Icons Outlined"; -} - -mwc-icon.disabled { - cursor: default; - color: rgba(60, 64, 67, 0.38); -} - -mwc-icon.disabled:hover { - opacity: 1; -} - /* For in-line icons in a */ [data-icon] { margin: 0; diff --git a/lit_nlp/client/core/main_toolbar.ts b/lit_nlp/client/core/main_toolbar.ts index 3c83715d..437b77ab 100644 --- a/lit_nlp/client/core/main_toolbar.ts +++ b/lit_nlp/client/core/main_toolbar.ts @@ -260,7 +260,7 @@ export class LitMainToolbar extends MobxLitElement { }; // clang-format off return html` - @@ -472,11 +472,10 @@ export class LitMainToolbar extends MobxLitElement {
    -
    +
    Compare datapoints
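As an aside to the nextPrevButton change in global_settings.ts above: classMap takes an object whose keys are class names, so a computed key can toggle between 'next' and 'prev' while 'hairline-button' is always applied. A small self-contained sketch of that pattern; the button markup is illustrative, not the stripped original:

import {html} from 'lit';
import {classMap} from 'lit/directives/class-map';

const nextPrevClasses = (next: boolean) => classMap({
  'hairline-button': true,         // always present
  [next ? 'next' : 'prev']: true,  // computed key picks the direction class
});

// e.g. html`<button class=${nextPrevClasses(true)}>East</button>` renders
// with class="hairline-button next".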
    @@ -498,7 +497,8 @@ export class LitMainToolbar extends MobxLitElement { ${primaryId !== null ? this.renderPrimarySelectControls() : null} ${this.renderStarButton(numSelected)} ${this.renderSelectionDisplay(numSelected, numTotal)} - diff --git a/lit_nlp/client/core/modules.ts b/lit_nlp/client/core/modules.ts index 00c6c959..b00bf2eb 100644 --- a/lit_nlp/client/core/modules.ts +++ b/lit_nlp/client/core/modules.ts @@ -238,8 +238,11 @@ export class LitModules extends ReactiveElement { // clang-format off }; + const lowerSectionVisible = Object.keys(layout.lower).length > 0; + const upperHeight = lowerSectionVisible ? `${this.mainSectionHeight}vh` : "100%"; + const styles = styleMap({ - '--upper-height': `${this.mainSectionHeight}vh`, + '--upper-height': upperHeight, '--num-tab-bars': `${upperTabsVisible ? 2 : 1}`, }); @@ -251,21 +254,23 @@ export class LitModules extends ReactiveElement { ${this.renderComponentGroups(layout.upper, upperTabToSelect, this.upperLayoutWidths)}
    -
    -
    - ${this.renderTabs(lowerGroupNames, lowerTabToSelect, setLowerTab)} -
    -
    - drag_handle -
    {this.onBarDragged(e);}}> + ${lowerSectionVisible ? html` +
    +
    + ${this.renderTabs(lowerGroupNames, lowerTabToSelect, setLowerTab)} +
    +
    + drag_handle +
    {this.onBarDragged(e);}}> +
    -
    -
    - ${this.renderComponentGroups(layout.lower, lowerTabToSelect, - this.lowerLayoutWidths)} -
    +
    + ${this.renderComponentGroups(layout.lower, lowerTabToSelect, + this.lowerLayoutWidths)} +
    + ` : null}
    `; // clang-format on diff --git a/lit_nlp/client/core/widget_group.css b/lit_nlp/client/core/widget_group.css index 53db63e6..de818882 100644 --- a/lit_nlp/client/core/widget_group.css +++ b/lit_nlp/client/core/widget_group.css @@ -50,12 +50,6 @@ width: 24px; min-width: 24px; --mdc-icon-size: 24px; - color: rgb(95, 99, 104); - cursor: pointer; -} - -.icon-button:hover { - opacity: .7; } .holder { diff --git a/lit_nlp/client/core/widget_group.ts b/lit_nlp/client/core/widget_group.ts index eed610bf..0a9b5954 100644 --- a/lit_nlp/client/core/widget_group.ts +++ b/lit_nlp/client/core/widget_group.ts @@ -35,6 +35,7 @@ import {app} from './app'; import {LitModule} from './lit_module'; import {styles as widgetStyles} from './widget.css'; import {styles as widgetGroupStyles} from './widget_group.css'; +import {styles as sharedStyles} from '../lib/shared_styles.css'; /** Minimum width for a widget group. */ export const MIN_GROUP_WIDTH_PX = 100; @@ -62,7 +63,7 @@ export class WidgetGroup extends LitElement { private syncScrolling = true; static override get styles() { - return [widgetGroupStyles]; + return [sharedStyles, widgetGroupStyles]; } override firstUpdated() { @@ -307,7 +308,7 @@ export class LitWidget extends MobxLitElement { @property({ type: Number }) widgetScrollLeft = 0; static override get styles() { - return widgetStyles; + return [sharedStyles, widgetStyles]; } override async updated() { diff --git a/lit_nlp/client/elements/bar_chart.ts b/lit_nlp/client/elements/bar_chart.ts index a09d1c66..421eef45 100644 --- a/lit_nlp/client/elements/bar_chart.ts +++ b/lit_nlp/client/elements/bar_chart.ts @@ -27,6 +27,7 @@ import {observable} from 'mobx'; import {ReactiveElement} from '../lib/elements'; import {styles} from './bar_chart.css'; +import {styles as sharedStyles} from '../lib/shared_styles.css'; /** @@ -41,7 +42,7 @@ export class BarChart extends ReactiveElement { @property({type: Array}) yScale: number[] = []; static override get styles() { - return [styles]; + return [sharedStyles, styles]; } override firstUpdated() { @@ -131,7 +132,7 @@ export class BarChart extends ReactiveElement { .enter() .append('rect') .attr('width', x.bandwidth()) - .style('fill', '#07a3ba') + .style('fill', 'var(--lit-cyea-400)') // clang-format off .attr('height', (d) => { // The y-axis displays the score. 
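For reference, a minimal sketch of the sequential color scale that the data_matrix change below constructs. The hex values here are hypothetical stand-ins for MAJOR_TONAL_COLORS.primary (the real palette lives in lib/colors), the ramp helper is assumed to behave like d3.piecewise over those colors, and the domain size of 100 is arbitrary.

import * as d3 from 'd3';

// Hypothetical stand-ins for the eight primary tones (50 through 700) that
// MAJOR_TONAL_COLORS.primary.slice(0, 8) would supply.
const palette = ['#e0f7fa', '#b2ebf2', '#80deea', '#4dd0e1',
                 '#26c6da', '#00bcd4', '#00acc1', '#0097a7'];

// Assumed equivalent of ramp(...): a piecewise RGB interpolator over the
// palette, suitable as input to a sequential scale.
const colorRamp = d3.piecewise(d3.interpolateRgb, palette);

// The domain runs from 0 (no selected datapoints in a cell) to the total
// selection size, so cell intensity tracks the fraction of the selection.
const totalIds = 100;
const colorScale = d3.scaleSequential(colorRamp).domain([0, totalIds]);

const lightest = colorScale(0);        // empty cell
const darkest = colorScale(totalIds);  // cell holding the whole selection

// Text flips black => white above tone index 6 of the 8 tones used, i.e.
// above floor((6 - LOW) / (HIGH - 1 - LOW) * 100) = floor(6 / 7 * 100) = 85%
// of the domain, which is what COLOR_FLIP_PCT computes in the hunk below.
const COLOR_FLIP_PCT = Math.floor(6 / 7 * 100);  // 85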
diff --git a/lit_nlp/client/elements/data_matrix.css b/lit_nlp/client/elements/data_matrix.css index 59555305..8084fce1 100644 --- a/lit_nlp/client/elements/data_matrix.css +++ b/lit_nlp/client/elements/data_matrix.css @@ -66,12 +66,6 @@ min-width: 24px; --mdc-icon-size: 24px; --mdc-icon-button-size: 24px; - color: rgb(95, 99, 104); - cursor: pointer; -} - -.icon-button:hover { - opacity: .7; } .delete-cell { diff --git a/lit_nlp/client/elements/data_matrix.ts b/lit_nlp/client/elements/data_matrix.ts index 8ac45943..f8823e1f 100644 --- a/lit_nlp/client/elements/data_matrix.ts +++ b/lit_nlp/client/elements/data_matrix.ts @@ -25,14 +25,23 @@ import {classMap} from 'lit/directives/class-map'; import {styleMap} from 'lit/directives/style-map'; import {computed, observable} from 'mobx'; import {styles} from './data_matrix.css'; +import {styles as sharedStyles} from '../lib/shared_styles.css'; +import {MAJOR_TONAL_COLORS, ramp} from '../lib/colors'; +// Custom color ramp for the Data Matrix +const LOW = 0, HIGH = 8; // 0: -50, 1: -100, 2: -200, etc., HIGH gets excluded +// Text color flips (black => white) above -600, calc the % where that happens +const COLOR_FLIP_PCT = Math.floor((6 - LOW) / (HIGH - 1 - LOW) * 100); +const COLOR_RAMP = ramp([...MAJOR_TONAL_COLORS.primary.slice(LOW, HIGH) + .map(c => c.color)]); + /** * Stores information for each confusion matrix cell. */ export interface MatrixCell { - 'ids': string[]; - 'selected': boolean; + ids: string[]; + selected: boolean; } /** @@ -41,7 +50,7 @@ export interface MatrixCell { @customElement('data-matrix') export class DataMatrix extends LitElement { static override get styles() { - return [styles]; + return [sharedStyles, styles]; } @observable verticalColumnLabels = false; @@ -66,10 +75,11 @@ export class DataMatrix extends LitElement { @computed get colorScale() { - return d3.scaleLinear() - .domain([0, this.totalIds]) - // Need to cast to numbers due to d3 typing. - .range(["#F5F5F5" as unknown as number, "#006064" as unknown as number]); + // Returns a D3 sequential scale with a domain from 0 (i.e., no selected + // datapoints are in this cell) to totalIds (i.e., all selected datapoints + // are in this cell). + // See https://github.com/d3/d3-scale#sequential-scales + return d3.scaleSequential(COLOR_RAMP).domain([0, this.totalIds]); } private updateSelection() { @@ -133,7 +143,7 @@ export class DataMatrix extends LitElement { } const backgroundColor = this.colorScale(cellInfo.ids.length); const percentage = cellInfo.ids.length / this.totalIds * 100; - const textColor = percentage > 50 ? 'white' : 'black'; + const textColor = percentage > COLOR_FLIP_PCT ? 'white' : 'black'; const border = cellInfo.selected ? '2px solid #12B5CB' : '2px solid transparent'; const cellStyle = styleMap({ @@ -174,12 +184,6 @@ export class DataMatrix extends LitElement { // clang-format off return html` - ${rowIndex === 0 ? html` - -
    ${this.rowTitle}
    - ` - : null} ${rowLabel} @@ -285,27 +289,36 @@ export class DataMatrix extends LitElement { 'align-bottom': this.verticalColumnLabels }); + const colsLabelSpan = this.hideEmptyLabels ? colsWithNonZeroCounts.size : + this.colLabels.length; + // Add 2 to the appropriate row count to account for the header rows + // above and below the data rows in the matrix. + const rowsLabelSpan = (this.hideEmptyLabels ? rowsWithNonZeroCounts.size : + this.rowLabels.length) + 2; + // clang-format off return html` - - + + - + + ${this.colLabels.map( (colLabel, colIndex) => this.renderColHeader( colLabel, colIndex, colsWithNonZeroCounts))} - ${this.rowLabels.map( - (rowLabel, rowIndex) => this.renderRow( - rowLabel, rowIndex, rowsWithNonZeroCounts, - colsWithNonZeroCounts))} + ${this.rowLabels.map((rowLabel, rowIndex) => this.renderRow( + rowLabel, rowIndex, rowsWithNonZeroCounts, colsWithNonZeroCounts))} ${this.renderTotalRow(colsWithNonZeroCounts)}
    ${this.renderColumnRotateButton()} + ${this.renderColumnRotateButton()} ${this.colTitle} ${this.renderDeleteButton()}
    +
    ${this.rowTitle}
    +
    Total
    `; diff --git a/lit_nlp/client/elements/interpreter_controls.css b/lit_nlp/client/elements/interpreter_controls.css index 66f75611..7d771d02 100644 --- a/lit_nlp/client/elements/interpreter_controls.css +++ b/lit_nlp/client/elements/interpreter_controls.css @@ -78,13 +78,13 @@ width: 50%; } -input[type="range"] { +.slider { min-width: 60px; flex: 1; } .slider-label { - color: gray; + color: var(--lit-neutral-600); font-size: 8pt; min-width: 3em; } diff --git a/lit_nlp/client/elements/interpreter_controls.ts b/lit_nlp/client/elements/interpreter_controls.ts index 208d3c8f..d40fff3f 100644 --- a/lit_nlp/client/elements/interpreter_controls.ts +++ b/lit_nlp/client/elements/interpreter_controls.ts @@ -97,7 +97,7 @@ export class InterpreterControls extends ReactiveElement {
    ${this.description}
    ${this.renderControls()}
    - +
    @@ -183,28 +183,23 @@ export class InterpreterControls extends ReactiveElement { `; } else if (isLitSubtype(controlType, ['Scalar'])) { // Render a slider. + const step = controlType.step!; + const minVal = controlType.min_val!; + const maxVal = controlType.max_val!; + const updateSettings = (e: Event) => { const input = (e.target as HTMLInputElement); this.settings[name] = input.value; }; - const step = controlType.step!; - const minVal = controlType.min_val!; - const maxVal = controlType.max_val!; - const defaultValue = controlType.default! as string; - // clang-format off return html`
    ${minVal}
    - +
    ${maxVal}
    ${this.settings[name]}
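The markup stripped from the Scalar-control hunk above presumably instantiates the new lit-slider element added later in this patch (elements/slider.ts). A minimal sketch of such usage, with the property wiring an assumption rather than the patch's exact markup:

import {html} from 'lit';
import './slider';  // registers the <lit-slider> custom element

// updateSettings mirrors the handler defined in the hunk above; binding it to
// .onInput replaces the element's default input handler, so the control owner
// stays the source of truth for the value.
const renderScalarSlider =
    (minVal: number, maxVal: number, step: number, value: number,
     updateSettings: (e: Event) => void) => html`
        <lit-slider min=${minVal} max=${maxVal} step=${step} .val=${value}
            .onInput=${updateSettings}></lit-slider>`;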
    diff --git a/lit_nlp/client/elements/line_chart.ts b/lit_nlp/client/elements/line_chart.ts index 20345c5f..560ff49b 100644 --- a/lit_nlp/client/elements/line_chart.ts +++ b/lit_nlp/client/elements/line_chart.ts @@ -27,6 +27,7 @@ import {observable} from 'mobx'; import {ReactiveElement} from '../lib/elements'; import {styles} from './line_chart.css'; +import {styles as sharedStyles} from '../lib/shared_styles.css'; /** * Line chart visualization component. @@ -40,7 +41,7 @@ export class LineChart extends ReactiveElement { @property({type: Array}) yScale: number[] = []; static override get styles() { - return [styles]; + return [sharedStyles, styles]; } override firstUpdated() { @@ -109,7 +110,7 @@ export class LineChart extends ReactiveElement { chart.append('path') .datum(data) .attr("fill", "none") - .attr("stroke", "#07a3ba") + .attr("stroke", 'var(--lit-cyea-400)') .attr("stroke-width", 1.5) .attr("d", d3.line() .x((d) => x(d[0])) @@ -121,8 +122,8 @@ export class LineChart extends ReactiveElement { focus.append("circle") .attr("r", 4) - .attr("fill", "#07a3ba") - .attr("stroke", "#07a3ba"); + .attr("fill", 'var(--lit-cyea-400)') + .attr("stroke", 'var(--lit-cyea-400)'); const mousemove = () => { console.log(d3.mouse(this)); diff --git a/lit_nlp/client/elements/score_bar.ts b/lit_nlp/client/elements/score_bar.ts index d18a296e..f427bbc0 100644 --- a/lit_nlp/client/elements/score_bar.ts +++ b/lit_nlp/client/elements/score_bar.ts @@ -44,13 +44,14 @@ export class ScoreBar extends LitElement { padding-right: .75%; margin-left: .35%; margin-right: .35%; - background-color: #07a3ba; + background-color: var(--lit-cyea-300); } .text { position: absolute; padding-left: 4px; padding-right: 2px; + color: var(--lit-neutral-800); } `; } diff --git a/lit_nlp/client/elements/slider.css b/lit_nlp/client/elements/slider.css new file mode 100644 index 00000000..5b8d38da --- /dev/null +++ b/lit_nlp/client/elements/slider.css @@ -0,0 +1,52 @@ +/** Slider styles for Chrome */ +input[type='range'].slider { + -webkit-appearance: none; + max-width: calc(100% - 4px); + background: var(--lit-neutral-400); + border-radius: 4px; + background-image: linear-gradient(var(--lit-cyea-400), var(--lit-cyea-400)); + background-repeat: no-repeat; +} + +input[type='range'].slider::-webkit-slider-runnable-track { + -webkit-appearance: none; + height: 10px; + color: var(--lit-cyea-400); +} + +input[type='range'].slider::-webkit-slider-thumb { + -webkit-appearance: none; + width: 16px; + height: 16px; + margin-top: -3px; + border-radius: 50%; + cursor: grab; + background: var(--lit-cyea-600); + box-shadow: 0 0 2px 0 var(--lit-neutral-900); +} + +/** Slider styles for FF*/ +input[type="range"].slider::-moz-range-progress { + background-color: var(--lit-cyea-400); +} + +input[type="range"].slider::-moz-range-thumb { + background-color: var(--lit-cyea-600); +} + +input[type="range"].slider::-moz-range-track { + background-color: var(--lit-neutral-400); +} + +/** Slider styles for IE */ +input[type="range"].slider::-ms-fill-lower { + background-color: var(--lit-cyea-400); +} + +input[type="range"].slider::-ms-fill-upper { + background-color: var(--lit-neutral-400); +} + +input[type="range"].slider::-ms-thumb { + background-color: var(--lit-cyea-600); +} diff --git a/lit_nlp/client/elements/slider.ts b/lit_nlp/client/elements/slider.ts new file mode 100644 index 00000000..a2e2b255 --- /dev/null +++ b/lit_nlp/client/elements/slider.ts @@ -0,0 +1,60 @@ +/** + * @fileoverview An input[type="range"] slider with LIT Brand-compliant 
styles + * + * @license + * Copyright 2020 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +import {property} from 'lit/decorators'; +import {customElement} from 'lit/decorators'; +import {html, LitElement} from 'lit'; +import {styleMap} from 'lit/directives/style-map'; + +import {styles} from './slider.css'; +import {styles as sharedStyles} from '../lib/shared_styles.css'; + +/** A slider with LIT Brand-compliant styles. */ +@customElement('lit-slider') +export class Slider extends LitElement { + @property({type: Number}) min = 0; + @property({type: Number}) max = 1; + @property({type: Number}) step = 0.1; + @property({type: Number}) val = 0.5; + @property({attribute: false}) onChange = () => {}; + @property({attribute: false}) onInput = (e:Event) => { + const input = e.target as HTMLInputElement; + this.val = +input.value; + }; + + static override get styles() { + return [sharedStyles, styles]; + } + + override render () { + const normalizedValue = (this.val - this.min) / (this.max - this.min); + const styles = {'background-size': `${normalizedValue * 100}% 100%`}; + return html``; + } +} + +declare global { + interface HTMLElementTagNameMap { + 'lit-slider': Slider; + } +} diff --git a/lit_nlp/client/elements/table.css b/lit_nlp/client/elements/table.css index aed7f8d3..14360ad7 100644 --- a/lit_nlp/client/elements/table.css +++ b/lit_nlp/client/elements/table.css @@ -125,19 +125,19 @@ * Body styles */ tbody tr:hover { - background: #f8f9fa; + background: var(--lit-mage-200); } tbody tr.selected { - background: #c5e4e8; + background: var(--lit-cyea-100); color: var(--lit-gray-800); transition-duration: 0.2s; transition-property: background-color; } tbody tr.primary-selected { - background: #9bb7ba; - color: #142838; + background: var(--lit-cyea-300); + color: black; transition-duration: 0.2s; transition-property: background-color; } @@ -213,40 +213,13 @@ tbody td { min-width: 16px; } -/* TODO(lit-dev): Move this to shared_styles and ensure elements can use shared - * styles - */ /** - * For standalone MWC icons as buttons. - * We don't use mwc-icon-button because this adds - * a large backdrop and extra whitespace. + * For standalone MWC icons as buttons. We don't use mwc-icon-button because it + * adds a large backdrop and extra whitespace. 
*/ -mwc-icon.icon-button { - height: 16px; - width: 16px; - min-width: 16px; - --mdc-icon-size: 16px; - cursor: pointer; - user-select: none; -} - -mwc-icon.icon-button:hover { - opacity: .7; +.icon-button:hover { + color: var(--lit-neutral-900); } - -mwc-icon.mdi-outlined { - --mdc-icon-font: "Material Icons Outlined"; -} - -mwc-icon.disabled { - cursor: default; - color: rgba(60, 64, 67, 0.38); -} - -mwc-icon.disabled:hover { - opacity: 1; -} - -mwc-icon.button-extra-margin { +.button-extra-margin { margin-left: 4px; } diff --git a/lit_nlp/client/elements/table.ts b/lit_nlp/client/elements/table.ts index b66c3c28..8a00e2ef 100644 --- a/lit_nlp/client/elements/table.ts +++ b/lit_nlp/client/elements/table.ts @@ -36,6 +36,7 @@ import {ReactiveElement} from '../lib/elements'; import {formatForDisplay} from '../lib/types'; import {isNumber, randInt} from '../lib/utils'; +import {styles as sharedStyles} from '../lib/shared_styles.css'; import {styles} from './table.css'; type SortableTableEntry = string|number; @@ -109,6 +110,9 @@ export class DataTable extends ReactiveElement { @observable @property({type: Boolean}) searchEnabled: boolean = false; @observable @property({type: Boolean}) paginationEnabled: boolean = false; + // Style overrides + @property({type: Boolean}) verticalAlignMiddle: boolean = false; + // Callbacks @property({type: Object}) onClick: OnPrimarySelectCallback|undefined; @property({type: Object}) onHover: OnHoverCallback|undefined; @@ -116,7 +120,7 @@ export class DataTable extends ReactiveElement { @property({type: Object}) onPrimarySelect: OnPrimarySelectCallback = () => {}; static override get styles() { - return [styles]; + return [sharedStyles, styles]; } // Sort order precedence: 1) sortName, 2) input order @@ -830,11 +834,17 @@ export class DataTable extends ReactiveElement { const cellClasses = this.columnHeaders.map( h => classMap({'cell-holder': true, 'right-align': h.rightAlign!})); + const cellStyles = styleMap({ + verticalAlign: this.verticalAlignMiddle ? 'middle' : 'top' + }); // clang-format off return html` - ${data.rowData.map((d, i) => html`
    ${formatCellContents(d)}
    `)} + ${data.rowData.map((d, i) => + html`
    ${ + formatCellContents(d) + }
    `)} `; // clang-format on diff --git a/lit_nlp/client/elements/tcav_score_bar.ts b/lit_nlp/client/elements/tcav_score_bar.ts index b6e4f6a3..89ef2e4b 100644 --- a/lit_nlp/client/elements/tcav_score_bar.ts +++ b/lit_nlp/client/elements/tcav_score_bar.ts @@ -37,6 +37,8 @@ export class TcavScoreBar extends LitElement { position: relative; display: flex; min-width: 100px; + min-height: 20px; + background-color: var(--lit-neutral-200); } .separator { @@ -47,39 +49,32 @@ export class TcavScoreBar extends LitElement { } .pos-bar { - background-color: #4ECDE6; - min-height: 20px; - } - - .pos-blank { - background-color: #E8EAED; min-height: 20px; + position: absolute; + background-color: var(--lit-cyea-300); } `; } override render() { - const score = this.score; - const clampVal = this.clampVal; - const meanVal = this.meanVal; - - const stylePosBlank: {[name: string]: string} = {}; - stylePosBlank['width'] = - `${(1 - Math.min(Math.max(score, 0), clampVal) / clampVal) * 100}%`; + const {score, clampVal, meanVal} = this; + const normalizedScore = Math.min(Math.max(score, 0), clampVal) / clampVal; + const normalizedMean = Math.min(Math.max(meanVal, 0), clampVal) / clampVal; - const stylePosBar: {[name: string]: string} = {}; - stylePosBar['width'] = - `${Math.min(Math.max(score, 0), clampVal) / clampVal * 100}%`; + const stylePosBar: {[name: string]: string} = { + 'width': `${Math.abs(normalizedScore - normalizedMean) * 100}%`, + 'left': normalizedScore > normalizedMean ? `${normalizedMean * 100}%` : + `${normalizedScore * 100}%` + }; - const styleSep: {[name: string]: string} = {}; - styleSep['left'] = - `${Math.min(Math.max(meanVal, 0), clampVal) / clampVal * 100}%`; + const styleSep: {[name: string]: string} = { + 'left': `${normalizedMean * 100}%` + }; return html`
    -
    -
    -
    `; +
    +
    `; } } diff --git a/lit_nlp/client/elements/threshold_slider.ts b/lit_nlp/client/elements/threshold_slider.ts index e1069938..6ec5568a 100644 --- a/lit_nlp/client/elements/threshold_slider.ts +++ b/lit_nlp/client/elements/threshold_slider.ts @@ -39,7 +39,8 @@ export class ThresholdSlider extends LitElement { .slider-row { display: flex; flex-wrap: wrap; - margin: 8px 5px; + align-items: start; + justify-content: center; } .text-with-controls { @@ -51,6 +52,9 @@ export class ThresholdSlider extends LitElement { } .slider-val { + color: var(--lit-neutral-600); + margin-top: -3px; /*Accounts for custom thumb offset in lit-slider*/ + margin-left: 2px; width: 30px; } @@ -58,7 +62,6 @@ export class ThresholdSlider extends LitElement { margin: 5px; padding: 5px 10px; } - `]; } @@ -112,6 +115,7 @@ export class ThresholdSlider extends LitElement { marginToVal: (margin: number) => number, title: string) { const val = marginToVal(margin); const isDefaultValue = margin === 0; + const reset = () => { const event = new CustomEvent('threshold-changed', { detail: { @@ -130,6 +134,7 @@ export class ThresholdSlider extends LitElement { 'text-no-controls': !this.showControls, 'slider-val': true }; + const renderLabel = () => { if (this.showControls) { return html` @@ -138,12 +143,12 @@ export class ThresholdSlider extends LitElement { return null; } }; + return html`
    ${renderLabel()} - +
    ${val}
    ${this.showControls ? html`