From a0046c787f71c715f4b2c5f8e367ea9a7b34c17a Mon Sep 17 00:00:00 2001
From: James Wexler
Date: Mon, 5 Apr 2021 10:50:16 -0700
Subject: [PATCH] Update website source
PiperOrigin-RevId: 366828734
---
docs/demos/index.html | 22 +++++++++++-----------
docs/index.html | 1 +
docs/setup/index.html | 6 +++++-
website/src/demos.md | 5 +----
website/src/setup.md | 2 +-
5 files changed, 19 insertions(+), 17 deletions(-)
diff --git a/docs/demos/index.html b/docs/demos/index.html
index e2121aff..fdea024c 100644
--- a/docs/demos/index.html
+++ b/docs/demos/index.html
@@ -96,6 +96,17 @@
Use LIT with any of three tasks from the General Language Understanding Evaluation (GLUE) benchmark suite. This demo contains binary classification (for sentiment analysis, using SST2), multi-class classification (for textual entailment, using MultiNLI), and regression (for measuring text similarity, using STS-B).
+
+
+
BERT binary classification notebooks
+
+
DATA SOURCES
+
+ Stanford Sentiment Treebank
+
+
Use LIT directly inside a Colab notebook. Explore binary classification for sentiment analysis using SST2 from the General Language Understanding Evaluation (GLUE) benchmark suite.
+
+
BERT coreference fairness Winogender
@@ -128,17 +139,6 @@
Use a T5 model to summarize text. For any example of interest, quickly find similar examples from the training set, using an approximate nearest-neighbors index.
-
-
-
-
Colab notebooks BERT binary classification
-
-
DATA SOURCES
-
- Stanford Sentiment Treebank
-
-
Use LIT directly inside a Colab notebook. Explore binary classification for sentiment analysis from the General Language Understanding Evaluation (GLUE) benchmark suite.
-
diff --git a/docs/index.html b/docs/index.html
index 68c6fb49..5908fc7c 100644
--- a/docs/index.html
+++ b/docs/index.html
@@ -134,6 +134,7 @@ Framework agnostic
TensorFlow 1.x
TensorFlow 2.x
PyTorch
+Notebook compatibility
Custom inference code
Remote Procedure Calls
And more...
diff --git a/docs/setup/index.html b/docs/setup/index.html
index e6bfa18a..990273dd 100644
--- a/docs/setup/index.html
+++ b/docs/setup/index.html
@@ -99,6 +99,8 @@ Install from source
Run the included demos
LIT ships with a number of demos that can easily be run after installation.
+LIT can be started on the command line and then viewed in a web browser.
+Alternatively, it can be run directly in a Colaboratory or Jupyter notebook and viewed in an output cell of the notebook.
Quick-start: Classification and regression
To explore classification and regression tasks from the popular GLUE benchmark:
python -m lit_nlp.examples.glue_demo --port=5432 --quickstart
@@ -110,11 +112,13 @@ Quick-start: Classification and regression
STS-B or MultiNLI using the toolbar or the gear icon in
the upper right.
Language modeling
-python -m lit_nlp.examples.pretrained_lm_demo \
--models=bert-base-uncased --port=5432
+python -m lit_nlp.examples.lm_demo \
--models=bert-base-uncased --port=5432
In this demo, you can explore predictions from a pretrained language model (i.e. fill in the blanks).
Navigate to http://localhost:5432 for the UI.
More examples
The examples directory contains additional examples to explore, all of which can be run similarly to those above.
+Notebook usage
+A simple Colab demo can be found here. Just run all the cells to see LIT on an example classification model right in the notebook.
Use LIT on your own models and data
diff --git a/website/src/demos.md b/website/src/demos.md
index c04f888d..e95fc4f6 100644
--- a/website/src/demos.md
+++ b/website/src/demos.md
@@ -19,7 +19,7 @@ color: "#49596c"
{% include partials/demo-card c-title: "Notebook usage", link: "https://colab.research.google.com/github/PAIR-code/lit/blob/main/lit_nlp/examples/notebooks/LIT_sentiment_classifier.ipynb",
c-data-source: "Stanford Sentiment Treebank"
- c-copy: "Use LIT directly inside of a colab notebook. This simple demo shows a binary classifier for sentiment analysis using SST2.", tags: "BERT, binary classification, notebook", external:"true" %}
+ c-copy: "Use LIT directly inside a Colab notebook. Explore binary classification for sentiment analysis using SST2 from the General Language Understanding Evaluation (GLUE) benchmark suite.", tags: "BERT, binary classification, notebooks", external:"true" %}
{% include partials/demo-card c-title: "Gender bias in coreference systems", link: "/demos/coref.html",
c-data-source: "Winogender schemas", c-copy: "Use LIT to explore gendered associations in a coreference system, which matches pronouns to their antecedents. This demo highlights how LIT can work with structured prediction models (edge classification), and its capability for disaggregated analysis.", tags: "BERT, coreference, fairness, Winogender", external:"true" %}
@@ -29,7 +29,4 @@ color: "#49596c"
{% include partials/demo-card c-title: "Text generation", link: "/demos/t5.html",
c-data-source: "CNN / Daily Mail", c-copy: "Use a T5 model to summarize text. For any example of interest, quickly find similar examples from the training set, using an approximate nearest-neighbors index.", tags: "T5, generation", external:"true" %}
-
- {% include partials/demo-card c-title: "Using LIT in notebooks", link: "https://colab.research.google.com/github/pair-code/lit/blob/main/examples/notebooks/LIT_sentiment_classifier.ipynb",
- c-data-source: "Stanford Sentiment Treebank", c-copy: "Use LIT directly inside a Colab notebook. Explore binary classification for sentiment analysis from the General Language Understanding Evaluation (GLUE) benchmark suite.", tags: "Colab, notebooks, BERT, binary classification", external:"true" %}
diff --git a/website/src/setup.md b/website/src/setup.md
index 1ae73e1c..91cc891f 100644
--- a/website/src/setup.md
+++ b/website/src/setup.md
@@ -89,7 +89,7 @@ the upper right.
## Language modeling
```bash
-python -m lit_nlp.examples.pretrained_lm_demo \
+python -m lit_nlp.examples.lm_demo \
--models=bert-base-uncased --port=5432
```