UX: Use new DStatTiles reusable component from core (#1025)
For the Spam and Usage tabs in admin
martin-brennan authored Dec 16, 2024
1 parent 94b85ec commit 222e2cf
Showing 5 changed files with 65 additions and 122 deletions.
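At a glance, both admin tabs drop their hand-rolled stat markup and instead render a metrics array through core's DStatTiles component. Below is a minimal sketch of that pattern, assuming a Discourse plugin .gjs component; the component name and sample data are hypothetical, while the DStatTiles / tiles.Tile argument names are taken from the diffs that follow.

import Component from "@glimmer/component";
import DStatTiles from "discourse/components/d-stat-tiles";

// Hypothetical example component; only the DStatTiles / tiles.Tile usage
// mirrors what this commit adds to ai-spam.gjs and ai-usage.gjs.
export default class ExampleStatTiles extends Component {
  get metrics() {
    // Sample data; the real components build this from spam stats or usage summaries.
    return [
      { label: "Scanned", value: 120 },
      { label: "Spam detected", value: 7, tooltip: "Shown on hover" },
    ];
  }

  <template>
    <DStatTiles as |tiles|>
      {{#each this.metrics as |metric|}}
        <tiles.Tile
          @label={{metric.label}}
          @value={{metric.value}}
          @tooltip={{metric.tooltip}}
        />
      {{/each}}
    </DStatTiles>
  </template>
}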
33 changes: 15 additions & 18 deletions assets/javascripts/discourse/components/ai-spam.gjs
@@ -6,6 +6,7 @@ import { action } from "@ember/object";
 import { LinkTo } from "@ember/routing";
 import { service } from "@ember/service";
 import DButton from "discourse/components/d-button";
+import DStatTiles from "discourse/components/d-stat-tiles";
 import DToggleSwitch from "discourse/components/d-toggle-switch";
 import DTooltip from "discourse/components/d-tooltip";
 import withEventValue from "discourse/helpers/with-event-value";
@@ -121,7 +122,7 @@ export default class AiSpam extends Component {
 
   get metrics() {
     const detected = {
-      label: "discourse_ai.spam.spam_detected",
+      label: i18n("discourse_ai.spam.spam_detected"),
       value: this.stats.spam_detected,
     };
     if (this.args.model.flagging_username) {
@@ -131,17 +132,19 @@
     }
     return [
       {
-        label: "discourse_ai.spam.scanned_count",
+        label: i18n("discourse_ai.spam.scanned_count"),
         value: this.stats.scanned_count,
       },
       detected,
       {
-        label: "discourse_ai.spam.false_positives",
+        label: i18n("discourse_ai.spam.false_positives"),
         value: this.stats.false_positives,
+        tooltip: i18n("discourse_ai.spam.stat_tooltips.incorrectly_flagged"),
       },
       {
-        label: "discourse_ai.spam.false_negatives",
+        label: i18n("discourse_ai.spam.false_negatives"),
         value: this.stats.false_negatives,
+        tooltip: i18n("discourse_ai.spam.stat_tooltips.missed_spam"),
       },
     ];
   }
@@ -220,22 +223,16 @@ export default class AiSpam extends Component {
         class="ai-spam__stats"
       >
         <:content>
-          <div class="ai-spam__metrics">
+          <DStatTiles as |tiles|>
             {{#each this.metrics as |metric|}}
-              <div class="ai-spam__metrics-item">
-                <span class="ai-spam__metrics-label">{{i18n
-                    metric.label
-                  }}</span>
-                {{#if metric.href}}
-                  <a href={{metric.href}} class="ai-spam__metrics-value">
-                    {{metric.value}}
-                  </a>
-                {{else}}
-                  <span class="ai-spam__metrics-value">{{metric.value}}</span>
-                {{/if}}
-              </div>
+              <tiles.Tile
+                @label={{metric.label}}
+                @url={{metric.href}}
+                @value={{metric.value}}
+                @tooltip={{metric.tooltip}}
+              />
             {{/each}}
-          </div>
+          </DStatTiles>
         </:content>
       </AdminConfigAreaCard>
     </div>
88 changes: 41 additions & 47 deletions assets/javascripts/discourse/components/ai-usage.gjs
@@ -7,6 +7,7 @@ import { service } from "@ember/service";
 import { eq, gt, lt } from "truth-helpers";
 import ConditionalLoadingSpinner from "discourse/components/conditional-loading-spinner";
 import DButton from "discourse/components/d-button";
+import DStatTiles from "discourse/components/d-stat-tiles";
 import DateTimeInputRange from "discourse/components/date-time-input-range";
 import avatar from "discourse/helpers/avatar";
 import concatClass from "discourse/helpers/concat-class";
@@ -124,6 +125,36 @@ export default class AiUsage extends Component {
     return normalized;
   }
 
+  get metrics() {
+    return [
+      {
+        label: i18n("discourse_ai.usage.total_requests"),
+        value: this.data.summary.total_requests,
+        tooltip: i18n("discourse_ai.usage.stat_tooltips.total_requests"),
+      },
+      {
+        label: i18n("discourse_ai.usage.total_tokens"),
+        value: this.data.summary.total_tokens,
+        tooltip: i18n("discourse_ai.usage.stat_tooltips.total_tokens"),
+      },
+      {
+        label: i18n("discourse_ai.usage.request_tokens"),
+        value: this.data.summary.total_request_tokens,
+        tooltip: i18n("discourse_ai.usage.stat_tooltips.request_tokens"),
+      },
+      {
+        label: i18n("discourse_ai.usage.response_tokens"),
+        value: this.data.summary.total_response_tokens,
+        tooltip: i18n("discourse_ai.usage.stat_tooltips.response_tokens"),
+      },
+      {
+        label: i18n("discourse_ai.usage.cached_tokens"),
+        value: this.data.summary.total_cached_tokens,
+        tooltip: i18n("discourse_ai.usage.stat_tooltips.cached_tokens"),
+      },
+    ];
+  }
+
   get chartConfig() {
     if (!this.data?.data) {
       return;
@@ -344,53 +375,16 @@
         class="ai-usage__summary"
       >
         <:content>
-          <div class="ai-usage__summary-stats">
-            <div class="ai-usage__summary-stat">
-              <span class="label">{{i18n
-                "discourse_ai.usage.total_requests"
-              }}</span>
-              <span
-                class="value"
-                title={{this.data.summary.total_requests}}
-              >{{number this.data.summary.total_requests}}</span>
-            </div>
-            <div class="ai-usage__summary-stat">
-              <span class="label">{{i18n
-                "discourse_ai.usage.total_tokens"
-              }}</span>
-              <span
-                class="value"
-                title={{this.data.summary.total_tokens}}
-              >{{number this.data.summary.total_tokens}}</span>
-            </div>
-            <div class="ai-usage__summary-stat">
-              <span class="label">{{i18n
-                "discourse_ai.usage.request_tokens"
-              }}</span>
-              <span
-                class="value"
-                title={{this.data.summary.total_request_tokens}}
-              >{{number this.data.summary.total_request_tokens}}</span>
-            </div>
-            <div class="ai-usage__summary-stat">
-              <span class="label">{{i18n
-                "discourse_ai.usage.response_tokens"
-              }}</span>
-              <span
-                class="value"
-                title={{this.data.summary.total_response_tokens}}
-              >{{number this.data.summary.total_response_tokens}}</span>
-            </div>
-            <div class="ai-usage__summary-stat">
-              <span class="label">{{i18n
-                "discourse_ai.usage.cached_tokens"
-              }}</span>
-              <span
-                class="value"
-                title={{this.data.summary.total_cached_tokens}}
-              >{{number this.data.summary.total_cached_tokens}}</span>
-            </div>
-          </div>
+          <DStatTiles as |tiles|>
+            {{#each this.metrics as |metric|}}
+              <tiles.Tile
+                @label={{metric.label}}
+                @href={{metric.href}}
+                @value={{metric.value}}
+                @tooltip={{metric.tooltip}}
+              />
+            {{/each}}
+          </DStatTiles>
         </:content>
       </AdminConfigAreaCard>
 
31 changes: 0 additions & 31 deletions assets/stylesheets/modules/llms/common/spam.scss
@@ -45,37 +45,6 @@
   &__stats {
     margin-top: 2em;
   }
-
-  &__stats-title {
-    margin-bottom: 1em;
-  }
-
-  &__metrics {
-    display: grid;
-    grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
-    gap: 1em;
-    margin-bottom: 2em;
-  }
-
-  &__metrics-item {
-    display: flex;
-    flex-direction: column;
-    padding: 1em;
-    background: var(--primary-very-low);
-    border-radius: 0.25em;
-  }
-
-  &__metrics-label {
-    color: var(--primary-medium);
-    font-size: 0.875em;
-    margin-bottom: 0.5em;
-  }
-
-  &__metrics-value {
-    color: var(--primary);
-    font-size: 1.5em;
-    font-weight: bold;
-  }
 }
 
 .spam-test-modal {
26 changes: 0 additions & 26 deletions assets/stylesheets/modules/llms/common/usage.scss
Expand Up @@ -68,32 +68,6 @@
font-size: 1.2em;
}

&__summary-stats {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(200px, 1fr));
gap: 1em;
}

&__summary-stat {
display: flex;
flex-direction: column;
padding: 1em;
background: var(--primary-very-low);
border-radius: 0.25em;

.label {
color: var(--primary-medium);
font-size: 0.875em;
margin-bottom: 0.5em;
}

.value {
color: var(--primary);
font-size: 1.5em;
font-weight: bold;
}
}

&__charts {
margin-top: 2em;
}
9 changes: 9 additions & 0 deletions config/locales/client.en.yml
@@ -157,6 +157,9 @@ en:
        run: "Run test"
        spam: "Spam"
        not_spam: "Not spam"
+      stat_tooltips:
+        incorrectly_flagged: "Items that the AI bot flagged as spam where moderators disagreed"
+        missed_spam: "Items flagged by the community as spam that were not detected by the AI bot, which moderators agreed with"
 
     usage:
       short_title: "Usage"
@@ -182,6 +185,12 @@
       no_models: "No model usage data found"
       no_features: "No feature usage data found"
       subheader_description: "Tokens are the basic units that LLMs use to understand and generate text, usage data may affect costs."
+      stat_tooltips:
+        total_requests: "All requests made to LLMs through Discourse"
+        total_tokens: "All the tokens used when prompting an LLM"
+        request_tokens: "Tokens used when the LLM tries to understand what you are saying"
+        response_tokens: "Tokens used when the LLM responds to your prompt"
+        cached_tokens: "Previously processed request tokens that the LLM reuses to optimize performance and cost"
       periods:
         last_day: "Last 24 hours"
         last_week: "Last week"
