@@ -455,6 +511,8 @@ import {
} from '@/api';
import { CustomChatModel, CustomGenerativeModel } from "../services/custom_llm";
+import { Popover } from "bootstrap";
+
Vue.use(VueTippy);
export default {
@@ -489,6 +547,10 @@ export default {
},
data: () => ({
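+ // user-facing copy for the sensitivity help popover; the Popover instance itself is created lazily in showPopover()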
+ sensitivity_popover_title: "What is Sensitivity?",
+ sensitivity_popover_content: "Sensitivity is a measure of how much a model's output changes when the input is changed slightly. A model with high sensitivity will give very different outputs for similar inputs, while a model with low sensitivity will give similar outputs for similar inputs. Sensitivity is a useful metric for understanding how a model will behave in the real world, where inputs are never exactly the same.",
+ sensitivity_popover: null,
+
chatModel: null,
chatText: "",
messages: [],
@@ -515,7 +577,7 @@ export default {
},
chatConfig: {
- chatMode: "normal_chat",
+ chatMode: "sensitivity",
selectedModel: "gpt-3.5-turbo-0613",
temperature: 0.7,
maxTokens: 256,
@@ -636,6 +698,15 @@ export default {
},
methods: {
+ showPopover(event) {
+ // create the Popover instance only once, on the first focus; Bootstrap reuses it for subsequent focus events
+ if (!this.sensitivity_popover) {
+ this.sensitivity_popover = new Popover(event.target, {
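+ // no title/content options passed, so Bootstrap reads them from the element's data-bs-* attributes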
+ trigger: 'focus'
+ });
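+ // the focus event that created the popover fired before Bootstrap attached its own listener, so show it manually this once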
+ this.sensitivity_popover.show();
+ }
+ },
autoResizeTextarea () {
const textArea = this.$refs.textAreaRef;
@@ -1403,4 +1474,38 @@ button:disabled {
-ms-user-select: none; /* Internet Explorer/Edge */
user-select: none; /* Non-prefixed version, currently supported by Chrome, Opera, and W3C */
}
+
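+/* per-corner border-radius utilities */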
+.rounded-top-left-1 {
+ border-top-left-radius: 0.3rem;
+}
+.rounded-top-right-1 {
+ border-top-right-radius: 0.3rem;
+}
+.rounded-bottom-left-1 {
+ border-bottom-left-radius: 0.3rem;
+}
+.rounded-bottom-right-1 {
+ border-bottom-right-radius: 0.3rem;
+}
+.custom-radius {
+ border-radius: 0;
+}
+
+.btn-outline-secondary:hover {
+ background-color: #dedede;
+}
+
+.btn-outline-secondary:active {
+ background-color: #dedede;
+ color: #000;
+}
+
+.sensitivity-popover {
+ border-bottom-right-radius: 0.3rem;
+ width: 100%;
+ height: 100%;
+ border-color: #dedede;
+ color: #000;
+ cursor: help;
+}
diff --git a/model-inference/model_inference/tasks/attacks/hotflip.py b/model-inference/model_inference/tasks/attacks/hotflip.py
index 0e2df464c..87aa9fb50 100644
--- a/model-inference/model_inference/tasks/attacks/hotflip.py
+++ b/model-inference/model_inference/tasks/attacks/hotflip.py
@@ -227,7 +227,9 @@ def attack_instance(
new_contexts = []
old_context = [" ".join([word for word in processed_context])]
tmp_context = processed_context
- for value in range(self.top_k):
+ num_replacements = min(self.top_k, len(new_imp_token_idx))  # clamp to the number of important tokens actually found, so indexing below stays in bounds
+
+ for value in range(num_replacements):
token_to_replace = replacement_tokens[value]
token_idx = new_imp_token_idx[value]
tmp_context[token_idx] = token_to_replace
@@ -236,7 +238,7 @@ def attack_instance(
all_contexts = old_context
all_contexts.extend(new_contexts)
- questions = [" ".join([w for w in processed_question])] * (self.top_k + 1)
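+ # one copy of the question per context: the original plus the num_replacements perturbed variants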
+ questions = [" ".join([w for w in processed_question])] * (num_replacements + 1)
prepared_inputs = [[q, c] for q, c in zip(questions, all_contexts)]
batch_request = self.base_prediction_request