diff --git a/.github/workflows/backend-ci.yml b/.github/workflows/backend-ci.yml index 14ceb8ccc6..ec71a18809 100644 --- a/.github/workflows/backend-ci.yml +++ b/.github/workflows/backend-ci.yml @@ -38,7 +38,7 @@ jobs: - uses: actions/checkout@v3 - name: Set up Python - uses: actions/setup-python@v2 + uses: actions/setup-python@v5 with: python-version: '3.9' diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml index 9ca73150d1..2f7a20cd24 100644 --- a/.github/workflows/e2e.yml +++ b/.github/workflows/e2e.yml @@ -55,6 +55,7 @@ jobs: with: working-directory: tests browser: chrome + headed: true # necessary to reflect the correct behavior of media queries (e.g. in ComparisonInput) spec: "cypress/e2e/frontend/**/*" - name: Print dev-env logs diff --git a/backend/ml/inputs.py b/backend/ml/inputs.py index daba070b26..81e9604a68 100644 --- a/backend/ml/inputs.py +++ b/backend/ml/inputs.py @@ -2,9 +2,8 @@ from typing import Optional import pandas as pd -from django.db.models import Case, F, Q, QuerySet, When -from django.db.models.expressions import RawSQL -from solidago.pipeline import TournesolInput +from django.db.models import F, Q +from solidago.pipeline import PipelineInput from core.models import User from tournesol.models import ( @@ -12,59 +11,21 @@ ContributorRating, ContributorRatingCriteriaScore, ContributorScaling, - Entity, ) from vouch.models import Voucher -class MlInputFromDb(TournesolInput): - SCALING_CALIBRATION_MIN_ENTITIES_TO_COMPARE = 20 - +class MlInputFromDb(PipelineInput): def __init__(self, poll_name: str): self.poll_name = poll_name - def get_scaling_calibration_users(self) -> QuerySet[User]: - n_alternatives = ( - Entity.objects.filter(comparisons_entity_1__poll__name=self.poll_name) - .union(Entity.objects.filter(comparisons_entity_2__poll__name=self.poll_name)) - .count() - ) - users = User.objects.alias( - n_compared_entities=RawSQL( - """ - SELECT COUNT(DISTINCT e.id) - FROM tournesol_entity e - INNER JOIN tournesol_comparison c - ON (c.entity_1_id = e.id OR c.entity_2_id = e.id) - INNER JOIN tournesol_poll p - ON (p.id = c.poll_id AND p.name = %s) - WHERE c.user_id = "core_user"."id" - """, - (self.poll_name,), - ) - ) - if n_alternatives <= self.SCALING_CALIBRATION_MIN_ENTITIES_TO_COMPARE: - # The number of alternatives is low enough to consider as calibration users - # all trusted users who have compared all alternatives. 
- return users.filter( - is_active=True, - trust_score__gt=self.SCALING_CALIBRATION_MIN_TRUST_SCORE, - n_compared_entities__gte=n_alternatives, - ) - - return users.filter( - is_active=True, - trust_score__gt=self.SCALING_CALIBRATION_MIN_TRUST_SCORE, - n_compared_entities__gte=self.SCALING_CALIBRATION_MIN_ENTITIES_TO_COMPARE, - ).order_by("-n_compared_entities")[: self.MAX_SCALING_CALIBRATION_USERS] - - def get_comparisons(self, criteria=None, user_id=None) -> pd.DataFrame: + def get_comparisons(self, criterion=None, user_id=None) -> pd.DataFrame: scores_queryset = ComparisonCriteriaScore.objects.filter( comparison__poll__name=self.poll_name, comparison__user__is_active=True, ) - if criteria is not None: - scores_queryset = scores_queryset.filter(criteria=criteria) + if criterion is not None: + scores_queryset = scores_queryset.filter(criteria=criterion) if user_id is not None: scores_queryset = scores_queryset.filter(comparison__user_id=user_id) @@ -72,8 +33,8 @@ def get_comparisons(self, criteria=None, user_id=None) -> pd.DataFrame: values = scores_queryset.values( "score", "score_max", - "criteria", "weight", + criterion=F("criteria"), entity_a=F("comparison__entity_1_id"), entity_b=F("comparison__entity_2_id"), user_id=F("comparison__user_id"), @@ -81,7 +42,7 @@ def get_comparisons(self, criteria=None, user_id=None) -> pd.DataFrame: if len(values) > 0: dtf = pd.DataFrame(values) return dtf[ - ["user_id", "entity_a", "entity_b", "criteria", "score", "score_max", "weight"] + ["user_id", "entity_a", "entity_b", "criterion", "score", "score_max", "weight"] ] return pd.DataFrame( @@ -89,7 +50,7 @@ def get_comparisons(self, criteria=None, user_id=None) -> pd.DataFrame: "user_id", "entity_a", "entity_b", - "criteria", + "criterion", "score", "score_max", "weight", @@ -100,24 +61,12 @@ def get_comparisons(self, criteria=None, user_id=None) -> pd.DataFrame: def ratings_properties(self): # This makes sure that `get_scaling_calibration_users()` is evaluated separately, as the # table names mentionned in its RawSQL query could conflict with the current queryset. 
- scaling_calibration_user_ids = list(self.get_scaling_calibration_users().values_list("id")) - values = ( - ContributorRating.objects.filter( - poll__name=self.poll_name, - ) - .annotate( - is_scaling_calibration_user=Case( - When(user__in=scaling_calibration_user_ids, then=True), - default=False, - ), - ) - .values( - "user_id", - "entity_id", - "is_public", - "is_scaling_calibration_user", - trust_score=F("user__trust_score"), - ) + values = ContributorRating.objects.filter( + poll__name=self.poll_name, + ).values( + "user_id", + "entity_id", + "is_public", ) if len(values) == 0: return pd.DataFrame( @@ -125,8 +74,6 @@ def ratings_properties(self): "user_id", "entity_id", "is_public", - "is_scaling_calibration_user", - "trust_score", ] ) return pd.DataFrame(values) @@ -136,7 +83,7 @@ def get_user_scalings(self, user_id=None) -> pd.DataFrame: Returns: - ratings_df: DataFrame with columns * `user_id`: int - * `criteria`: str + * `criterion`: str * `scale`: float * `scale_uncertainty`: float * `translation`: float @@ -148,17 +95,18 @@ def get_user_scalings(self, user_id=None) -> pd.DataFrame: scalings = scalings.filter(user_id=user_id) values = scalings.values( "user_id", - "criteria", "scale", "scale_uncertainty", "translation", "translation_uncertainty", + criterion=F("criteria"), + ) if len(values) == 0: return pd.DataFrame( columns=[ "user_id", - "criteria", + "criterion", "scale", "scale_uncertainty", "translation", @@ -168,28 +116,28 @@ def get_user_scalings(self, user_id=None) -> pd.DataFrame: return pd.DataFrame(values) def get_individual_scores( - self, criteria: Optional[str] = None, user_id: Optional[int] = None + self, user_id: Optional[int] = None, criterion: Optional[str] = None, ) -> pd.DataFrame: scores_queryset = ContributorRatingCriteriaScore.objects.filter( contributor_rating__poll__name=self.poll_name, contributor_rating__user__is_active=True, ) - if criteria is not None: - scores_queryset = scores_queryset.filter(criteria=criteria) + if criterion is not None: + scores_queryset = scores_queryset.filter(criteria=criterion) if user_id is not None: scores_queryset = scores_queryset.filter(contributor_rating__user_id=user_id) values = scores_queryset.values( "raw_score", - "criteria", - entity=F("contributor_rating__entity_id"), + criterion=F("criteria"), + entity_id=F("contributor_rating__entity_id"), user_id=F("contributor_rating__user_id"), ) if len(values) == 0: - return pd.DataFrame(columns=["user_id", "entity", "criteria", "raw_score"]) + return pd.DataFrame(columns=["user_id", "entity_id", "criterion", "raw_score"]) dtf = pd.DataFrame(values) - return dtf[["user_id", "entity", "criteria", "raw_score"]] + return dtf[["user_id", "entity_id", "criterion", "raw_score"]] def get_vouches(self): values = Voucher.objects.filter( diff --git a/backend/ml/management/commands/ml_train.py b/backend/ml/management/commands/ml_train.py index b345613f45..41e78ebfc0 100644 --- a/backend/ml/management/commands/ml_train.py +++ b/backend/ml/management/commands/ml_train.py @@ -1,5 +1,6 @@ import os from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed +from functools import cache from django import db from django.conf import settings @@ -18,6 +19,7 @@ from tournesol.models.poll import ALGORITHM_MEHESTAN, DEFAULT_POLL_NAME +@cache def get_solidago_pipeline(run_trust_propagation: bool = True): if run_trust_propagation: trust_algo = LipschiTrust() diff --git a/backend/ml/mehestan/parameters.py b/backend/ml/mehestan/parameters.py deleted file mode 100644 index 
e31e6de255..0000000000 --- a/backend/ml/mehestan/parameters.py +++ /dev/null @@ -1,7 +0,0 @@ -from solidago.pipeline.legacy2023.parameters import PipelineParameters - -from tournesol.utils.constants import MEHESTAN_MAX_SCALED_SCORE - - -class MehestanParameters(PipelineParameters): - max_squashed_score = MEHESTAN_MAX_SCALED_SCORE diff --git a/backend/ml/mehestan/run.py b/backend/ml/mehestan/run.py deleted file mode 100644 index abfe80aef0..0000000000 --- a/backend/ml/mehestan/run.py +++ /dev/null @@ -1,108 +0,0 @@ -import logging -import os -from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed - -from django import db -from django.conf import settings -from solidago.pipeline import TournesolInput -from solidago.pipeline.legacy2023.criterion_pipeline import run_pipeline_for_criterion -from solidago.pipeline.legacy2023.individual_scores import get_individual_scores - -from core.models import User -from ml.inputs import MlInputFromDb -from ml.outputs import TournesolPollOutput, save_tournesol_scores -from tournesol.models import Poll - -from .parameters import MehestanParameters - -logger = logging.getLogger(__name__) - - -def update_user_scores(poll: Poll, user: User): - params = MehestanParameters() - ml_input = MlInputFromDb(poll_name=poll.name) - for criteria in poll.criterias_list: - output = TournesolPollOutput(poll_name=poll.name, criterion=criteria) - scores = get_individual_scores( - ml_input, - criteria, - parameters=params, - single_user_id=user.pk, - ) - scores["criteria"] = criteria - scores.rename( - columns={ - "score": "raw_score", - "uncertainty": "raw_uncertainty", - }, - inplace=True, - ) - output.save_individual_scores(scores, single_user_id=user.pk) - - -def close_db_connection_callback(): - db.connection.close() - - -def run_mehestan( - ml_input: TournesolInput, poll: Poll, parameters: MehestanParameters, main_criterion_only=False -): - """ - This function use multiprocessing. - - 1. Always close all database connections in the main process before - creating forks. Django will automatically re-create new database - connections when needed. - - 2. Do not pass Django model's instances as arguments to the function - run by child processes. Using such instances in child processes - will raise an exception: connection already closed. - - 3. Do not fork the main process within a code block managed by - a single database transaction. 
- - See the indications to close the database connections: - - https://www.psycopg.org/docs/usage.html#thread-and-process-safety - - https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNECT - - See how django handles database connections: - - https://docs.djangoproject.com/en/4.0/ref/databases/#connection-management - """ - logger.info("Mehestan for poll '%s': Start", poll.name) - - criteria = poll.criterias_list - - criteria_to_run = [poll.main_criteria] - if not main_criterion_only: - criteria_to_run.extend(c for c in criteria if c != poll.main_criteria) - - if settings.MEHESTAN_MULTIPROCESSING: - # compute each criterion in parallel - cpu_count = os.cpu_count() or 1 - cpu_count -= settings.MEHESTAN_KEEP_N_FREE_CPU - os.register_at_fork(before=db.connections.close_all) - executor = ProcessPoolExecutor(max_workers=max(1, cpu_count)) - else: - # In tests, we might prefer to use a single thread to reduce overhead - # of multiple processes, db connections, and redundant numba compilation - executor = ThreadPoolExecutor(max_workers=1) - - with executor: - futures = [ - executor.submit( - run_pipeline_for_criterion, - criterion=crit, - input=ml_input, - parameters=parameters, - output=TournesolPollOutput(poll_name=poll.name, criterion=crit), - # The callback fixes a warning about unclosed connections to test database - done_callback=close_db_connection_callback, - ) - for crit in criteria_to_run - ] - for fut in as_completed(futures): - # reraise potential exception - fut.result() - - save_tournesol_scores(poll) - logger.info("Mehestan for poll '%s': Done", poll.name) diff --git a/backend/ml/outputs.py b/backend/ml/outputs.py index 32e5c02e23..84f3ce7854 100644 --- a/backend/ml/outputs.py +++ b/backend/ml/outputs.py @@ -6,7 +6,6 @@ import numpy as np import pandas as pd from django.db import transaction -from solidago.pipeline.legacy2023.global_scores import get_squash_function from solidago.pipeline.outputs import PipelineOutput from core.models import User @@ -21,9 +20,6 @@ ) from tournesol.models.poll import ALGORITHM_MEHESTAN -from .inputs import MlInputFromDb -from .mehestan.parameters import MehestanParameters - logger = logging.getLogger(__name__) @@ -40,8 +36,8 @@ def __init__( @cached_property def poll(self) -> Poll: - # Retrieving the poll instance lazily allows to be use this instance - # in a forked process. See the function `run_mehestan()`. + # Retrieving the poll instance lazily allows to use this instance + # in a forked process (e.g with multiprocessing). return Poll.objects.get(name=self.poll_name) def save_trust_scores(self, trusts: pd.DataFrame): @@ -92,11 +88,6 @@ def save_individual_scores( scores: pd.DataFrame, single_user_id: Optional[int] = None, ): - if "score" not in scores: - # Scaled "score" and "uncertainty" need to be computed - # based on raw_score and raw_uncertainty - scores = apply_score_scalings(self.poll, scores) - if "voting_right" not in scores: # Row contains `voting_right` when it comes from a full ML run, but not in the # case of online individual updates. 
As online updates do not update the @@ -166,7 +157,7 @@ def save_individual_scores( raw_uncertainty=row.raw_uncertainty, voting_right=row.voting_right, ) - for _, row in scores.iterrows() + for row in scores.itertuples() ), batch_size=10000, ) @@ -246,58 +237,3 @@ def entities_iterator(): [ent.single_poll_rating for ent in batch], fields=["tournesol_score"], ) - - -def apply_score_scalings(poll: Poll, contributor_scores: pd.DataFrame): - """ - Apply individual and poll-level scalings based on input "raw_score", and "raw_uncertainty". - - Params: - poll: Poll, - contributor_scores: DataFrame with columns: - user_id: int - entity_id: int - criteria: str - raw_score: float - raw_uncertainty: float - - Returns: - DataFrame with additional columns "score" and "uncertainty". - """ - if poll.algorithm != ALGORITHM_MEHESTAN: - contributor_scores["score"] = contributor_scores["raw_score"] - contributor_scores["uncertainty"] = contributor_scores["raw_uncertainty"] - return contributor_scores - - ml_input = MlInputFromDb(poll_name=poll.name) - scalings = ml_input.get_user_scalings().set_index(["user_id", "criteria"]) - contributor_scores = contributor_scores.join( - scalings, on=["user_id", "criteria"], how="left" - ) - contributor_scores["scale"].fillna(1, inplace=True) - contributor_scores["translation"].fillna(0, inplace=True) - contributor_scores["scale_uncertainty"].fillna(0, inplace=True) - contributor_scores["translation_uncertainty"].fillna(0, inplace=True) - - # Apply individual scaling - contributor_scores["uncertainty"] = ( - contributor_scores["scale"] * contributor_scores["raw_uncertainty"] - + contributor_scores["scale_uncertainty"] - * contributor_scores["raw_score"].abs() - + contributor_scores["translation_uncertainty"] - ) - contributor_scores["score"] = ( - contributor_scores["raw_score"] * contributor_scores["scale"] - + contributor_scores["translation"] - ) - - # Apply score squashing - squash_function = get_squash_function(MehestanParameters()) - contributor_scores["uncertainty"] = 0.5 * ( - squash_function(contributor_scores["score"] + contributor_scores["uncertainty"]) - - squash_function( - contributor_scores["score"] - contributor_scores["uncertainty"] - ) - ) - contributor_scores["score"] = squash_function(contributor_scores["score"]) - return contributor_scores diff --git a/backend/requirements.txt b/backend/requirements.txt index 92ddc809be..5ac49a87d5 100644 --- a/backend/requirements.txt +++ b/backend/requirements.txt @@ -3,7 +3,7 @@ # Full stack Web Framework used for Tournesol's backend # https://docs.djangoproject.com -Django==4.2.16 +Django==4.2.17 # Used for fields computed on save for Django models # See https://github.com/brechin/django-computed-property/ django-computed-property==0.3.0 @@ -37,14 +37,14 @@ PyYAML==6.0.1 langdetect==1.0.9 # Pandas is used extensively in the ML algorithms and for some data management # tasks such as building the public dataset -pandas==2.1.2 +pandas==2.2.3 # Numba provides just-in-time compilation to run optimized machine code # for performance-critical functions in Mehestan implementation. -numba==0.58.1 +numba==0.60.0 # Numpy is used extensively in the ML algorithms and in some other algorithms # such as computing comparison suggestions. See https://numpy.org/ # Check the compatibility with Numba before upgrading. 
-numpy==1.26.1 +numpy==1.26.4 # Scipy is used in some ML algorithms scipy==1.11.3 # API Youtube data diff --git a/backend/tournesol/admin.py b/backend/tournesol/admin.py index 8542ec4df4..b92c5d3877 100644 --- a/backend/tournesol/admin.py +++ b/backend/tournesol/admin.py @@ -264,6 +264,7 @@ class ComparisonAdmin(admin.ModelAdmin): "entity_1", "entity_2", "poll", + "user", ) raw_id_fields = ( "user", @@ -290,13 +291,34 @@ def get_poll_name(self, obj): return obj.poll.name +class ScoreMaxListFilter(admin.SimpleListFilter): + title = _("score max") + parameter_name = "score_max" + relevant_score_max = (2, 10) + + def lookups(self, request, model_admin): + return [(score_max, score_max) for score_max in self.relevant_score_max] + + def queryset(self, request, queryset): + if self.value() is None: + return queryset + + try: + if int(self.value()) in self.relevant_score_max: + return queryset.filter( + score_max=int(self.value()), + ) + except ValueError: + pass + return queryset + + @admin.register(ComparisonCriteriaScore) class ComparisonCriteriaScoreAdmin(admin.ModelAdmin): - list_filter = ("comparison__poll__name",) - list_display = ("id", "comparison", "criteria", "score") + list_filter = ("comparison__poll__name", ScoreMaxListFilter, "criteria") + list_display = ("id", "comparison", "criteria", "score_max", "score") readonly_fields = ("comparison",) search_fields = ( - "criteria", "comparison__entity_1__uid", "comparison__entity_2__uid", ) diff --git a/backend/tournesol/lib/public_dataset.py b/backend/tournesol/lib/public_dataset.py index 58d8e3ef16..8077077006 100644 --- a/backend/tournesol/lib/public_dataset.py +++ b/backend/tournesol/lib/public_dataset.py @@ -10,14 +10,14 @@ from datetime import datetime from typing import Optional +import solidago from django.conf import settings from django.db.models import QuerySet from django.utils import timezone -from ml.mehestan.run import MehestanParameters +from ml.management.commands.ml_train import get_solidago_pipeline from tournesol.entities.base import UID_DELIMITER from vouch.models import Voucher -from vouch.trust_algo import SINK_VOUCH, TRUSTED_EMAIL_PRETRUST, VOUCH_DECAY # The standard decimal precision of floating point numbers appearing in the # dataset. Very small numbers can use a higher precision. @@ -245,28 +245,15 @@ def write_metadata_file(write_target, data_until: datetime) -> None: Write the metadata as JSON in `write_target`, an object supporting the Python file API. 
""" - mehestan_params = MehestanParameters() - + solidago_pipeline = get_solidago_pipeline() metadata_dict = { "data_included_until": data_until.isoformat(), "generated_by": settings.MAIN_URL, "tournesol_version": settings.TOURNESOL_VERSION, "license": "ODC-By-1.0", - "algorithms_parameters": { - "byztrust": { - "SINK_VOUCH": SINK_VOUCH, - "TRUSTED_EMAIL_PRETRUST": TRUSTED_EMAIL_PRETRUST, - "VOUCH_DECAY": VOUCH_DECAY, - }, - "individual_scores": mehestan_params.indiv_algo.get_metadata(), - "mehestan": { - "W": mehestan_params.W, - "VOTE_WEIGHT_PUBLIC_RATINGS": mehestan_params.vote_weight_public_ratings, - "VOTE_WEIGHT_PRIVATE_RATINGS": mehestan_params.vote_weight_private_ratings, - "OVER_TRUST_BIAS": mehestan_params.over_trust_bias, - "OVER_TRUST_SCALE": mehestan_params.over_trust_scale, - "MAX_SCALED_SCORE": mehestan_params.max_squashed_score, - }, + "solidago": { + "version": solidago.__version__, + "pipeline": solidago_pipeline.to_json() }, } json.dump(metadata_dict, write_target, indent=2) diff --git a/backend/tournesol/locale/fr/LC_MESSAGES/django.po b/backend/tournesol/locale/fr/LC_MESSAGES/django.po index 4d78fcb6ff..72cd4d9965 100644 --- a/backend/tournesol/locale/fr/LC_MESSAGES/django.po +++ b/backend/tournesol/locale/fr/LC_MESSAGES/django.po @@ -8,7 +8,7 @@ msgid "" msgstr "" "Project-Id-Version: PACKAGE VERSION\n" "Report-Msgid-Bugs-To: \n" -"POT-Creation-Date: 2024-08-06 08:43+0000\n" +"POT-Creation-Date: 2024-11-14 15:16+0000\n" "PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" "Last-Translator: FULL NAME \n" "Language-Team: LANGUAGE \n" @@ -23,15 +23,19 @@ msgstr "" msgid "Successfully refreshed the metadata of %(count)s entities." msgstr "Les métadonnées de %(count)s entités ont été mises à jour." -#: tournesol/admin.py:372 +#: tournesol/admin.py:294 +msgid "score max" +msgstr "score max" + +#: tournesol/admin.py:393 msgid "has text?" msgstr "contient du texte?" -#: tournesol/admin.py:377 +#: tournesol/admin.py:398 msgid "Yes" msgstr "Oui" -#: tournesol/admin.py:378 +#: tournesol/admin.py:399 msgid "No" msgstr "Non" @@ -49,8 +53,8 @@ msgid "" "The absolute value of the score %(score)s given to the criterion " "%(criterion)s can't be greater than the value of score_max %(score_max)s." msgstr "" -"La valeur absolue du score %(score)s donnée au critère " -"%(criterion)s ne peut pas être supérieure à la valeur de score_max %(score_max)s." +"La valeur absolue du score %(score)s donnée au critère %(criterion)s ne peut " +"pas être supérieure à la valeur de score_max %(score_max)s." #: tournesol/serializers/rate_later.py:60 msgid "The entity is already in the rate-later list of this poll." 
diff --git a/backend/tournesol/management/commands/load_public_dataset.py b/backend/tournesol/management/commands/load_public_dataset.py index 095e0efc8b..dd91e55242 100644 --- a/backend/tournesol/management/commands/load_public_dataset.py +++ b/backend/tournesol/management/commands/load_public_dataset.py @@ -6,7 +6,7 @@ from django.core.management import call_command from django.core.management.base import BaseCommand from django.db import transaction -from solidago.pipeline.inputs import TournesolInputFromPublicDataset +from solidago.pipeline.inputs import TournesolDataset from core.models import User from core.models.user import EmailDomain @@ -27,7 +27,7 @@ def add_arguments(self, parser): parser.add_argument("--user-sampling", type=float, default=None) parser.add_argument("--dataset-url", type=str, default=PUBLIC_DATASET_URL) - def create_user(self, username: str, ml_input: TournesolInputFromPublicDataset): + def create_user(self, username: str, ml_input: TournesolDataset): user = ml_input.users.loc[ml_input.users.public_username == username].iloc[0] is_pretrusted = user.trust_score > 0.5 email = f"{username}@trusted.example" if is_pretrusted else f"{username}@example.com" @@ -66,7 +66,7 @@ def create_test_user(self): ) def handle(self, *args, **options): - public_dataset = TournesolInputFromPublicDataset(options["dataset_url"]) + public_dataset = TournesolDataset(options["dataset_url"]) nb_comparisons = 0 with transaction.atomic(): @@ -108,7 +108,7 @@ def handle(self, *args, **options): for values in rows.itertuples(index=False): ComparisonCriteriaScore.objects.create( comparison=comparison, - criteria=values.criteria, + criteria=values.criterion, score=values.score, score_max=values.score_max, ) diff --git a/backend/tournesol/tests/test_api_comparison.py b/backend/tournesol/tests/test_api_comparison.py index fb27467425..0f9334444e 100644 --- a/backend/tournesol/tests/test_api_comparison.py +++ b/backend/tournesol/tests/test_api_comparison.py @@ -1,5 +1,6 @@ import datetime from copy import deepcopy +from unittest import skip from unittest.mock import patch from django.core.management import call_command @@ -1371,6 +1372,7 @@ def setUp(self): self.client = APIClient() + @skip("Online updates not implemented in Solidago") @override_settings( UPDATE_MEHESTAN_SCORES_ON_COMPARISON=True, MEHESTAN_MULTIPROCESSING=False, diff --git a/backend/tournesol/tests/test_api_exports.py b/backend/tournesol/tests/test_api_exports.py index 19b425e5da..c418abdccd 100644 --- a/backend/tournesol/tests/test_api_exports.py +++ b/backend/tournesol/tests/test_api_exports.py @@ -17,7 +17,7 @@ from django.test import TransactionTestCase, override_settings from rest_framework import status from rest_framework.test import APIClient -from solidago.pipeline.inputs import TournesolInputFromPublicDataset +from solidago.pipeline.inputs import TournesolDataset from core.models import User from core.tests.factories.user import UserFactory @@ -284,29 +284,14 @@ def test_export_metadata(self): self.assertEqual(metadata["generated_by"],settings.MAIN_URL) self.assertEqual(metadata["tournesol_version"],settings.TOURNESOL_VERSION) self.assertEqual( - set(metadata["algorithms_parameters"]["byztrust"].keys()), + set(metadata["solidago"]["pipeline"].keys()), { - "SINK_VOUCH", - "VOUCH_DECAY", - "TRUSTED_EMAIL_PRETRUST", - } - ) - self.assertEqual( - set(metadata["algorithms_parameters"]["mehestan"].keys()), - { - "W", - "OVER_TRUST_BIAS", - "OVER_TRUST_SCALE", - "VOTE_WEIGHT_PUBLIC_RATINGS", - "VOTE_WEIGHT_PRIVATE_RATINGS", - 
"MAX_SCALED_SCORE", - } - ) - self.assertEqual( - set(metadata["algorithms_parameters"]["individual_scores"]["parameters"].keys()), - { - "R_MAX", - "ALPHA", + "trust_propagation", + "voting_rights", + "preference_learning", + "scaling", + "aggregation", + "post_process", } ) @@ -542,18 +527,18 @@ def test_use_public_export_as_ml_input(self): self.assertEqual(response.status_code, status.HTTP_200_OK) zip_content = io.BytesIO(response.content) - ml_input = TournesolInputFromPublicDataset(zip_content) + ml_input = TournesolDataset(zip_content) comparisons_df = ml_input.get_comparisons() rating_properties = ml_input.ratings_properties self.assertEqual(len(comparisons_df), 1) self.assertEqual( list(comparisons_df.columns), - ["user_id", "entity_a", "entity_b", "criteria", "score", "score_max", "weight"], + ["user_id", "entity_a", "entity_b", "criterion", "score", "score_max", "weight"], ) self.assertEqual(len(rating_properties), 2) self.assertEqual( list(rating_properties.columns), - ["user_id", "entity_id", "is_public", "trust_score", "is_scaling_calibration_user"], + ["user_id", "entity_id", "is_public"], ) diff --git a/backend/tournesol/views/comparison.py b/backend/tournesol/views/comparison.py index a2453bb5ea..52e873d621 100644 --- a/backend/tournesol/views/comparison.py +++ b/backend/tournesol/views/comparison.py @@ -2,16 +2,13 @@ API endpoints to interact with the contributor's comparisons. """ -from django.conf import settings from django.db.models import ObjectDoesNotExist, Q from django.http import Http404 from django.utils.translation import gettext_lazy as _ from drf_spectacular.utils import extend_schema from rest_framework import exceptions, generics, mixins -from ml.mehestan.run import update_user_scores from tournesol.models import Comparison -from tournesol.models.poll import ALGORITHM_MEHESTAN from tournesol.serializers.comparison import ComparisonSerializer, ComparisonUpdateSerializer from tournesol.views.mixins.poll import PollScopedViewMixin @@ -119,8 +116,9 @@ def perform_create(self, serializer): comparison.entity_2.inner.refresh_metadata() comparison.entity_2.auto_remove_from_rate_later(poll=poll, user=self.request.user) - if settings.UPDATE_MEHESTAN_SCORES_ON_COMPARISON and poll.algorithm == ALGORITHM_MEHESTAN: - update_user_scores(poll, user=self.request.user) + # TODO: online updates are to be implemented in Solidago + # if settings.UPDATE_MEHESTAN_SCORES_ON_COMPARISON: + # update_user_scores(poll, user=self.request.user) class ComparisonListFilteredApi(ComparisonListBaseApi): @@ -208,17 +206,19 @@ def get_serializer_context(self): ctx["partial_update"] = self.request.method == 'PATCH' return ctx - def perform_update(self, serializer): - super().perform_update(serializer) - poll = self.poll_from_url - if settings.UPDATE_MEHESTAN_SCORES_ON_COMPARISON and poll.algorithm == ALGORITHM_MEHESTAN: - update_user_scores(poll, user=self.request.user) - - def perform_destroy(self, instance): - super().perform_destroy(instance) - poll = self.poll_from_url - if settings.UPDATE_MEHESTAN_SCORES_ON_COMPARISON and poll.algorithm == ALGORITHM_MEHESTAN: - update_user_scores(poll, user=self.request.user) + # TODO: online updates are to be implemented in Solidago + # def perform_update(self, serializer): + # super().perform_update(serializer) + # poll = self.poll_from_url + # if settings.UPDATE_MEHESTAN_SCORES_ON_COMPARISON: + # update_user_scores(poll, user=self.request.user) + + # TODO: online updates are to be implemented in Solidago + # def perform_destroy(self, instance): + # 
super().perform_destroy(instance) + # poll = self.poll_from_url + # if settings.UPDATE_MEHESTAN_SCORES_ON_COMPARISON: + # update_user_scores(poll, user=self.request.user) def get(self, request, *args, **kwargs): """Retrieve a comparison made by the logged user, in the given poll.""" diff --git a/backend/vouch/tests/test_trust_algo.py b/backend/vouch/tests/test_trust_algo.py deleted file mode 100644 index 60aaad9c49..0000000000 --- a/backend/vouch/tests/test_trust_algo.py +++ /dev/null @@ -1,115 +0,0 @@ -""" -All test cases of the trust algorithm. -""" -from django.test import TestCase - -from core.models.user import EmailDomain, User -from core.tests.factories.user import UserFactory -from vouch.models import Voucher -from vouch.trust_algo import TRUSTED_EMAIL_PRETRUST, trust_algo - - -class TrustAlgoTest(TestCase): - _user_0 = "username_0" - _user_1 = "username_1" - _user_2 = "username_2" - _user_3 = "username_3" - _user_4 = "username_4" - _user_5 = "username_5" - _user_6 = "username_6" - _user_7 = "username_7" - _user_8 = "username_8" - _user_9 = "username_9" - _nb_users = 10 - - def setUp(self) -> None: - self.user_0 = UserFactory(username=self._user_0) - self.user_1 = UserFactory(username=self._user_1, email="user1@trusted.test") - self.user_2 = UserFactory(username=self._user_2) - self.user_3 = UserFactory(username=self._user_3, email="user3@trusted.test") - self.user_4 = UserFactory(username=self._user_4) - self.user_5 = UserFactory(username=self._user_5) - self.user_6 = UserFactory(username=self._user_6, email="user6@trusted.test") - self.user_7 = UserFactory(username=self._user_7) - self.user_8 = UserFactory(username=self._user_8) - self.user_9 = UserFactory(username=self._user_9) - - email_domain = EmailDomain.objects.get(domain="@trusted.test") - email_domain.status = EmailDomain.STATUS_ACCEPTED - email_domain.save() - - Voucher.objects.bulk_create( - [ - # user_0 has given zero voucher - # user_1 has given three vouchers - Voucher(by=self.user_1, to=self.user_0), - Voucher(by=self.user_1, to=self.user_3), - Voucher(by=self.user_1, to=self.user_7), - # user_2 has given one voucher - Voucher(by=self.user_2, to=self.user_5), - # user_3 has given two vouchers - Voucher(by=self.user_3, to=self.user_1), - Voucher(by=self.user_3, to=self.user_5), - # user_4 has given one voucher - Voucher(by=self.user_4, to=self.user_7), - # user_5 has given one voucher - Voucher(by=self.user_5, to=self.user_1), - # user_6 has given zero voucher - # user_7 has given two vouchers - Voucher(by=self.user_7, to=self.user_1), - Voucher(by=self.user_7, to=self.user_2), - # user_8 has given one voucher - Voucher(by=self.user_8, to=self.user_3), - # user_9 has given two vouchers - Voucher(by=self.user_9, to=self.user_4), - Voucher(by=self.user_9, to=self.user_5), - ] - ) - - def test_trust_algo(self): - users = list(User.objects.all()) - for user in users: - self.assertIsNone(user.trust_score) - - trust_algo() - users = list(User.objects.all().order_by('username')) - self.assertTrue(users[1].trust_score >= TRUSTED_EMAIL_PRETRUST) - self.assertTrue(users[2].trust_score > 0) - self.assertAlmostEqual(users[9].trust_score, 0) - self.assertAlmostEqual(users[8].trust_score, 0) - - vouch18 = Voucher(by=self.user_1, to=self.user_8) - vouch18.save() - trust_algo() - users = list(User.objects.all().order_by('username')) - self.assertTrue(users[8].trust_score > 0) - - def test_trust_algo_without_pretrusted_users_is_noop(self): - # Keep only users without trusted emails - User.objects.exclude( - 
username__in=[self._user_7, self._user_8, self._user_9] - ).delete() - - for user in User.objects.all(): - self.assertIsNone(user.trust_score) - trust_algo() - for user in User.objects.all(): - self.assertIsNone(user.trust_score) - - def test_trust_algo_without_voucher(self): - Voucher.objects.all().delete() - - for user in User.objects.all(): - self.assertIsNone(user.trust_score) - - trust_algo() - - for user in User.objects.all(): - if user.has_trusted_email: - self.assertEqual(user.trust_score, TRUSTED_EMAIL_PRETRUST) - else: - self.assertEqual(user.trust_score, 0.0) - - def test_trust_algo_db_requests_count(self): - with self.assertNumQueries(3): - trust_algo() diff --git a/backend/vouch/trust_algo.py b/backend/vouch/trust_algo.py deleted file mode 100644 index b023cda14e..0000000000 --- a/backend/vouch/trust_algo.py +++ /dev/null @@ -1,94 +0,0 @@ -import logging - -import pandas as pd -from django.db.models import Q -from solidago.trust_propagation import LipschiTrust - -from core.models.user import User -from vouch.models import Voucher - -logger = logging.getLogger(__name__) - -# In this algorithm, we leverage pre-trust (e.g., based on email domains) and -# vouching to securely assign trust scores to a wider set of contributors. The -# algorithm inputs pre-trust status and a vouching directed graph. - - -# Trust scores are computed iteratively, which yields an approximate solution. -APPROXIMATION_ERROR = 1e-8 - -# In our model we assume that each participating contributor implicitly -# vouches for a sink. The sink counts for SINK_VOUCH vouchees. As a result, when a -# contributor with less than SINK_VOUCH vouchees vouches for more vouchees, -# the amount of trust scores the contributor assigns grows almost -# linearly, thereby not penalizing previously vouched contributors. -# Vouching is thereby not (too) disincentivized. -SINK_VOUCH = 5.0 - -# The algorithm guarantees that every pre-trusted user is given a trust score -# which is at least TRUSTED_EMAIL_PRETRUST. Moreover, all users' trust score -# will be at most 1. -TRUSTED_EMAIL_PRETRUST = 0.8 - -# When considering a random walker on the vouch network, -# (1 - VOUCH_DECAY) is the probability that the random walker resets -# its walk at each iteration. -# ByzTrust essentially robustifies the random walk, by frequently -# preventing the walker from visiting too frequently visited contributors, -# thereby bounding the maximal influence of such contributors. -VOUCH_DECAY = 0.8 - - -lipshitrust = LipschiTrust( - pretrust_value=TRUSTED_EMAIL_PRETRUST, - decay=VOUCH_DECAY, - sink_vouch=SINK_VOUCH, - error=APPROXIMATION_ERROR, -) - - -def trust_algo(): - """ - Improved version of the EigenTrust algorithm. - - Compute a global trust score for all users, based on the set of - pre-trusted users and on vouching made between users. - - (* the ones with an email from a trusted domain). 
- """ - # Import users and pretrust status - users = list( - User.objects.filter(is_active=True) - .annotate(with_trusted_email=Q(pk__in=User.with_trusted_email())) - .only("id") - ) - - users_df = pd.DataFrame( - { - "user_id": user.id, - "is_pretrusted": user.with_trusted_email, - } - for user in users - ) - users_df.set_index("user_id", inplace=True) - if not users_df["is_pretrusted"].any(): - logger.warning("Trust scores cannot be computed: no pre-trusted user exists") - return - - vouches = pd.DataFrame( - ( - {"voucher": vouch.by_id, "vouchee": vouch.to_id, "vouch": vouch.value} - for vouch in Voucher.objects.iterator() - if vouch.by_id in users_df.index and vouch.to_id in users_df.index - ), - columns=["voucher", "vouchee", "vouch"], - ) - - trust_scores = lipshitrust(users=users_df, vouches=vouches)["trust_score"] - - for user in users: - user.trust_score = trust_scores[user.id] - - # Updating all users at once increases the risk of a database deadlock. - # We use an explicitly low `batch_size` value to reduce this risk. - User.objects.bulk_update(users, ["trust_score"], batch_size=1000) diff --git a/browser-extension/yarn.lock b/browser-extension/yarn.lock index 3c2f5021f3..15f8bcd573 100644 --- a/browser-extension/yarn.lock +++ b/browser-extension/yarn.lock @@ -160,9 +160,9 @@ concat-map@0.0.1: integrity sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg== cross-spawn@^7.0.2: - version "7.0.3" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" - integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + version "7.0.6" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.6.tgz#8a58fe78f00dcd70c370451759dfbfaf03e8ee9f" + integrity sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA== dependencies: path-key "^3.1.0" shebang-command "^2.0.0" diff --git a/frontend/.env.development b/frontend/.env.development index f157c9a5dc..ca0f4cb668 100644 --- a/frontend/.env.development +++ b/frontend/.env.development @@ -14,5 +14,15 @@ REACT_APP_CSP_IMAGE_SRC="https://i.ytimg.com https://www.paypal.com https://www. # 'unsafe-inline' is added here for development purposes only (used by hot reloading) REACT_APP_CSP_SCRIPT_SRC="'unsafe-inline' https://www.youtube.com/iframe_api https://www.youtube.com/s/player/" +# +# Debug and development flags +# + +# Enable the control of the displayed comparison inputs using a URL parameter. 
+REACT_APP_ENABLE_COMP_DEBUG_INPUT=true + +# # Tournesol feature flags +# + REACT_APP_POLL_PRESIDENTIELLE_2022_ENABLED=true diff --git a/frontend/.env.test b/frontend/.env.test index d9224c804e..bc4f63cfa8 100644 --- a/frontend/.env.test +++ b/frontend/.env.test @@ -3,6 +3,7 @@ REACT_APP_WEBSITE_ANALYTICS_URL= # Tournesol API configuration REACT_APP_API_URL=http://localhost:8000 + REACT_APP_OAUTH_CLIENT_ID=YlfkLzvVjmGw3gjJzdlFuMFWcR64fAk4WNg5ucGg REACT_APP_OAUTH_CLIENT_SECRET=iB9j9hM5ekFpKlZQ6uNGloFJIWLVnq8LoG7SNdCtHY5oM7w9KY0XjpaDuwwJ40BshH7jKYZmXniaybhrQf5p4irAOMWv82RdYRMD6TTSJciZEAxn9onpKQoUgUeDqsRj diff --git a/frontend/public/locales/en/translation.json b/frontend/public/locales/en/translation.json index 327663c2a8..1fb8204207 100644 --- a/frontend/public/locales/en/translation.json +++ b/frontend/public/locales/en/translation.json @@ -85,8 +85,9 @@ "title": "Criteria scores distribution" }, "comparison": { - "successfullySubmitted": "The comparison has been successfully submitted.", - "itemsAreSimilar": "These two items are very similar, it is probably not useful to compare them.", + "newComparison": "New comparison", + "saved": "Saved", + "successfullySubmitted": "Comparison successfully submitted.", "removeOptionalCriterias": "Remove optional criteria", "addOptionalCriterias": "Add optional criteria", "changeOneItem": "Change one of the items to submit a new comparison", @@ -94,6 +95,7 @@ "comparisonInPublicDataAfterSubmission": "After submission, this comparison will be included in the public data.", "editComparison": "Edit comparison", "criteriaSkipped": "skipped", + "itemsAreSimilar": "These two items are very similar, it is probably not useful to compare them.", "submitAComparison": "Submit a comparison", "inactivePoll": "This poll is closed.", "inactivePollComparisonCannotBeSubmittedOrEdited": "No comparison can be submitted or modified.", @@ -109,10 +111,18 @@ "hideHelp": "Hide the help", "showHelp": "Show help for comparisons" }, + "comparisonInput": { + "thisComparisonWasMadeOnAMobileDevice": "This comparison was made on a mobile device. The mobile interface exceptionally replaces the usual interface.", + "thisComparisonWasMadeOnAComputer": "This comparison was made on a computer. It cannot be modified on a mobile device." + }, "comparisons": { "goToComparison": "Go to comparison" }, "submit": "submit", + "comparisonCriteriaButtons": { + "nextQualityCriterion": "Next quality criterion", + "previousQualityCriterion": "Previous quality criterion" + }, "comparisonSeries": { "skipTheSeries": "Skip the series" }, @@ -147,6 +157,9 @@ "closeSearch": "Close search", "search": "Search" }, + "entitySelectorControls": { + "next": "next" + }, "tabsBox": { "subsample": "An ordered sample of your personal ranking. The elements you recommend the most appear first.", "compared": "Compared videos appear here.", @@ -616,8 +629,8 @@ "preferences": "Preferences" }, "poll": { - "videos": "videos", - "presidential2022": "election FR 2022", + "videos": "Videos", + "presidential2022": "Election FR 2022", "entityCandidate": "candidate", "entityVideo": "video" }, @@ -1192,12 +1205,16 @@ "title1": "Comparing two videos 🌻", "message1": { "p10": "After watching the videos, move the main handle to the video that should be largely recommended according to you. Save once you've made your choice.", - "p20": "The more your opinion is clear-cut on a criterion, the more the handle should be close to the slider extremity. If you find the videos similar, the handle should remain close to the center." 
+ "p10mobile": "After watching the videos, vote for the video that should be largely recommended using the buttons below.", + "p20": "The more your opinion is clear-cut on a criterion, the more the handle should be close to the slider extremity. If you find the videos similar, the handle should remain close to the center.", + "p20mobile": "If your preference is strong, use the buttons at the ends. If you find the two videos similar, use the \"equal\" button in the middle." }, "title2": "Making comparisons helps science 🔬", "message2": { "p10": "Each comparison helps Tournesol improve its recommendations and its open database. Try to unfold and use the optional criteria now.", - "p20": "You can always choose to not vote on an optional criterion by unchecking it. You will be able to edit all your comparisons from your page My comparisons." + "p10mobile": "Each comparison helps Tournesol improve its recommendations and its open database.", + "p20": "You can always choose to not vote on an optional criterion by unchecking it. You will be able to edit all your comparisons from the page My comparisons.", + "p20mobile": "After a few comparisons, new optional criteria will be displayed. You will also be able to edit all your comparisons from the page My Comparisons." }, "title3": "How does Tournesol work? 🤖", "message3": { diff --git a/frontend/public/locales/fr/translation.json b/frontend/public/locales/fr/translation.json index 4adbd51aa2..3ed7819b40 100644 --- a/frontend/public/locales/fr/translation.json +++ b/frontend/public/locales/fr/translation.json @@ -89,8 +89,9 @@ "title": "Distribution des scores par critère" }, "comparison": { + "newComparison": "Nouvelle comparaison", + "saved": "Validé", "successfullySubmitted": "Comparaison bien enregistrée.", - "itemsAreSimilar": "Ces deux éléments sont très similaires, ce n'est probablement pas une bonne idée de les comparer.", "removeOptionalCriterias": "Supprimer les critères optionnels", "addOptionalCriterias": "Ajouter les critères optionnels", "changeOneItem": "Changez l'un des deux éléments pour faire une nouvelle comparaison.", @@ -98,6 +99,7 @@ "comparisonInPublicDataAfterSubmission": "Après enregistrement, cette comparaison fera partie des données publiques.", "editComparison": "Modifier la comparaison", "criteriaSkipped": "ignoré", + "itemsAreSimilar": "Ces deux éléments sont très similaires, ce n'est probablement pas une bonne idée de les comparer.", "submitAComparison": "Soumettre une comparaison", "inactivePoll": "Ce scrutin est fermé.", "inactivePollComparisonCannotBeSubmittedOrEdited": "Aucune comparaison ne peut être ajoutée ou modifiée.", @@ -113,10 +115,18 @@ "hideHelp": "Cacher l'aide", "showHelp": "Montrer l'aide pour les comparaisons" }, + "comparisonInput": { + "thisComparisonWasMadeOnAMobileDevice": "Cette comparaison a été faite sur un périphérique mobile. L'interface mobile remplace exceptionnellement l'interface habituelle.", + "thisComparisonWasMadeOnAComputer": "Cette comparaison a été faite sur un ordinateur. Elle ne peut pas être modifiée sur un périphérique mobile." 
+ }, "comparisons": { "goToComparison": "Voir la comparaison" }, "submit": "enregistrer", + "comparisonCriteriaButtons": { + "nextQualityCriterion": "Critère de qualité suivant", + "previousQualityCriterion": "Critère de qualité précédent" + }, "comparisonSeries": { "skipTheSeries": "Passer la série" }, @@ -152,6 +162,9 @@ "closeSearch": "Fermer la recherche", "search": "Rechercher" }, + "entitySelectorControls": { + "next": "suivant" + }, "tabsBox": { "subsample": "Un extrait ordonné de votre classement personnel. Les éléments que vous recommandez le plus apparaissent en premier.", "compared": "Les vidéos que vous avez comparées apparaissent ici.", @@ -625,8 +638,8 @@ "preferences": "Préférences" }, "poll": { - "videos": "vidéos", - "presidential2022": "présidentielle FR 2022", + "videos": "Vidéos", + "presidential2022": "Présidentielle FR 2022", "entityCandidate": "candidat", "entityVideo": "vidéo" }, @@ -1202,12 +1215,16 @@ "title1": "Comparer deux vidéos 🌻", "message1": { "p10": "Après avoir regardé les vidéos, déplacez le curseur principal vers la vidéo qui, selon vous, devrait être largement recommandée. Enregistrez quand votre choix est fait.", - "p20": "Plus votre avis est tranché sur un critère, plus le curseur devrait être proche de l'extrémité. Si vous trouvez les vidéos similaires, le curseur devrait se rapprocher du centre." + "p10mobile": "Après avoir regardé les vidéos, votez pour celle qui devrait être largement recommandée en utilisant les boutons de comparaison ci-dessous.", + "p20": "Plus votre avis est tranché sur un critère, plus le curseur devrait être proche de l'extrémité. Si vous trouvez les vidéos similaires, le curseur devrait se rapprocher du centre.", + "p20mobile": "Si votre avis est tranché, utilisez les boutons aux extrémités. Si vous trouvez les deux vidéos similaires, utilisez le bouton \"égal\" du milieu." }, "title2": "Faire des comparaisons aide la science 🔬", "message2": { "p10": "Chaque comparaison aide Tournesol à améliorer ses recommandations et sa base de données publique. Essayez maintenant de dérouler et d'utiliser les critères optionnels.", - "p20": "Vous pouvez choisir de ne pas vous exprimer sur un critère optionnel en le décochant. Il vous sera possible de modifier ultérieurement vos comparaisons depuis votre page Mes comparaisons." + "p10mobile": "Chaque comparaison aide Tournesol à améliorer ses recommandations et sa base de données publique.", + "p20": "Vous pouvez choisir de ne pas vous exprimer sur un critère optionnel en le décochant. Il vous sera possible de modifier ultérieurement vos comparaisons depuis votre page Mes comparaisons.", + "p20mobile": "Après quelques comparaisons, de nouveaux critères optionnels vous seront proposés. Il vous sera aussi possible de modifier toutes vos comparaisons depuis votre page Mes comparaisons." }, "title3": "Comment fonctionne Tournesol ? 🤖", "message3": { diff --git a/frontend/src/components/ContentHeader.tsx b/frontend/src/components/ContentHeader.tsx index 991dad0807..0acf8a8a96 100644 --- a/frontend/src/components/ContentHeader.tsx +++ b/frontend/src/components/ContentHeader.tsx @@ -21,7 +21,7 @@ const ContentHeader = ({ chipLabel?: string; }) => { return ( - + { + return ( + + {!readOnly && ( + + )} ))} @@ -323,7 +321,7 @@ const ComparisonSliders = ({ )}