From dc2de1e20eee676ecef79b97baa085682d57f729 Mon Sep 17 00:00:00 2001 From: Guillaume De Saint Martin Date: Thu, 26 Oct 2023 20:22:48 +0200 Subject: [PATCH 01/10] [DCA] improve config error messages --- Trading/Mode/dca_trading_mode/dca_trading.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/Trading/Mode/dca_trading_mode/dca_trading.py b/Trading/Mode/dca_trading_mode/dca_trading.py index 08b075e8c..17ad3d039 100644 --- a/Trading/Mode/dca_trading_mode/dca_trading.py +++ b/Trading/Mode/dca_trading_mode/dca_trading.py @@ -124,7 +124,9 @@ async def create_new_orders(self, symbol, _, state, **kwargs): use_total_holding=False, ) else: - self.logger.error(f"Missing {side.value} entry order quantity") + self.logger.error( + f"Missing {side.value} entry order quantity in {self.trading_mode.get_name()} configuration" + f", please set the \"Amount per buy order\" value.") return [] initial_entry_price = price if self.trading_mode.use_market_entry_orders else \ trading_personal_data.decimal_adapt_price( @@ -151,7 +153,11 @@ async def create_new_orders(self, symbol, _, state, **kwargs): secondary_order_type = trading_enums.TraderOrderType.BUY_LIMIT \ if side is trading_enums.TradeOrderSide.BUY else trading_enums.TraderOrderType.SELL_LIMIT if not secondary_quantity: - self.logger.error(f"Missing {secondary_order_type.value} secondary order quantity") + self.logger.error( + f"Missing {side.value} secondary entry order quantity in {self.trading_mode.get_name()} " + f"configuration, please set the \"Secondary entry orders amount\" value " + f"when enabling secondary entry orders." + ) else: for i in range(self.trading_mode.secondary_entry_orders_count): remaining_funds = initial_available_funds - sum( From 51dafd9f6ed44ce48c0b9d3e6f04361d6510f696 Mon Sep 17 00:00:00 2001 From: Guillaume De Saint Martin Date: Wed, 25 Oct 2023 23:26:21 +0200 Subject: [PATCH 02/10] [GPT] enable historical signals fetch --- Evaluator/TA/ai_evaluator/ai.py | 70 ++++++++-- Services/Services_bases/gpt_service/gpt.py | 152 ++++++++++++++++++++- 2 files changed, 204 insertions(+), 18 deletions(-) diff --git a/Evaluator/TA/ai_evaluator/ai.py b/Evaluator/TA/ai_evaluator/ai.py index 750c58351..f5370cda5 100644 --- a/Evaluator/TA/ai_evaluator/ai.py +++ b/Evaluator/TA/ai_evaluator/ai.py @@ -31,8 +31,10 @@ class GPTEvaluator(evaluators.TAEvaluator): + GLOBAL_VERSION = 1 PREPROMPT = "Predict: {up or down} {confidence%} (no other information)" PASSED_DATA_LEN = 10 + MAX_CONFIDENCE_PERCENT = 100 HIGH_CONFIDENCE_PERCENT = 80 MEDIUM_CONFIDENCE_PERCENT = 50 LOW_CONFIDENCE_PERCENT = 30 @@ -53,6 +55,7 @@ def __init__(self, tentacles_setup_config): self.indicator = None self.source = None self.period = None + self.max_confidence_threshold = 0 self.gpt_model = gpt_service.GPTService.DEFAULT_MODEL self.is_backtesting = False self.min_allowed_timeframe = os.getenv("MIN_GPT_TIMEFRAME", None) @@ -65,6 +68,7 @@ def __init__(self, tentacles_setup_config): except ValueError: self.logger.error(f"Invalid timeframe configuration: unknown timeframe: '{self.min_allowed_timeframe}'") self.allow_reevaluations = os_util.parse_boolean_environment_var("ALLOW_GPT_REEVALUATIONS", "True") + self.services_config = None def enable_reevaluation(self) -> bool: """ @@ -72,6 +76,13 @@ def enable_reevaluation(self) -> bool: """ return self.allow_reevaluations + @classmethod + def get_signals_history_type(cls): + """ + Override when this evaluator uses a specific type of signal history + """ + return commons_enums.SignalHistoryTypes.GPT + 
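# Illustration (not part of the patch): standalone sketch of the confidence handling
# added in this patch series, mirroring GPTEvaluator._parse_confidence after the later
# "fix tests" commit (where the threshold is named min_confidence_threshold and defaults
# to 100). Names below are local to this sketch, not the shipped implementation.
HIGH, MEDIUM, LOW, MAX = 80, 50, 30, 100

def parse_confidence(cleaned_prediction: str, min_confidence_threshold: int) -> float:
    value = LOW  # fallback when no keyword or percentage is found
    if "%" in cleaned_prediction:
        value = float(cleaned_prediction[:cleaned_prediction.index("%")].split(" ")[-1])
    elif "high" in cleaned_prediction:
        value = HIGH
    elif "medium" in cleaned_prediction or "intermediate" in cleaned_prediction:
        value = MEDIUM
    elif not cleaned_prediction:
        value = 0
    # confidence at or above the threshold is snapped to 100%
    return MAX if value >= min_confidence_threshold else value

assert parse_confidence("up 70%", 60) == 100  # matches the updated test_parse_confidence expectations
assert parse_confidence("up 59%", 60) == 59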
async def load_and_save_user_inputs(self, bot_id: str) -> dict: """ instance method API for user inputs @@ -100,6 +111,11 @@ def init_user_inputs(self, inputs: dict) -> None: self.period, inputs, min_val=1, title="Period: length of the indicator period." ) + self.max_confidence_threshold = self.UI.user_input( + "max_confidence_threshold", enums.UserInputTypes.INT, + self.max_confidence_threshold, inputs, min_val=0, max_val=100, + title="Maximum confidence threshold: % confidence value starting from which to return 1 or -1." + ) if len(self.GPT_MODELS) > 1 and self.enable_model_selector: self.gpt_model = self.UI.user_input( "GPT model", enums.UserInputTypes.OPTIONS, gpt_service.GPTService.DEFAULT_MODEL, @@ -112,7 +128,9 @@ async def _init_GPT_models(self): self.GPT_MODELS = [gpt_service.GPTService.DEFAULT_MODEL] if self.enable_model_selector and not self.is_backtesting: try: - service = await services_api.get_service(gpt_service.GPTService, self.is_backtesting) + service = await services_api.get_service( + gpt_service.GPTService, self.is_backtesting, self.services_config + ) self.GPT_MODELS = service.models except Exception as err: self.logger.exception(err, True, f"Impossible to fetch GPT models: {err}") @@ -138,13 +156,14 @@ async def evaluate(self, cryptocurrency, symbol, time_frame, candle_data, candle self.eval_note = commons_constants.START_PENDING_EVAL_NOTE if self._check_timeframe(time_frame): try: + candle_time = candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] computed_data = self.call_indicator(candle_data) reduced_data = computed_data[-self.PASSED_DATA_LEN:] formatted_data = ", ".join(str(datum).replace('[', '').replace(']', '') for datum in reduced_data) - prediction = await self.ask_gpt(self.PREPROMPT, formatted_data, symbol, time_frame) + prediction = await self.ask_gpt(self.PREPROMPT, formatted_data, symbol, time_frame, candle_time) cleaned_prediction = prediction.strip().replace("\n", "").replace(".", "").lower() prediction_side = self._parse_prediction_side(cleaned_prediction) - if prediction_side == 0: + if prediction_side == 0 and not self.is_backtesting: self.logger.error(f"Error when reading GPT answer: {cleaned_prediction}") return confidence = self._parse_confidence(cleaned_prediction) / 100 @@ -171,20 +190,35 @@ async def evaluate(self, cryptocurrency, symbol, time_frame, candle_data, candle eval_time=evaluators_util.get_eval_time(full_candle=candle, time_frame=time_frame)) - async def ask_gpt(self, preprompt, inputs, symbol, time_frame) -> str: + async def ask_gpt(self, preprompt, inputs, symbol, time_frame, candle_time) -> str: try: - service = await services_api.get_service(gpt_service.GPTService, self.is_backtesting) + service = await services_api.get_service( + gpt_service.GPTService, + self.is_backtesting, + {} if self.is_backtesting else self.services_config + ) resp = await service.get_chat_completion( [ service.create_message("system", preprompt), service.create_message("user", inputs), ], - model=self.gpt_model if self.enable_model_selector else None + model=self.gpt_model if self.enable_model_selector else None, + exchange=self.exchange_name, + symbol=symbol, + time_frame=time_frame, + version=self.get_version(), + candle_open_time=candle_time, + use_stored_signals=self.is_backtesting ) self.logger.info(f"GPT's answer is '{resp}' for {symbol} on {time_frame} with input: {inputs}") return resp except services_errors.CreationError as err: raise evaluators_errors.UnavailableEvaluatorError(f"Impossible to get ChatGPT prediction: {err}") from err + 
except Exception as err: + print(err) + + def get_version(self): + return f"{self.gpt_model}-{self.source}-{self.indicator}-{self.period}-{self.GLOBAL_VERSION}" def call_indicator(self, candle_data): return data_util.drop_nan(self.INDICATORS[self.indicator](candle_data, self.period)) @@ -216,14 +250,20 @@ def _parse_confidence(self, cleaned_prediction): up with 70% confidence up with high confidence """ + value = self.LOW_CONFIDENCE_PERCENT if "%" in cleaned_prediction: percent_index = cleaned_prediction.index("%") - return float(cleaned_prediction[:percent_index].split(" ")[-1]) - if "high" in cleaned_prediction: - return self.HIGH_CONFIDENCE_PERCENT - if "medium" in cleaned_prediction or "intermediate" in cleaned_prediction: - return self.MEDIUM_CONFIDENCE_PERCENT - if "low" in cleaned_prediction: - return self.LOW_CONFIDENCE_PERCENT - self.logger.warning(f"Impossible to parse confidence in {cleaned_prediction}. Using low confidence") - return self.LOW_CONFIDENCE_PERCENT + value = float(cleaned_prediction[:percent_index].split(" ")[-1]) + elif "high" in cleaned_prediction: + value = self.HIGH_CONFIDENCE_PERCENT + elif "medium" in cleaned_prediction or "intermediate" in cleaned_prediction: + value = self.MEDIUM_CONFIDENCE_PERCENT + elif "low" in cleaned_prediction: + value = self.LOW_CONFIDENCE_PERCENT + elif not cleaned_prediction: + value = 0 + else: + self.logger.warning(f"Impossible to parse confidence in {cleaned_prediction}. Using low confidence") + if value >= self.max_confidence_threshold: + return self.MAX_CONFIDENCE_PERCENT + return value diff --git a/Services/Services_bases/gpt_service/gpt.py b/Services/Services_bases/gpt_service/gpt.py index 02549b29b..2385342ac 100644 --- a/Services/Services_bases/gpt_service/gpt.py +++ b/Services/Services_bases/gpt_service/gpt.py @@ -13,6 +13,7 @@ # # You should have received a copy of the GNU Lesser General Public # License along with this library. 
+import asyncio import os import openai import logging @@ -21,10 +22,19 @@ import octobot_services.constants as services_constants import octobot_services.services as services import octobot_services.errors as errors + +import octobot_commons.enums as commons_enums +import octobot_commons.constants as commons_constants +import octobot_commons.os_util as os_util +import octobot_commons.authentication as authentication +import octobot_commons.tree as tree + import octobot.constants as constants +import octobot.community as community class GPTService(services.AbstractService): + BACKTESTING_ENABLED = True DEFAULT_MODEL = "gpt-3.5-turbo" def get_fields_description(self): @@ -46,6 +56,7 @@ def __init__(self): logging.getLogger("openai").setLevel(logging.WARNING) self._env_secret_key = os.getenv(services_constants.ENV_OPENAI_SECRET_KEY, None) self.model = os.getenv(services_constants.ENV_GPT_MODEL, self.DEFAULT_MODEL) + self.stored_signals: tree.BaseTree = tree.BaseTree() self.models = [] self.daily_tokens_limit = int(os.getenv(services_constants.ENV_GPT_DAILY_TOKENS_LIMIT, 0)) self.consumed_daily_tokens = 1 @@ -63,6 +74,27 @@ async def get_chat_completion( n=1, stop=None, temperature=0.5, + exchange: str = None, + symbol: str = None, + time_frame: str = None, + version: str = None, + candle_open_time: float = None, + use_stored_signals: bool = False, + ) -> str: + if use_stored_signals: + return self._get_signal_from_stored_signals(exchange, symbol, time_frame, version, candle_open_time) + if self.use_stored_signals_only(): + return await self._fetch_signal_from_stored_signals(exchange, symbol, time_frame, version, candle_open_time) + return await self._get_signal_from_gpt(messages, model, max_tokens, n, stop, temperature) + + async def _get_signal_from_gpt( + self, + messages, + model=None, + max_tokens=3000, + n=1, + stop=None, + temperature=0.5 ): self._ensure_rate_limit() try: @@ -87,6 +119,111 @@ async def get_chat_completion( f"Unexpected error when running request with model {model}: {err}" ) from err + def _get_signal_from_stored_signals( + self, + exchange: str, + symbol: str, + time_frame: str, + version: str, + candle_open_time: float, + ): + try: + return self.stored_signals.get_node([exchange, symbol, time_frame, version, candle_open_time]).node_value + except tree.NodeExistsError: + return "" + + async def _fetch_signal_from_stored_signals( + self, + exchange: str, + symbol: str, + time_frame: str, + version: str, + candle_open_time: float, + ) -> str: + authenticator = authentication.Authenticator.instance() + try: + return await authenticator.get_gpt_signal( + exchange, symbol, commons_enums.TimeFrames(time_frame), candle_open_time, version + ) + except Exception as err: + self.logger.exception(err, True, f"Error when fetching gpt signal: {err}") + + def store_signal_history( + self, + exchange: str, + symbol: str, + time_frame: commons_enums.TimeFrames, + version: str, + signals_by_candle_open_time, + ): + tf = time_frame.value + for candle_open_time, signal in signals_by_candle_open_time.items(): + self.stored_signals.set_node_at_path( + signal, + str, + [exchange, symbol, tf, version, candle_open_time] + ) + + def has_signal_history( + self, + exchange: str, + symbol: str, + time_frame: commons_enums.TimeFrames, + min_timestamp: float, + max_timestamp: float, + version: str + ): + for ts in (min_timestamp, max_timestamp): + if self._get_signal_from_stored_signals( + exchange, symbol, time_frame.value, version, self._get_open_candle_timestamp(time_frame, ts) + ) == "": + 
return False + return True + + async def _fetch_and_store_history( + self, authenticator, exchange_name, symbol, time_frame, version, min_timestamp: float, max_timestamp: float + ): + signals_by_candle_open_time = await authenticator.get_gpt_signals_history( + exchange_name, symbol, time_frame, + self._get_open_candle_timestamp(time_frame, min_timestamp), + self._get_open_candle_timestamp(time_frame, max_timestamp), + version + ) + if not signals_by_candle_open_time: + self.logger.error( + f"No ChatGPT signal history for {symbol} on {time_frame.value} for {exchange_name} with {version}. " + f"Please check {self._supported_history_url()} to get the list of supported signals history." + ) + self.store_signal_history( + exchange_name, symbol, time_frame, version, signals_by_candle_open_time + ) + + async def fetch_gpt_history( + self, exchange_name: str, symbols: list, time_frames: list, + version: str, start_timestamp: float, end_timestamp: float + ): + authenticator = authentication.Authenticator.instance() + coros = [ + self._fetch_and_store_history( + authenticator, exchange_name, symbol, time_frame, version, start_timestamp, end_timestamp + ) + for symbol in symbols + for time_frame in time_frames + if not self.has_signal_history(exchange_name, symbol, time_frame, start_timestamp, end_timestamp, version) + ] + if coros: + await asyncio.gather(*coros) + + def _get_open_candle_timestamp(self, time_frame: commons_enums.TimeFrames, base_timestamp: float): + tf_seconds = commons_enums.TimeFramesMinutes[time_frame] * commons_constants.MINUTE_TO_SECONDS + return base_timestamp - (base_timestamp % tf_seconds) + + def clear_signal_history(self): + self.stored_signals.clear() + + def _supported_history_url(self): + return f"{community.IdentifiersProvider.COMMUNITY_LANDING_URL}/chat-gpt-trading" + def _ensure_rate_limit(self): if self.last_consumed_token_date != datetime.date.today(): self.consumed_daily_tokens = 0 @@ -101,7 +238,7 @@ def _update_token_usage(self, consumed_tokens): self.logger.debug(f"Consumed {consumed_tokens} tokens. {self.consumed_daily_tokens} consumed tokens today.") def check_required_config(self, config): - if self._env_secret_key is not None: + if self._env_secret_key is not None or self.use_stored_signals_only(): return True try: return bool(config[services_constants.CONIG_OPENAI_SECRET_KEY]) @@ -110,6 +247,8 @@ def check_required_config(self, config): def has_required_configuration(self): try: + if self.use_stored_signals_only(): + return True return self.check_required_config( self.config[services_constants.CONFIG_CATEGORY_SERVICES].get(services_constants.CONFIG_GPT, {}) ) @@ -140,6 +279,9 @@ def _get_api_key(self): async def prepare(self) -> None: try: + if self.use_stored_signals_only(): + self.logger.info(f"Skipping models fetch as self.use_stored_signals_only() is True") + return fetched_models = await openai.Model.alist(api_key=self._get_api_key()) self.models = [d["id"] for d in fetched_models["data"]] if self.model not in self.models: @@ -151,11 +293,15 @@ async def prepare(self) -> None: self.logger.error(f"Unexpected error when checking api key: {err}") def _is_healthy(self): - return self._get_api_key() and self.models + return self.use_stored_signals_only() or (self._get_api_key() and self.models) def get_successful_startup_message(self): - return f"GPT configured and ready. {len(self.models)} AI models are available. Using {self.model}.", \ + return f"GPT configured and ready. {len(self.models)} AI models are available. 
" \ + f"Using {'stored signals' if self.use_stored_signals_only() else self.models}.", \ self._is_healthy() + def use_stored_signals_only(self): + return not self.config + async def stop(self): pass From 402ffe6056c28f4d9be8bb6c4f2843f40fdf7ad2 Mon Sep 17 00:00:00 2001 From: Guillaume De Saint Martin Date: Fri, 27 Oct 2023 15:28:17 +0200 Subject: [PATCH 03/10] [GPT] ask gpt in async_evaluation --- Evaluator/TA/ai_evaluator/ai.py | 79 ++++++++++--------- .../tests/exchanges/__init__.py | 2 +- 2 files changed, 41 insertions(+), 40 deletions(-) diff --git a/Evaluator/TA/ai_evaluator/ai.py b/Evaluator/TA/ai_evaluator/ai.py index f5370cda5..4e8e7b72f 100644 --- a/Evaluator/TA/ai_evaluator/ai.py +++ b/Evaluator/TA/ai_evaluator/ai.py @@ -153,42 +153,43 @@ async def ohlcv_callback(self, exchange: str, exchange_id: str, await self.evaluate(cryptocurrency, symbol, time_frame, candle_data, candle) async def evaluate(self, cryptocurrency, symbol, time_frame, candle_data, candle): - self.eval_note = commons_constants.START_PENDING_EVAL_NOTE - if self._check_timeframe(time_frame): - try: - candle_time = candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] - computed_data = self.call_indicator(candle_data) - reduced_data = computed_data[-self.PASSED_DATA_LEN:] - formatted_data = ", ".join(str(datum).replace('[', '').replace(']', '') for datum in reduced_data) - prediction = await self.ask_gpt(self.PREPROMPT, formatted_data, symbol, time_frame, candle_time) - cleaned_prediction = prediction.strip().replace("\n", "").replace(".", "").lower() - prediction_side = self._parse_prediction_side(cleaned_prediction) - if prediction_side == 0 and not self.is_backtesting: - self.logger.error(f"Error when reading GPT answer: {cleaned_prediction}") - return - confidence = self._parse_confidence(cleaned_prediction) / 100 - self.eval_note = prediction_side * confidence - except services_errors.InvalidRequestError as e: - self.logger.error(f"Invalid GPT request: {e}") - except services_errors.RateLimitError as e: - self.logger.error(f"Too many requests: {e}") - except services_errors.UnavailableInBacktestingError: - # error already logged error for backtesting in use_backtesting_init_timeout - pass - except evaluators_errors.UnavailableEvaluatorError as e: - self.logger.exception(e, True, f"Evaluation error: {e}") - except tulipy.lib.InvalidOptionError as e: - self.logger.warning( - f"Error when computing {self.indicator} on {self.period} period with {len(candle_data)} " - f"candles: {e}" - ) - self.logger.exception(e, False) - else: - self.logger.debug(f"Ignored {time_frame} time frame as the shorted allowed time frame is " - f"{self.min_allowed_timeframe}") - await self.evaluation_completed(cryptocurrency, symbol, time_frame, - eval_time=evaluators_util.get_eval_time(full_candle=candle, - time_frame=time_frame)) + async with self.async_evaluation(): + self.eval_note = commons_constants.START_PENDING_EVAL_NOTE + if self._check_timeframe(time_frame): + try: + candle_time = candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] + computed_data = self.call_indicator(candle_data) + reduced_data = computed_data[-self.PASSED_DATA_LEN:] + formatted_data = ", ".join(str(datum).replace('[', '').replace(']', '') for datum in reduced_data) + prediction = await self.ask_gpt(self.PREPROMPT, formatted_data, symbol, time_frame, candle_time) + cleaned_prediction = prediction.strip().replace("\n", "").replace(".", "").lower() + prediction_side = self._parse_prediction_side(cleaned_prediction) + if prediction_side == 0 and not 
self.is_backtesting: + self.logger.error(f"Error when reading GPT answer: {cleaned_prediction}") + return + confidence = self._parse_confidence(cleaned_prediction) / 100 + self.eval_note = prediction_side * confidence + except services_errors.InvalidRequestError as e: + self.logger.error(f"Invalid GPT request: {e}") + except services_errors.RateLimitError as e: + self.logger.error(f"Too many requests: {e}") + except services_errors.UnavailableInBacktestingError: + # error already logged error for backtesting in use_backtesting_init_timeout + pass + except evaluators_errors.UnavailableEvaluatorError as e: + self.logger.exception(e, True, f"Evaluation error: {e}") + except tulipy.lib.InvalidOptionError as e: + self.logger.warning( + f"Error when computing {self.indicator} on {self.period} period with {len(candle_data)} " + f"candles: {e}" + ) + self.logger.exception(e, False) + else: + self.logger.debug(f"Ignored {time_frame} time frame as the shorted allowed time frame is " + f"{self.min_allowed_timeframe}") + await self.evaluation_completed(cryptocurrency, symbol, time_frame, + eval_time=evaluators_util.get_eval_time(full_candle=candle, + time_frame=time_frame)) async def ask_gpt(self, preprompt, inputs, symbol, time_frame, candle_time) -> str: try: @@ -214,11 +215,11 @@ async def ask_gpt(self, preprompt, inputs, symbol, time_frame, candle_time) -> s return resp except services_errors.CreationError as err: raise evaluators_errors.UnavailableEvaluatorError(f"Impossible to get ChatGPT prediction: {err}") from err - except Exception as err: - print(err) def get_version(self): - return f"{self.gpt_model}-{self.source}-{self.indicator}-{self.period}-{self.GLOBAL_VERSION}" + # later on, identify by its specs + # return f"{self.gpt_model}-{self.source}-{self.indicator}-{self.period}-{self.GLOBAL_VERSION}" + return "0.0.0" def call_indicator(self, candle_data): return data_util.drop_nan(self.INDICATORS[self.indicator](candle_data, self.period)) diff --git a/Meta/Keywords/scripting_library/tests/exchanges/__init__.py b/Meta/Keywords/scripting_library/tests/exchanges/__init__.py index 8f63aa0a6..1cd00bbf3 100644 --- a/Meta/Keywords/scripting_library/tests/exchanges/__init__.py +++ b/Meta/Keywords/scripting_library/tests/exchanges/__init__.py @@ -36,7 +36,7 @@ async def fake_backtesting(backtesting_config): config=backtesting_config, exchange_ids=[], matrix_id="", - backtesting_files=[] + backtesting_files=[], ) From 192acc7326f1b255a33ea84b114fd86dd83b5719 Mon Sep 17 00:00:00 2001 From: Guillaume De Saint Martin Date: Fri, 27 Oct 2023 17:09:29 +0200 Subject: [PATCH 04/10] [GPT] add full candles request --- Evaluator/TA/ai_evaluator/ai.py | 56 ++++++++++++++++------ Services/Services_bases/gpt_service/gpt.py | 23 +++++---- 2 files changed, 55 insertions(+), 24 deletions(-) diff --git a/Evaluator/TA/ai_evaluator/ai.py b/Evaluator/TA/ai_evaluator/ai.py index 4e8e7b72f..a29cab661 100644 --- a/Evaluator/TA/ai_evaluator/ai.py +++ b/Evaluator/TA/ai_evaluator/ai.py @@ -39,7 +39,7 @@ class GPTEvaluator(evaluators.TAEvaluator): MEDIUM_CONFIDENCE_PERCENT = 50 LOW_CONFIDENCE_PERCENT = 30 INDICATORS = { - "No indicator: the raw value of the selected source": lambda data, period: data, + "No indicator: raw candles price data": lambda data, period: data, "EMA: Exponential Moving Average": tulipy.ema, "SMA: Simple Moving Average": tulipy.sma, "Kaufman Adaptive Moving Average": tulipy.kama, @@ -47,7 +47,7 @@ class GPTEvaluator(evaluators.TAEvaluator): "RSI: Relative Strength Index": tulipy.rsi, "Detrended Price 
Oscillator": tulipy.dpo, } - SOURCES = ["Open", "High", "Low", "Close", "Volume"] + SOURCES = ["Open", "High", "Low", "Close", "Volume", "Full candle (For no indicator only)"] GPT_MODELS = [] def __init__(self, tentacles_setup_config): @@ -55,7 +55,7 @@ def __init__(self, tentacles_setup_config): self.indicator = None self.source = None self.period = None - self.max_confidence_threshold = 0 + self.min_confidence_threshold = 0 self.gpt_model = gpt_service.GPTService.DEFAULT_MODEL self.is_backtesting = False self.min_allowed_timeframe = os.getenv("MIN_GPT_TIMEFRAME", None) @@ -109,12 +109,12 @@ def init_user_inputs(self, inputs: dict) -> None: self.period = self.UI.user_input( "period", enums.UserInputTypes.INT, self.period, inputs, min_val=1, - title="Period: length of the indicator period." + title="Period: length of the indicator period or the number of candles to give to ChatGPT." ) - self.max_confidence_threshold = self.UI.user_input( - "max_confidence_threshold", enums.UserInputTypes.INT, - self.max_confidence_threshold, inputs, min_val=0, max_val=100, - title="Maximum confidence threshold: % confidence value starting from which to return 1 or -1." + self.min_confidence_threshold = self.UI.user_input( + "min_confidence_threshold", enums.UserInputTypes.INT, + self.min_confidence_threshold, inputs, min_val=0, max_val=100, + title="Minimum confidence threshold: % confidence value starting from which to return 1 or -1." ) if len(self.GPT_MODELS) > 1 and self.enable_model_selector: self.gpt_model = self.UI.user_input( @@ -146,10 +146,7 @@ async def _init_registered_topics(self, all_symbols_by_crypto_currencies, curren async def ohlcv_callback(self, exchange: str, exchange_id: str, cryptocurrency: str, symbol: str, time_frame, candle, inc_in_construction_data): - candle_data = self.get_candles_data_api()( - self.get_exchange_symbol_data(exchange, exchange_id, symbol), time_frame, - include_in_construction=inc_in_construction_data - ) + candle_data = self.get_candles_data(exchange, exchange_id, symbol, time_frame, inc_in_construction_data) await self.evaluate(cryptocurrency, symbol, time_frame, candle_data, candle) async def evaluate(self, cryptocurrency, symbol, time_frame, candle_data, candle): @@ -159,8 +156,7 @@ async def evaluate(self, cryptocurrency, symbol, time_frame, candle_data, candle try: candle_time = candle[commons_enums.PriceIndexes.IND_PRICE_TIME.value] computed_data = self.call_indicator(candle_data) - reduced_data = computed_data[-self.PASSED_DATA_LEN:] - formatted_data = ", ".join(str(datum).replace('[', '').replace(']', '') for datum in reduced_data) + formatted_data = self.get_formatted_data(computed_data) prediction = await self.ask_gpt(self.PREPROMPT, formatted_data, symbol, time_frame, candle_time) cleaned_prediction = prediction.strip().replace("\n", "").replace(".", "").lower() prediction_side = self._parse_prediction_side(cleaned_prediction) @@ -191,6 +187,12 @@ async def evaluate(self, cryptocurrency, symbol, time_frame, candle_data, candle eval_time=evaluators_util.get_eval_time(full_candle=candle, time_frame=time_frame)) + def get_formatted_data(self, computed_data) -> str: + if self.source in self.get_unformated_sources(): + return str(computed_data) + reduced_data = computed_data[-self.PASSED_DATA_LEN:] + return ", ".join(str(datum).replace('[', '').replace(']', '') for datum in reduced_data) + async def ask_gpt(self, preprompt, inputs, symbol, time_frame, candle_time) -> str: try: service = await services_api.get_service( @@ -222,8 +224,32 @@ def 
get_version(self): return "0.0.0" def call_indicator(self, candle_data): + if self.source in self.get_unformated_sources(): + return candle_data return data_util.drop_nan(self.INDICATORS[self.indicator](candle_data, self.period)) + def get_candles_data(self, exchange, exchange_id, symbol, time_frame, inc_in_construction_data): + if self.source in self.get_unformated_sources(): + limit = self.period if inc_in_construction_data else self.period + 1 + full_candles = trading_api.get_candles_as_list( + trading_api.get_symbol_historical_candles( + self.get_exchange_symbol_data(exchange, exchange_id, symbol), time_frame, limit=limit + ) + ) + # remove time value + for candle in full_candles: + candle.pop(commons_enums.PriceIndexes.IND_PRICE_TIME.value) + if inc_in_construction_data: + return full_candles + return full_candles[:-1] + return self.get_candles_data_api()( + self.get_exchange_symbol_data(exchange, exchange_id, symbol), time_frame, + include_in_construction=inc_in_construction_data + ) + + def get_unformated_sources(self): + return (self.SOURCES[5], ) + def get_candles_data_api(self): return { self.SOURCES[0]: trading_api.get_symbol_open_candles, @@ -265,6 +291,6 @@ def _parse_confidence(self, cleaned_prediction): value = 0 else: self.logger.warning(f"Impossible to parse confidence in {cleaned_prediction}. Using low confidence") - if value >= self.max_confidence_threshold: + if value >= self.min_confidence_threshold: return self.MAX_CONFIDENCE_PERCENT return value diff --git a/Services/Services_bases/gpt_service/gpt.py b/Services/Services_bases/gpt_service/gpt.py index 2385342ac..3b920dccf 100644 --- a/Services/Services_bases/gpt_service/gpt.py +++ b/Services/Services_bases/gpt_service/gpt.py @@ -25,7 +25,7 @@ import octobot_commons.enums as commons_enums import octobot_commons.constants as commons_constants -import octobot_commons.os_util as os_util +import octobot_commons.time_frame_manager as time_frame_manager import octobot_commons.authentication as authentication import octobot_commons.tree as tree @@ -175,7 +175,7 @@ def has_signal_history( ): for ts in (min_timestamp, max_timestamp): if self._get_signal_from_stored_signals( - exchange, symbol, time_frame.value, version, self._get_open_candle_timestamp(time_frame, ts) + exchange, symbol, time_frame.value, version, time_frame_manager.get_last_timeframe_time(time_frame, ts) ) == "": return False return True @@ -185,11 +185,16 @@ async def _fetch_and_store_history( ): signals_by_candle_open_time = await authenticator.get_gpt_signals_history( exchange_name, symbol, time_frame, - self._get_open_candle_timestamp(time_frame, min_timestamp), - self._get_open_candle_timestamp(time_frame, max_timestamp), + time_frame_manager.get_last_timeframe_time(time_frame, min_timestamp), + time_frame_manager.get_last_timeframe_time(time_frame, max_timestamp), version ) - if not signals_by_candle_open_time: + if signals_by_candle_open_time: + self.logger.info( + f"Fetched {len(signals_by_candle_open_time)} ChatGPT signals " + f"history for {symbol} {time_frame} on {exchange_name}." + ) + else: self.logger.error( f"No ChatGPT signal history for {symbol} on {time_frame.value} for {exchange_name} with {version}. " f"Please check {self._supported_history_url()} to get the list of supported signals history." 
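# Illustration (not part of the patch): the candle open-time alignment used when looking up
# stored signals. The _get_open_candle_timestamp helper removed below floors a timestamp to
# the current candle's open time; the time_frame_manager.get_last_timeframe_time calls
# introduced above are assumed to return the same value.
MINUTE_TO_SECONDS = 60
TIME_FRAMES_MINUTES = {"1h": 60, "4h": 240}  # subset, for this example only

def open_candle_timestamp(time_frame: str, base_timestamp: float) -> float:
    tf_seconds = TIME_FRAMES_MINUTES[time_frame] * MINUTE_TO_SECONDS
    return base_timestamp - (base_timestamp % tf_seconds)

assert open_candle_timestamp("1h", 1698490000) == 1698487200  # 2800s into the hourly candle
assert open_candle_timestamp("4h", 1698490000) == 1698480000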
@@ -198,6 +203,10 @@ async def _fetch_and_store_history( exchange_name, symbol, time_frame, version, signals_by_candle_open_time ) + @staticmethod + def is_setup_correctly(config): + return True + async def fetch_gpt_history( self, exchange_name: str, symbols: list, time_frames: list, version: str, start_timestamp: float, end_timestamp: float @@ -214,10 +223,6 @@ async def fetch_gpt_history( if coros: await asyncio.gather(*coros) - def _get_open_candle_timestamp(self, time_frame: commons_enums.TimeFrames, base_timestamp: float): - tf_seconds = commons_enums.TimeFramesMinutes[time_frame] * commons_constants.MINUTE_TO_SECONDS - return base_timestamp - (base_timestamp % tf_seconds) - def clear_signal_history(self): self.stored_signals.clear() From db072c411438c5a63a7a34505a20f6dfde929f90 Mon Sep 17 00:00:00 2001 From: Guillaume De Saint Martin Date: Fri, 27 Oct 2023 17:22:42 +0200 Subject: [PATCH 05/10] [Profiles] do not select duplicated profiles --- .../Interfaces/web_interface/controllers/configuration.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Services/Interfaces/web_interface/controllers/configuration.py b/Services/Interfaces/web_interface/controllers/configuration.py index 33af7c51e..68e644770 100644 --- a/Services/Interfaces/web_interface/controllers/configuration.py +++ b/Services/Interfaces/web_interface/controllers/configuration.py @@ -110,9 +110,8 @@ def profiles_management(action): return util.get_rest_reply(flask.jsonify(data)) if action == "duplicate": profile_id = flask.request.args.get("profile_id") - new_profile = models.duplicate_profile(profile_id) - models.select_profile(new_profile.profile_id) - flask.flash(f"New profile successfully created and selected.", "success") + models.duplicate_profile(profile_id) + flask.flash(f"New profile successfully created.", "success") return util.get_rest_reply(flask.jsonify("Profile created")) if action == "use_as_live": profile_id = flask.request.args.get("profile_id") From 7f7872bc801bf33e0a479b389771f1ff306e8e40 Mon Sep 17 00:00:00 2001 From: Guillaume De Saint Martin Date: Fri, 27 Oct 2023 17:26:37 +0200 Subject: [PATCH 06/10] [GPT] docs update --- Evaluator/TA/ai_evaluator/resources/GPTEvaluator.md | 6 ++++-- Services/Services_bases/gpt_service/gpt.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/Evaluator/TA/ai_evaluator/resources/GPTEvaluator.md b/Evaluator/TA/ai_evaluator/resources/GPTEvaluator.md index 998a41680..6ad1523c4 100644 --- a/Evaluator/TA/ai_evaluator/resources/GPTEvaluator.md +++ b/Evaluator/TA/ai_evaluator/resources/GPTEvaluator.md @@ -1,5 +1,7 @@ Uses [Chat GPT](https://chat.openai.com/) to predict the next moves of the market. -Evaluates between -1 to 1 according to chat GPT's prediction of the selected data and its confidence. +Evaluates between -1 to 1 according to ChatGPT's prediction of the selected data and its confidence. -*This evaluator can't be used in backtesting.* +Note: this evaluator can only be used in backtesting for markets where historical ChatGPT data are available. 
+ +Find the full list of supported historical markets on https://www.octobot.cloud/features/chatgpt-trading diff --git a/Services/Services_bases/gpt_service/gpt.py b/Services/Services_bases/gpt_service/gpt.py index 3b920dccf..9c0975832 100644 --- a/Services/Services_bases/gpt_service/gpt.py +++ b/Services/Services_bases/gpt_service/gpt.py @@ -227,7 +227,7 @@ def clear_signal_history(self): self.stored_signals.clear() def _supported_history_url(self): - return f"{community.IdentifiersProvider.COMMUNITY_LANDING_URL}/chat-gpt-trading" + return f"{community.IdentifiersProvider.COMMUNITY_LANDING_URL}/features/chatgpt-trading" def _ensure_rate_limit(self): if self.last_consumed_token_date != datetime.date.today(): From 3f2a2ed7aec7e82d4721f47827d2f1a625b99f46 Mon Sep 17 00:00:00 2001 From: Guillaume De Saint Martin Date: Fri, 27 Oct 2023 21:22:12 +0200 Subject: [PATCH 07/10] [DisplayedElements] add symbols and timeframe params to fill --- .../scripting_library/UI/plots/displayed_elements.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/Meta/Keywords/scripting_library/UI/plots/displayed_elements.py b/Meta/Keywords/scripting_library/UI/plots/displayed_elements.py index cef8c3980..6bd2d8d00 100644 --- a/Meta/Keywords/scripting_library/UI/plots/displayed_elements.py +++ b/Meta/Keywords/scripting_library/UI/plots/displayed_elements.py @@ -39,8 +39,7 @@ class DisplayedElements(display.DisplayTranslator): } async def fill_from_database(self, trading_mode, database_manager, exchange_name, symbol, time_frame, exchange_id, - with_inputs=True): - + with_inputs=True, symbols=None, time_frames=None): async with databases.MetaDatabase.database(database_manager) as meta_db: graphs_by_parts = {} inputs = [] @@ -52,6 +51,10 @@ async def fill_from_database(self, trading_mode, database_manager, exchange_name run_db = meta_db.get_run_db() metadata_rows = await run_db.all(commons_enums.DBTables.METADATA.value) metadata = metadata_rows[0] if metadata_rows else None + if symbols is not None: + symbols.extend(metadata[commons_enums.BacktestingMetadata.SYMBOLS.value]) + if time_frames is not None: + time_frames.extend(metadata[commons_enums.BacktestingMetadata.TIME_FRAMES.value]) account_type = trading_api.get_account_type_from_run_metadata(metadata) \ if database_manager.is_backtesting() \ else trading_api.get_account_type_from_exchange_manager( From f2f8a78b96766f02a1d1e43b98dbea3ba9f50808 Mon Sep 17 00:00:00 2001 From: Guillaume De Saint Martin Date: Fri, 27 Oct 2023 23:14:50 +0200 Subject: [PATCH 08/10] [GPTEvaluator] fix tests --- Evaluator/TA/ai_evaluator/ai.py | 2 +- Evaluator/TA/ai_evaluator/tests/test_ai.py | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/Evaluator/TA/ai_evaluator/ai.py b/Evaluator/TA/ai_evaluator/ai.py index a29cab661..93ebbeede 100644 --- a/Evaluator/TA/ai_evaluator/ai.py +++ b/Evaluator/TA/ai_evaluator/ai.py @@ -55,7 +55,7 @@ def __init__(self, tentacles_setup_config): self.indicator = None self.source = None self.period = None - self.min_confidence_threshold = 0 + self.min_confidence_threshold = 100 self.gpt_model = gpt_service.GPTService.DEFAULT_MODEL self.is_backtesting = False self.min_allowed_timeframe = os.getenv("MIN_GPT_TIMEFRAME", None) diff --git a/Evaluator/TA/ai_evaluator/tests/test_ai.py b/Evaluator/TA/ai_evaluator/tests/test_ai.py index cf644546c..145573cce 100644 --- a/Evaluator/TA/ai_evaluator/tests/test_ai.py +++ b/Evaluator/TA/ai_evaluator/tests/test_ai.py @@ -37,7 +37,8 @@ def test_indicators(GPT_evaluator): def 
test_get_candles_data_api(GPT_evaluator): for source in GPT_evaluator.SOURCES: GPT_evaluator.source = source - assert isinstance(GPT_evaluator.get_candles_data_api(), types.FunctionType) + if GPT_evaluator.source not in GPT_evaluator.get_unformated_sources(): + assert isinstance(GPT_evaluator.get_candles_data_api(), types.FunctionType) def test_parse_prediction_side(GPT_evaluator): @@ -57,3 +58,7 @@ def test_parse_confidence(GPT_evaluator): assert GPT_evaluator._parse_confidence("up 54.33%") == 54.33 assert GPT_evaluator._parse_confidence("down 70% confidence blablabla") == 70 assert GPT_evaluator._parse_confidence("Prediction: down 70%") == 70 + GPT_evaluator.min_confidence_threshold = 60 + assert GPT_evaluator._parse_confidence("up 70%") == 100 + assert GPT_evaluator._parse_confidence("up 60%") == 100 + assert GPT_evaluator._parse_confidence("up 59%") == 59 From 46d52581073b1d565768951da63f8c96f0ffcfde Mon Sep 17 00:00:00 2001 From: Guillaume De Saint Martin Date: Sun, 29 Oct 2023 10:43:21 +0100 Subject: [PATCH 09/10] [GPTProfile] update GPT default profile to use Smart DCA --- profiles/gpt_trading/profile.json | 3 +-- .../specific_config/DCATradingMode.json | 26 +++++++++++++++++++ .../specific_config/DailyTradingMode.json | 20 -------------- .../specific_config/GPTEvaluator.json | 9 ++++--- .../SimpleStrategyEvaluator.json | 18 +++++++++++++ .../TechnicalAnalysisStrategyEvaluator.json | 22 ---------------- profiles/gpt_trading/tentacles_config.json | 4 +-- 7 files changed, 53 insertions(+), 49 deletions(-) create mode 100644 profiles/gpt_trading/specific_config/DCATradingMode.json delete mode 100644 profiles/gpt_trading/specific_config/DailyTradingMode.json create mode 100644 profiles/gpt_trading/specific_config/SimpleStrategyEvaluator.json delete mode 100644 profiles/gpt_trading/specific_config/TechnicalAnalysisStrategyEvaluator.json diff --git a/profiles/gpt_trading/profile.json b/profiles/gpt_trading/profile.json index fa8780c64..2353fcd3a 100644 --- a/profiles/gpt_trading/profile.json +++ b/profiles/gpt_trading/profile.json @@ -25,7 +25,6 @@ "taker": 0.1 }, "starting-portfolio": { - "BTC": 10, "USDT": 1000 } }, @@ -37,7 +36,7 @@ "profile": { "avatar": "ChatGPT_logo.svg", "complexity": 2, - "description": "GPT DailyTrading uses ChatGPT to predict the market. It can be used to send alerts according to ChatGPT's predictions and trade directly based on the profile's DailyTradingMode configuration.\nConfigure the GPTEvaluator to customize the way market data are sent to ChatGPT.", + "description": "GPT Smart DCA uses ChatGPT to predict the market. 
It can be used to trade directly based on the profile's DCA Trading mode configuration.\nConfigure the GPTEvaluator to customize the way market data are sent to ChatGPT and the DCATradingMode to change how entries and exits should be created.", "id": "gpt_trading", "imported": false, "name": "GPT Trading", diff --git a/profiles/gpt_trading/specific_config/DCATradingMode.json b/profiles/gpt_trading/specific_config/DCATradingMode.json new file mode 100644 index 000000000..a79c6b31c --- /dev/null +++ b/profiles/gpt_trading/specific_config/DCATradingMode.json @@ -0,0 +1,26 @@ +{ + "buy_order_amount": "8%t", + "cancel_open_orders_at_each_entry": true, + "default_config": [ + "SimpleStrategyEvaluator" + ], + "entry_limit_orders_price_percent": 1.3, + "exit_limit_orders_price_percent": 2, + "minutes_before_next_buy": 10080, + "required_strategies": [ + "SimpleStrategyEvaluator", + "TechnicalAnalysisStrategyEvaluator" + ], + "secondary_entry_orders_amount": "8%t", + "secondary_entry_orders_count": 1, + "secondary_entry_orders_price_percent": 1.3, + "secondary_exit_orders_count": 1, + "secondary_exit_orders_price_percent": 0.5, + "trigger_mode": "Maximum evaluators signals based", + "use_init_entry_orders": false, + "use_market_entry_orders": false, + "use_secondary_entry_orders": true, + "use_secondary_exit_orders": false, + "use_stop_losses": false, + "use_take_profit_exit_orders": true +} \ No newline at end of file diff --git a/profiles/gpt_trading/specific_config/DailyTradingMode.json b/profiles/gpt_trading/specific_config/DailyTradingMode.json deleted file mode 100644 index c0c2d235b..000000000 --- a/profiles/gpt_trading/specific_config/DailyTradingMode.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "close_to_current_price_difference": 0.005, - "default_config": [ - "SimpleStrategyEvaluator" - ], - "required_strategies": [ - "SimpleStrategyEvaluator", - "TechnicalAnalysisStrategyEvaluator" - ], - "max_currency_percent": 100, - "required_strategies_min_count": 1, - "sell_with_maximum_size_orders": false, - "buy_with_maximum_size_orders": false, - "use_prices_close_to_current_price": false, - "disable_buy_orders": false, - "disable_sell_orders": false, - "use_stop_orders": true, - "emit_trading_signals": false, - "trading_strategy": "" -} \ No newline at end of file diff --git a/profiles/gpt_trading/specific_config/GPTEvaluator.json b/profiles/gpt_trading/specific_config/GPTEvaluator.json index dbe40b0c1..2417e4958 100644 --- a/profiles/gpt_trading/specific_config/GPTEvaluator.json +++ b/profiles/gpt_trading/specific_config/GPTEvaluator.json @@ -1,5 +1,8 @@ { - "indicator": "No indicator: the raw value of the selected source", - "period": 2, - "source": "Close" + "GPT_model": "gpt-3.5-turbo", + "indicator": "No indicator: raw candles price data", + "max_confidence_threshold": 60, + "min_confidence_threshold": 80, + "period": 20, + "source": "Full candle (For no indicator only)" } \ No newline at end of file diff --git a/profiles/gpt_trading/specific_config/SimpleStrategyEvaluator.json b/profiles/gpt_trading/specific_config/SimpleStrategyEvaluator.json new file mode 100644 index 000000000..3af652441 --- /dev/null +++ b/profiles/gpt_trading/specific_config/SimpleStrategyEvaluator.json @@ -0,0 +1,18 @@ +{ + "background_social_evaluators": [ + "RedditForumEvaluator" + ], + "default_config": [ + "DoubleMovingAverageTrendEvaluator", + "RSIMomentumEvaluator" + ], + "re_evaluate_TA_when_social_or_realtime_notification": true, + "required_candles_count": 1000, + "required_evaluators": [ + "*" + ], + 
"required_time_frames": [ + "4h" + ], + "social_evaluators_notification_timeout": 3600 +} \ No newline at end of file diff --git a/profiles/gpt_trading/specific_config/TechnicalAnalysisStrategyEvaluator.json b/profiles/gpt_trading/specific_config/TechnicalAnalysisStrategyEvaluator.json deleted file mode 100644 index 79aeae1db..000000000 --- a/profiles/gpt_trading/specific_config/TechnicalAnalysisStrategyEvaluator.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "compatible_evaluator_types": [ - "TA", - "REAL_TIME" - ], - "default_config": [ - "DoubleMovingAverageTrendEvaluator", - "RSIMomentumEvaluator" - ], - "required_evaluators": [ - "*" - ], - "required_time_frames": [ - "4h" - ], - "time_frames_to_weight": [ - { - "time_frame": "4h", - "weight": 50 - } - ] -} \ No newline at end of file diff --git a/profiles/gpt_trading/tentacles_config.json b/profiles/gpt_trading/tentacles_config.json index ae1de804d..1fb7d20f0 100644 --- a/profiles/gpt_trading/tentacles_config.json +++ b/profiles/gpt_trading/tentacles_config.json @@ -2,10 +2,10 @@ "tentacle_activation": { "Evaluator": { "GPTEvaluator": true, - "TechnicalAnalysisStrategyEvaluator": true + "SimpleStrategyEvaluator": true }, "Trading": { - "DailyTradingMode": true + "DCATradingMode": true } } } \ No newline at end of file From dc5c5c0ef9dfd40f66df5d0726fa003ec9d3a4f6 Mon Sep 17 00:00:00 2001 From: Guillaume De Saint Martin Date: Sun, 29 Oct 2023 14:23:19 +0100 Subject: [PATCH 10/10] [Links] update links to octobot.cloud --- .../TelegramChannelSignalEvaluator.md | 2 +- .../resources/TelegramSignalEvaluator.md | 2 +- README.md | 2 +- .../advanced_strategy_optimizer.html | 4 +-- .../advanced_tentacle_packages.html | 2 +- .../advanced_tentacles.html | 2 +- .../static/js/common/tutorial.js | 8 +++--- .../web_interface/templates/about.html | 6 ++--- .../web_interface/templates/accounts.html | 2 +- .../templates/components/config/profiles.html | 2 +- .../components/config/service_card.html | 2 +- .../templates/config_tentacle.html | 4 +-- .../web_interface/templates/index.html | 2 +- .../web_interface/templates/login.html | 2 +- .../web_interface/templates/octobot_help.html | 26 ++++++++++++------- .../web_interface/templates/profile.html | 6 ++--- Services/Services_bases/gpt_service/gpt.py | 2 +- .../Services_bases/reddit_service/reddit.py | 2 +- .../telegram_api_service/telegram_api.py | 2 +- .../telegram_service/telegram.py | 2 +- .../trading_view_service/trading_view.py | 2 +- .../Services_bases/twitter_service/twitter.py | 2 +- Services/Services_bases/web_service/web.py | 2 +- .../Services_bases/webhook_service/webhook.py | 2 +- profiles/tradingview_trading/profile.json | 2 +- 25 files changed, 49 insertions(+), 43 deletions(-) diff --git a/Evaluator/Social/signal_evaluator/resources/TelegramChannelSignalEvaluator.md b/Evaluator/Social/signal_evaluator/resources/TelegramChannelSignalEvaluator.md index 4693734f5..d8160334b 100644 --- a/Evaluator/Social/signal_evaluator/resources/TelegramChannelSignalEvaluator.md +++ b/Evaluator/Social/signal_evaluator/resources/TelegramChannelSignalEvaluator.md @@ -4,4 +4,4 @@ Triggers on a Telegram signal from any channel your personal account joined. Signal parsing is configurable according to the name of the channel. -See [OctoBot docs about Telegram API service](https://www.octobot.info/interfaces/telegram-interface/telegram-api) for more information. 
+See [OctoBot docs about Telegram API service](https://www.octobot.cloud/guides/octobot-interfaces/telegram/telegram-api?utm_source=octobot&utm_medium=dk&utm_campaign=regular_open_source_content&utm_content=telegramChannelSignalEvaluator) for more information. diff --git a/Evaluator/Social/signal_evaluator/resources/TelegramSignalEvaluator.md b/Evaluator/Social/signal_evaluator/resources/TelegramSignalEvaluator.md index 7ca7172e0..daa2c17af 100644 --- a/Evaluator/Social/signal_evaluator/resources/TelegramSignalEvaluator.md +++ b/Evaluator/Social/signal_evaluator/resources/TelegramSignalEvaluator.md @@ -11,4 +11,4 @@ Remember that OctoBot can only see messages from a chat/group where its Telegram bot (in OctoBot configuration) has been invited. Keep also in mind that you need to disable the privacy mode of your Telegram bot to allow it to see group messages. -See [OctoBot docs about Telegram interface](https://www.octobot.info/interfaces/telegram-interface) for more information. +See [OctoBot docs about Telegram interface](https://www.octobot.cloud/guides/octobot-interfaces/telegram?utm_source=octobot&utm_medium=dk&utm_campaign=regular_open_source_content&utm_content=telegramSignalEvaluator) for more information. diff --git a/README.md b/README.md index 3d271f47e..7437c4d16 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ This repository contains default evaluators, strategies, utilitary modules, inte Modules in this tentacles are installed in the **Default** folder of the associated module types -To add custom tentacles to your OctoBot, see the [dedicated docs page](https://developer.octobot.info/tentacles/customize-your-octobot). +To add custom tentacles to your OctoBot, see the [dedicated docs page](https://www.octobot.cloud/guides/octobot-tentacles-development/customize-your-octobot?utm_source=octobot&utm_medium=dk&utm_campaign=regular_open_source_content&utm_content=octobot_tentacles_readme). ## Contributing to the official OctoBot Tentacles: 1. Create your own fork of this repo diff --git a/Services/Interfaces/web_interface/advanced_templates/advanced_strategy_optimizer.html b/Services/Interfaces/web_interface/advanced_templates/advanced_strategy_optimizer.html index 5bca798ea..b0b455cc0 100644 --- a/Services/Interfaces/web_interface/advanced_templates/advanced_strategy_optimizer.html +++ b/Services/Interfaces/web_interface/advanced_templates/advanced_strategy_optimizer.html @@ -5,7 +5,7 @@

Strategy optimizer - +

@@ -118,7 +118,7 @@

Strategy optimizer in progress

If you want to deeply test your strategy, compare its results in different situations and figure out the best settings for your traded markets, we suggest to check out the - + new strategy designer available on octobot.cloud pro plan. diff --git a/Services/Interfaces/web_interface/advanced_templates/advanced_tentacle_packages.html b/Services/Interfaces/web_interface/advanced_templates/advanced_tentacle_packages.html index 7f19e6ada..40d4db662 100644 --- a/Services/Interfaces/web_interface/advanced_templates/advanced_tentacle_packages.html +++ b/Services/Interfaces/web_interface/advanced_templates/advanced_tentacle_packages.html @@ -12,7 +12,7 @@

 Tentacle Packages - +

diff --git a/Services/Interfaces/web_interface/advanced_templates/advanced_tentacles.html b/Services/Interfaces/web_interface/advanced_templates/advanced_tentacles.html index f59a603ad..c61f0ba96 100644 --- a/Services/Interfaces/web_interface/advanced_templates/advanced_tentacles.html +++ b/Services/Interfaces/web_interface/advanced_templates/advanced_tentacles.html @@ -6,7 +6,7 @@

Installed Tentacles - + diff --git a/Services/Interfaces/web_interface/static/js/common/tutorial.js b/Services/Interfaces/web_interface/static/js/common/tutorial.js index b39e65f6a..3cc41c65e 100644 --- a/Services/Interfaces/web_interface/static/js/common/tutorial.js +++ b/Services/Interfaces/web_interface/static/js/common/tutorial.js @@ -119,7 +119,7 @@ _TUTORIALS = { }, { title: 'See also', - intro: `More details on ${getDocsLink("/configuration/profiles", "the profiles guide")}.` + intro: `More details on ${getDocsLink("/octobot-configuration/profiles?utm_source=octobot&utm_medium=dk&utm_campaign=regular_open_source_content&utm_content=profiles_intro", "the profiles guide")}.` }, ] } @@ -202,7 +202,7 @@ _TUTORIALS = { }, { title: 'See also', - intro: `More details on ${getDocsLink("/usage/understanding-profitability", "the OctoBot docs")}.` + intro: `More details on ${getDocsLink("/octobot-usage/understanding-profitability?utm_source=octobot&utm_medium=dk&utm_campaign=regular_open_source_content&utm_content=dashboard_intro", "the OctoBot docs")}.` }, ] } @@ -222,7 +222,7 @@ _TUTORIALS = { }, { title: 'See also', - intro: `More details on supported exchanges in the ${getExchangesDocsLink("", "OctoBot exchanges docs")}.` + intro: `More details on supported exchanges in the ${getExchangesDocsLink("?utm_source=octobot&utm_medium=dk&utm_campaign=regular_open_source_content&utm_content=exchanges_config", "OctoBot exchanges docs")}.` }, ] } @@ -242,7 +242,7 @@ _TUTORIALS = { }, { title: 'See also', - intro: `More details the ${getDocsLink("/advanced_usage/backtesting-and-strategy-optimization", "backtesting guide")}.` + intro: `More details the ${getDocsLink("/octobot-advanced-usage/backtesting-and-strategy-optimization?utm_source=octobot&utm_medium=dk&utm_campaign=regular_open_source_content&utm_content=backtesting_intro", "backtesting guide")}.` }, ] } diff --git a/Services/Interfaces/web_interface/templates/about.html b/Services/Interfaces/web_interface/templates/about.html index e1b857e99..383f8e647 100644 --- a/Services/Interfaces/web_interface/templates/about.html +++ b/Services/Interfaces/web_interface/templates/about.html @@ -52,7 +52,7 @@

Get more from OctoBot using OctoBot cloud

  • Deploy your OctoBot on the cloud and enjoy your OctoBot from anywhere while benefiting - from cloud exclusive features such as the strategy designer. + from cloud exclusive features such as the strategy designer.
  • @@ -77,7 +77,7 @@

    Help us to improve OctoBot

    Any question ? Please have a look at our - + Frequently ask question (FAQ) section first ! @@ -155,7 +155,7 @@

    Support the OctoBot project

    Registering to the beta tester group will grant you access to major new features weeks in advance as well as a direct communication channel to the OctoBot team to share your feedback and ideas before new versions are released to the public. - + More info on the beta tester program

    diff --git a/Services/Interfaces/web_interface/templates/accounts.html b/Services/Interfaces/web_interface/templates/accounts.html index 901d722cc..08ee22d83 100644 --- a/Services/Interfaces/web_interface/templates/accounts.html +++ b/Services/Interfaces/web_interface/templates/accounts.html @@ -135,7 +135,7 @@

    Exchanges

    Notifications - +

    diff --git a/Services/Interfaces/web_interface/templates/components/config/profiles.html b/Services/Interfaces/web_interface/templates/components/config/profiles.html index 7b6edfbb0..9f5da7627 100644 --- a/Services/Interfaces/web_interface/templates/components/config/profiles.html +++ b/Services/Interfaces/web_interface/templates/components/config/profiles.html @@ -242,7 +242,7 @@

    data-toggle="tooltip" title="Use this profile"> - +  

    diff --git a/Services/Interfaces/web_interface/templates/components/config/service_card.html b/Services/Interfaces/web_interface/templates/components/config/service_card.html index aa09ab262..86679e02b 100644 --- a/Services/Interfaces/web_interface/templates/components/config/service_card.html +++ b/Services/Interfaces/web_interface/templates/components/config/service_card.html @@ -21,7 +21,7 @@

    {% endif %}

    - +

    diff --git a/Services/Interfaces/web_interface/templates/config_tentacle.html b/Services/Interfaces/web_interface/templates/config_tentacle.html index 298c98396..d85230480 100644 --- a/Services/Interfaces/web_interface/templates/config_tentacle.html +++ b/Services/Interfaces/web_interface/templates/config_tentacle.html @@ -36,7 +36,7 @@

    Configuration role="button" data-toggle="tooltip" title="Save"> - +

    @@ -195,7 +195,7 @@

    Test configuration Activation required {% endif %} - +   {% if activated_trading_mode %} diff --git a/Services/Interfaces/web_interface/templates/index.html b/Services/Interfaces/web_interface/templates/index.html index 5e03fb38b..052ab26fc 100644 --- a/Services/Interfaces/web_interface/templates/index.html +++ b/Services/Interfaces/web_interface/templates/index.html @@ -53,7 +53,7 @@

    Find more information the recent changes and future plans on - our blog. + our blog.

    Please note that you will need to click "Forgot your password?" to set your password when first diff --git a/Services/Interfaces/web_interface/templates/login.html b/Services/Interfaces/web_interface/templates/login.html index 56f95614b..7b0413040 100644 --- a/Services/Interfaces/web_interface/templates/login.html +++ b/Services/Interfaces/web_interface/templates/login.html @@ -10,7 +10,7 @@

    Welcome back - +

    diff --git a/Services/Interfaces/web_interface/templates/octobot_help.html b/Services/Interfaces/web_interface/templates/octobot_help.html index 0a5f29309..1826cde41 100644 --- a/Services/Interfaces/web_interface/templates/octobot_help.html +++ b/Services/Interfaces/web_interface/templates/octobot_help.html @@ -20,8 +20,8 @@

    When using OctoBot, you will find these buttons: . They are triggering the in page help and contain links to the - OctoBot website or - OctoBot documentation article related + OctoBot website or + OctoBot guides explaining to the associated element.

    @@ -29,37 +29,37 @@

    Frequently asked questions

    We keep track of many of our community users questions so that everyone can benefit from the answers in - our dedicated FAQ. + our dedicated FAQ.

    Troubleshoot

    Some issues are pretty common and sometimes they are due to factors that are external to OctoBot. In the - troubleshoot section you will + troubleshoot section you will find many possible issues happening on various situations and how to fix them.

    OctoBot website

    - In the OctoBot website, you will find many resources on various subjects including: + In the OctoBot website, you will find many resources on various subjects including:
    • What is the OctoBot Project
    • -
    • - Video guides on OctoBot's setup and main features -
    • In depth insight regarding OctoBot, its design and philosophy

    - OctoBot documentation + OctoBot guides

    - In the OctoBot documentation, you will find many article on various subjects including: + In the OctoBot guides, you will find many articles to help you use OctoBot including:
      +
    • + Video guides on OctoBot's setup and main features +
    • Different ways to install OctoBot
    • @@ -69,9 +69,15 @@

    • OctoBot usage and interfaces
    • +
    • + Supported exchanges +
    • Advanced resources on OctoBot architecture, development guides and specific features
    • +
    • + OctoBot Script +

    diff --git a/Services/Interfaces/web_interface/templates/profile.html b/Services/Interfaces/web_interface/templates/profile.html index 3740ef9a8..ca80529c6 100644 --- a/Services/Interfaces/web_interface/templates/profile.html +++ b/Services/Interfaces/web_interface/templates/profile.html @@ -401,12 +401,12 @@

    {{ m_config_trader_card.config_trader_card(config_trading, "trading", "Trading settings", - link=OCTOBOT_DOCS_URL+"/configuration/profile-configuration#trading") }} + link=OCTOBOT_DOCS_URL+"/octobot-configuration/profile-configuration#trading") }} {{ m_config_trader_card.config_trader_card(config_trader, "trader", "Trader", - link=OCTOBOT_DOCS_URL+"/usage/simulator#real-trader") }} + link=OCTOBOT_DOCS_URL+"/octobot-usage/simulator#real-trader") }} {{ m_config_trader_card.config_trader_card(config_trader_simulator, "trader-simulator", - "Trader simulator", link=OCTOBOT_DOCS_URL+"/usage/simulator", + "Trader simulator", link=OCTOBOT_DOCS_URL+"/octobot-usage/simulator?utm_source=octobot&utm_medium=dk&utm_campaign=regular_open_source_content&utm_content=simulator_config", footer_text="Changes in the simulated starting portfolio will reset enabled exchanges simulated portfolio history.") }}

    diff --git a/Services/Services_bases/gpt_service/gpt.py b/Services/Services_bases/gpt_service/gpt.py index 9c0975832..e9a36fd5f 100644 --- a/Services/Services_bases/gpt_service/gpt.py +++ b/Services/Services_bases/gpt_service/gpt.py @@ -265,7 +265,7 @@ def get_required_config(self): @classmethod def get_help_page(cls) -> str: - return f"{constants.OCTOBOT_DOCS_URL}/interfaces/chatgpt-interface" + return f"{constants.OCTOBOT_DOCS_URL}/octobot-interfaces/chatgpt" def get_type(self) -> None: return services_constants.CONFIG_GPT diff --git a/Services/Services_bases/reddit_service/reddit.py b/Services/Services_bases/reddit_service/reddit.py index 5ce0b82b2..708db1e08 100644 --- a/Services/Services_bases/reddit_service/reddit.py +++ b/Services/Services_bases/reddit_service/reddit.py @@ -52,7 +52,7 @@ def get_required_config(self): @classmethod def get_help_page(cls) -> str: - return f"{constants.OCTOBOT_DOCS_URL}/interfaces/reddit-interface" + return f"{constants.OCTOBOT_DOCS_URL}/octobot-interfaces/reddit" @staticmethod def is_setup_correctly(config): diff --git a/Services/Services_bases/telegram_api_service/telegram_api.py b/Services/Services_bases/telegram_api_service/telegram_api.py index 8a3d23a3c..9c92c5526 100644 --- a/Services/Services_bases/telegram_api_service/telegram_api.py +++ b/Services/Services_bases/telegram_api_service/telegram_api.py @@ -69,7 +69,7 @@ def get_read_only_info(self): @classmethod def get_help_page(cls) -> str: - return f"{constants.OCTOBOT_DOCS_URL}/interfaces/telegram-interface/telegram-api" + return f"{constants.OCTOBOT_DOCS_URL}/octobot-interfaces/telegram/telegram-api" @staticmethod def is_setup_correctly(config): diff --git a/Services/Services_bases/telegram_service/telegram.py b/Services/Services_bases/telegram_service/telegram.py index 68d9e227f..23740832e 100644 --- a/Services/Services_bases/telegram_service/telegram.py +++ b/Services/Services_bases/telegram_service/telegram.py @@ -67,7 +67,7 @@ def get_read_only_info(self): @classmethod def get_help_page(cls) -> str: - return f"{constants.OCTOBOT_DOCS_URL}/interfaces/telegram-interface" + return f"{constants.OCTOBOT_DOCS_URL}/octobot-interfaces/telegram" @staticmethod def is_setup_correctly(config): diff --git a/Services/Services_bases/trading_view_service/trading_view.py b/Services/Services_bases/trading_view_service/trading_view.py index ae496caa1..97b44e17e 100644 --- a/Services/Services_bases/trading_view_service/trading_view.py +++ b/Services/Services_bases/trading_view_service/trading_view.py @@ -66,7 +66,7 @@ def get_read_only_info(self): @classmethod def get_help_page(cls) -> str: - return f"{constants.OCTOBOT_DOCS_URL}/webhooks/tradingview-webhook" + return f"{constants.OCTOBOT_DOCS_URL}/octobot-interfaces/tradingview" def get_endpoint(self) -> None: return None diff --git a/Services/Services_bases/twitter_service/twitter.py b/Services/Services_bases/twitter_service/twitter.py index eec72b319..848628b24 100644 --- a/Services/Services_bases/twitter_service/twitter.py +++ b/Services/Services_bases/twitter_service/twitter.py @@ -65,7 +65,7 @@ def get_read_only_info(self): @classmethod def get_help_page(cls) -> str: - return f"{constants.OCTOBOT_DOCS_URL}/interfaces/twitter-interface" + return f"{constants.OCTOBOT_DOCS_URL}/octobot-interfaces/twitter" @staticmethod def is_setup_correctly(config): diff --git a/Services/Services_bases/web_service/web.py b/Services/Services_bases/web_service/web.py index b64bb98d6..4dd8666d2 100644 --- a/Services/Services_bases/web_service/web.py +++ 
b/Services/Services_bases/web_service/web.py @@ -60,7 +60,7 @@ def get_required_config(self): @classmethod def get_help_page(cls) -> str: - return f"{constants.OCTOBOT_DOCS_URL}/interfaces/web-interface" + return f"{constants.OCTOBOT_DOCS_URL}/octobot-interfaces/web" @staticmethod def is_setup_correctly(config): diff --git a/Services/Services_bases/webhook_service/webhook.py b/Services/Services_bases/webhook_service/webhook.py index 6b7b11af2..c7597921f 100644 --- a/Services/Services_bases/webhook_service/webhook.py +++ b/Services/Services_bases/webhook_service/webhook.py @@ -119,7 +119,7 @@ def get_required_config(self): @classmethod def get_help_page(cls) -> str: - return f"{constants.OCTOBOT_DOCS_URL}/webhooks/using-a-webhook-with-octobot" + return f"{constants.OCTOBOT_DOCS_URL}/octobot-interfaces/tradingview/using-a-webhook" def get_type(self) -> None: return services_constants.CONFIG_WEBHOOK diff --git a/profiles/tradingview_trading/profile.json b/profiles/tradingview_trading/profile.json index afdbdd0c5..a2a3034a0 100644 --- a/profiles/tradingview_trading/profile.json +++ b/profiles/tradingview_trading/profile.json @@ -43,7 +43,7 @@ "avatar": "default_profile.png", "risk": 2, "complexity": 3, - "description": "TradingViewSignalsTrading is a profile configured to react on signals from tradingview.com. It requires to setup a trading view pro account and a webhook service. See how to configure at : https://www.octobot.info/webhooks/tradingview-webhook.", + "description": "TradingViewSignalsTrading is a profile configured to react on signals from tradingview.com. It requires to setup a trading view pro account and a webhook service. See how to configure your OctoBot for TradingView on https://www.octobot.cloud/guides/octobot-interfaces/tradingview", "id": "tradingview_trading", "name": "TradingView Signals Trading", "read_only": true
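A minimal sketch (not part of the patches) of how the stored-signal cache added to GPTService in PATCH 02 is keyed, using only the BaseTree calls shown in gpt.py above; the exchange, symbol and timestamp are example values:

import octobot_commons.tree as tree

signals = tree.BaseTree()
# one node per [exchange, symbol, time_frame, version, candle_open_time] path
signals.set_node_at_path("up 70%", str, ["binance", "BTC/USDT", "1h", "0.0.0", 1698487200])
print(signals.get_node(["binance", "BTC/USDT", "1h", "0.0.0", 1698487200]).node_value)  # "up 70%"
# for a missing path, get_node raises tree.NodeExistsError, which
# _get_signal_from_stored_signals catches and maps to ""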