From 781eb19a7ed818776215a7e153bc8d1562b41266 Mon Sep 17 00:00:00 2001
From: morvanzhou
Date: Sat, 20 Jul 2024 00:59:55 +0800
Subject: [PATCH] feat(app):

- edit logging

---
 src/retk/core/ai/llm/api/aliyun.py  | 10 +++++-----
 src/retk/core/ai/llm/api/baidu.py   | 14 +++++++-------
 src/retk/core/ai/llm/api/base.py    |  8 ++++----
 src/retk/core/ai/llm/api/openai.py  |  6 +++---
 src/retk/core/ai/llm/api/tencent.py | 10 +++++-----
 src/retk/core/ai/llm/api/xfyun.py   |  8 ++++----
 6 files changed, 28 insertions(+), 28 deletions(-)

diff --git a/src/retk/core/ai/llm/api/aliyun.py b/src/retk/core/ai/llm/api/aliyun.py
index 05a6215..22e8d33 100644
--- a/src/retk/core/ai/llm/api/aliyun.py
+++ b/src/retk/core/ai/llm/api/aliyun.py
@@ -119,12 +119,12 @@ async def complete(
             return "Aliyun model error, please try later", code

         rcode = rj.get("code")
         if rcode is not None:
-            logger.error(f"ReqId={req_id} | Aliyun {model} | error: code={rj['code']} {rj['message']}")
+            logger.error(f"rid='{req_id}' | Aliyun {model} | error: code={rj['code']} {rj['message']}")
             if rcode == "Throttling.RateQuota":
                 return "Aliyun model rate limit exceeded", const.CodeEnum.LLM_API_LIMIT_EXCEEDED
             return "Aliyun model error, please try later", const.CodeEnum.LLM_SERVICE_ERROR
-        logger.info(f"ReqId={req_id} | Aliyun {model} | usage: {rj['usage']}")
+        logger.info(f"rid='{req_id}' | Aliyun {model} | usage: {rj['usage']}")
         return rj["output"]["choices"][0]["message"]["content"], const.CodeEnum.OK

     async def stream_complete(
@@ -152,16 +152,16 @@ async def stream_complete(
                 try:
                     json_str = s[5:]
                 except IndexError:
-                    logger.error(f"ReqId={req_id} | Aliyun {model} | stream error: string={s}")
+                    logger.error(f"rid='{req_id}' | Aliyun {model} | stream error: string={s}")
                     continue
                 try:
                     json_data = json.loads(json_str)
                 except json.JSONDecodeError as e:
-                    logger.error(f"ReqId={req_id} | Aliyun {model} | stream error: string={s}, error={e}")
+                    logger.error(f"rid='{req_id}' | Aliyun {model} | stream error: string={s}, error={e}")
                     continue
                 choice = json_data["output"]["choices"][0]
                 if choice["finish_reason"] != "null":
-                    logger.info(f"ReqId={req_id} | Aliyun {model} | usage: {json_data['usage']}")
+                    logger.info(f"rid='{req_id}' | Aliyun {model} | usage: {json_data['usage']}")
                     break
                 txt += choice["message"]["content"]
                 yield txt.encode("utf-8"), code
diff --git a/src/retk/core/ai/llm/api/baidu.py b/src/retk/core/ai/llm/api/baidu.py
index 68a564a..91a8fc8 100644
--- a/src/retk/core/ai/llm/api/baidu.py
+++ b/src/retk/core/ai/llm/api/baidu.py
@@ -105,11 +105,11 @@ async def set_token(self, req_id: str = None):
             }
         )
         if resp.status_code != 200:
-            logger.error(f"ReqId={req_id} | Baidu | error: {resp.text}")
+            logger.error(f"rid='{req_id}' | Baidu | error: {resp.text}")
             return ""
         rj = resp.json()
         if rj.get("error") is not None:
-            logger.error(f"ReqId={req_id} | Baidu | token error: {rj['error_description']}")
+            logger.error(f"rid='{req_id}' | Baidu | token error: {rj['error_description']}")
             return ""

         self.token_expires_at = rj["expires_in"] + datetime.now().timestamp()
@@ -152,9 +152,9 @@ async def complete(
             return "Model error, please try later", code

         if resp.get("error_code") is not None:
-            logger.error(f"ReqId={req_id} | Baidu {model} | error: code={resp['error_code']} {resp['error_msg']}")
+            logger.error(f"rid='{req_id}' | Baidu {model} | error: code={resp['error_code']} {resp['error_msg']}")
             return resp["error_msg"], const.CodeEnum.LLM_SERVICE_ERROR
-        logger.info(f"ReqId={req_id} | Baidu {model} | usage: {resp['usage']}")
+        logger.info(f"rid='{req_id}' | Baidu {model} | usage: {resp['usage']}")
         return resp["result"], const.CodeEnum.OK

     async def stream_complete(
@@ -187,16 +187,16 @@ async def stream_complete(
                 try:
                     json_str = s[6:]
                 except IndexError:
-                    logger.error(f"ReqId={req_id} | Baidu {model} | stream error: string={s}")
+                    logger.error(f"rid='{req_id}' | Baidu {model} | stream error: string={s}")
                     continue
                 try:
                     json_data = json.loads(json_str)
                 except json.JSONDecodeError as e:
-                    logger.error(f"ReqId={req_id} | Baidu {model} | stream error: string={s}, error={e}")
+                    logger.error(f"rid='{req_id}' | Baidu {model} | stream error: string={s}, error={e}")
                     continue

                 if json_data["is_end"]:
-                    logger.info(f"ReqId={req_id} | Baidu {model} | usage: {json_data['usage']}")
+                    logger.info(f"rid='{req_id}' | Baidu {model} | usage: {json_data['usage']}")
                     break
                 txt += json_data["result"]
                 yield txt.encode("utf-8"), code
diff --git a/src/retk/core/ai/llm/api/base.py b/src/retk/core/ai/llm/api/base.py
index 9f08d19..0533b9c 100644
--- a/src/retk/core/ai/llm/api/base.py
+++ b/src/retk/core/ai/llm/api/base.py
@@ -68,14 +68,14 @@ async def _complete(
             httpx.ConnectError,
             httpx.ReadTimeout,
         ) as e:
-            logger.error(f"ReqId={req_id} Model error: {e}")
+            logger.error(f"rid='{req_id}' Model error: {e}")
             return {}, const.CodeEnum.LLM_TIMEOUT
         except httpx.HTTPError as e:
-            logger.error(f"ReqId={req_id} Model error: {e}")
+            logger.error(f"rid='{req_id}' Model error: {e}")
             return {}, const.CodeEnum.LLM_SERVICE_ERROR

         if resp.status_code != 200:
             txt = resp.text.replace('\n', '')
-            logger.error(f"ReqId={req_id} Model error: {txt}")
+            logger.error(f"rid='{req_id}' Model error: {txt}")
             return {}, const.CodeEnum.LLM_SERVICE_ERROR
         rj = resp.json()
@@ -110,7 +110,7 @@ async def _stream_complete(
         ) as resp:
             if resp.status_code != 200:
                 await resp.aread()
-                logger.error(f"ReqId={req_id} Model error: {resp.text}")
+                logger.error(f"rid='{req_id}' Model error: {resp.text}")
                 yield resp.content, const.CodeEnum.LLM_SERVICE_ERROR
                 await client.aclose()
                 return
diff --git a/src/retk/core/ai/llm/api/openai.py b/src/retk/core/ai/llm/api/openai.py
index 2fe6655..5e2b653 100644
--- a/src/retk/core/ai/llm/api/openai.py
+++ b/src/retk/core/ai/llm/api/openai.py
@@ -100,7 +100,7 @@ async def complete(
             return "", code
         if rj.get("error") is not None:
             return rj["error"]["message"], const.CodeEnum.LLM_SERVICE_ERROR
-        logger.info(f"ReqId={req_id} | {self.__class__.__name__} {model} | usage: {rj['usage']}")
+        logger.info(f"rid='{req_id}' | {self.__class__.__name__} {model} | usage: {rj['usage']}")
         return rj["choices"][0]["message"]["content"], code

     async def stream_complete(
@@ -126,7 +126,7 @@ async def stream_complete(
                 try:
                     json_data = json.loads(json_str)
                 except json.JSONDecodeError:
-                    logger.error(f"ReqId={req_id} | {self.__class__.__name__} {model} | stream error: json={json_str}")
+                    logger.error(f"rid='{req_id}' | {self.__class__.__name__} {model} | stream error: json={json_str}")
                     continue
                 choice = json_data["choices"][0]
                 if choice["finish_reason"] is not None:
@@ -134,7 +134,7 @@ async def stream_complete(
                         usage = json_data["usage"]
                     except KeyError:
                         usage = choice["usage"]
-                    logger.info(f"ReqId={req_id} | {self.__class__.__name__} {model} | usage: {usage}")
+                    logger.info(f"rid='{req_id}' | {self.__class__.__name__} {model} | usage: {usage}")
                     break
                 txt += choice["delta"]["content"]
                 yield txt.encode("utf-8"), code
diff --git a/src/retk/core/ai/llm/api/tencent.py b/src/retk/core/ai/llm/api/tencent.py
index 187f3e5..2714512 100644
--- a/src/retk/core/ai/llm/api/tencent.py
+++ b/src/retk/core/ai/llm/api/tencent.py
@@ -139,7 +139,7 @@ def get_payload(self, model: Optional[str], messages: MessagesType, stream: bool
     def handle_err(req_id: str, error: Dict):
         msg = error.get("Message")
         code = error.get("Code")
-        logger.error(f"ReqId={req_id} | Tencent | error code={code}, msg={msg}")
+        logger.error(f"rid='{req_id}' | Tencent | error code={code}, msg={msg}")
         if code == 4001:
             ccode = const.CodeEnum.LLM_TIMEOUT
         elif code == "LimitExceeded":
@@ -155,7 +155,7 @@ def handle_normal_response(req_id: str, resp: Dict, stream: bool) -> Tuple[str,
             return "No response", const.CodeEnum.LLM_NO_CHOICE
         choice = choices[0]
         m = choice["Delta"] if stream else choice["Message"]
-        logger.info(f"ReqId={req_id} | Tencent | usage: {resp['Usage']}")
+        logger.info(f"rid='{req_id}' | Tencent | usage: {resp['Usage']}")
         return m["Content"], const.CodeEnum.OK

     async def complete(
@@ -212,16 +212,16 @@ async def stream_complete(
                 try:
                     json_str = s[6:]
                 except IndexError:
-                    logger.error(f"ReqId={req_id} | Tencent {model} | stream error: string={s}")
+                    logger.error(f"rid='{req_id}' | Tencent {model} | stream error: string={s}")
                     continue
                 try:
                     json_data = json.loads(json_str)
                 except json.JSONDecodeError as e:
-                    logger.error(f"ReqId={req_id} | Tencent {model} | stream error: string={s}, error={e}")
+                    logger.error(f"rid='{req_id}' | Tencent {model} | stream error: string={s}, error={e}")
                     continue
                 choice = json_data["Choices"][0]
                 if choice["FinishReason"] != "":
-                    logger.info(f"ReqId={req_id} | Tencent {model} | usage: {json_data['Usage']}")
+                    logger.info(f"rid='{req_id}' | Tencent {model} | usage: {json_data['Usage']}")
                     break
                 content = choice["Delta"]["Content"]
                 txt += content
diff --git a/src/retk/core/ai/llm/api/xfyun.py b/src/retk/core/ai/llm/api/xfyun.py
index 2ef4982..8d13c17 100644
--- a/src/retk/core/ai/llm/api/xfyun.py
+++ b/src/retk/core/ai/llm/api/xfyun.py
@@ -94,7 +94,7 @@ async def complete(
             return "", code
         if rj["code"] != 0:
             return rj["message"], const.CodeEnum.LLM_SERVICE_ERROR
-        logger.info(f"ReqId={req_id} | {self.__class__.__name__} {model} | usage: {rj['usage']}")
+        logger.info(f"rid='{req_id}' | {self.__class__.__name__} {model} | usage: {rj['usage']}")
         return rj["choices"][0]["message"]["content"], code

     async def stream_complete(
@@ -122,11 +122,11 @@ async def stream_complete(
                 try:
                     json_data = json.loads(json_str)
                 except json.JSONDecodeError:
-                    logger.error(f"ReqId={req_id} | {self.__class__.__name__} {model} | stream error: json={json_str}")
+                    logger.error(f"rid='{req_id}' | {self.__class__.__name__} {model} | stream error: json={json_str}")
                     continue
                 if json_data["code"] != 0:
                     logger.error(
-                        f"ReqId={req_id} | {self.__class__.__name__} {model} | error:"
+                        f"rid='{req_id}' | {self.__class__.__name__} {model} | error:"
                         f" code={json_data['code']} {json_data['message']}"
                     )
                     break
@@ -136,7 +136,7 @@ async def stream_complete(
                 except KeyError:
                     pass
                 else:
-                    logger.info(f"ReqId={req_id} | {self.__class__.__name__} {model} | usage: {usage}")
+                    logger.info(f"rid='{req_id}' | {self.__class__.__name__} {model} | usage: {usage}")
                     break
                 txt += choice["delta"]["content"]
                 yield txt.encode("utf-8"), code
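Note: every hunk in this patch changes only the request-id prefix of each log line, from ReqId={req_id} to rid='{req_id}'; the rest of each message is unchanged. A minimal sketch of the resulting convention follows; the logger name, the log_usage helper, and the sample values are illustrative assumptions, not code from this patch.

import logging

# Assumed logger name, for illustration only.
logger = logging.getLogger("retk.core.ai.llm")


def log_usage(req_id: str, vendor: str, model: str, usage: dict) -> None:
    # Hypothetical helper mirroring the pattern introduced by the patch:
    # quoting the id as rid='...' keeps empty or None ids visible in the log
    # and makes exact-match searches unambiguous, e.g. grep "rid='abc123'" app.log
    logger.info(f"rid='{req_id}' | {vendor} {model} | usage: {usage}")


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # Sample values only; vendor and model names are placeholders.
    log_usage("abc123", "Aliyun", "qwen-plus", {"input_tokens": 12, "output_tokens": 34})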