Commit 781eb19
feat(app):
- edit logging
MorvanZhou committed Jul 19, 2024
1 parent db197da commit 781eb19
Showing 6 changed files with 28 additions and 28 deletions.
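
At a glance, every change in this commit is the same one-line edit: the request-id prefix in log messages moves from ReqId={req_id} to rid='{req_id}', quoting the id, presumably so an empty or missing id still reads unambiguously in the logs. A minimal sketch of the before/after pattern (logger name, request id, model, and usage values here are illustrative, not taken from the diff):

import logging

logging.basicConfig(level=logging.INFO, format="%(message)s")
logger = logging.getLogger("retk")

req_id = "a1b2c3"                                  # illustrative request id
model = "qwen-max"                                 # illustrative model name
usage = {"input_tokens": 12, "output_tokens": 34}  # illustrative usage dict

# Before this commit:
logger.info(f"ReqId={req_id} | Aliyun {model} | usage: {usage}")

# After this commit:
logger.info(f"rid='{req_id}' | Aliyun {model} | usage: {usage}")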
10 changes: 5 additions & 5 deletions src/retk/core/ai/llm/api/aliyun.py
@@ -119,12 +119,12 @@ async def complete(
             return "Aliyun model error, please try later", code
         rcode = rj.get("code")
         if rcode is not None:
-            logger.error(f"ReqId={req_id} | Aliyun {model} | error: code={rj['code']} {rj['message']}")
+            logger.error(f"rid='{req_id}' | Aliyun {model} | error: code={rj['code']} {rj['message']}")
             if rcode == "Throttling.RateQuota":
                 return "Aliyun model rate limit exceeded", const.CodeEnum.LLM_API_LIMIT_EXCEEDED
             return "Aliyun model error, please try later", const.CodeEnum.LLM_SERVICE_ERROR
 
-        logger.info(f"ReqId={req_id} | Aliyun {model} | usage: {rj['usage']}")
+        logger.info(f"rid='{req_id}' | Aliyun {model} | usage: {rj['usage']}")
         return rj["output"]["choices"][0]["message"]["content"], const.CodeEnum.OK
 
     async def stream_complete(
@@ -152,16 +152,16 @@ async def stream_complete(
                 try:
                     json_str = s[5:]
                 except IndexError:
-                    logger.error(f"ReqId={req_id} | Aliyun {model} | stream error: string={s}")
+                    logger.error(f"rid='{req_id}' | Aliyun {model} | stream error: string={s}")
                     continue
                 try:
                     json_data = json.loads(json_str)
                 except json.JSONDecodeError as e:
-                    logger.error(f"ReqId={req_id} | Aliyun {model} | stream error: string={s}, error={e}")
+                    logger.error(f"rid='{req_id}' | Aliyun {model} | stream error: string={s}, error={e}")
                     continue
                 choice = json_data["output"]["choices"][0]
                 if choice["finish_reason"] != "null":
-                    logger.info(f"ReqId={req_id} | Aliyun {model} | usage: {json_data['usage']}")
+                    logger.info(f"rid='{req_id}' | Aliyun {model} | usage: {json_data['usage']}")
                     break
                 txt += choice["message"]["content"]
                 yield txt.encode("utf-8"), code
14 changes: 7 additions & 7 deletions src/retk/core/ai/llm/api/baidu.py
@@ -105,11 +105,11 @@ async def set_token(self, req_id: str = None):
             }
         )
         if resp.status_code != 200:
-            logger.error(f"ReqId={req_id} | Baidu | error: {resp.text}")
+            logger.error(f"rid='{req_id}' | Baidu | error: {resp.text}")
             return ""
         rj = resp.json()
         if rj.get("error") is not None:
-            logger.error(f"ReqId={req_id} | Baidu | token error: {rj['error_description']}")
+            logger.error(f"rid='{req_id}' | Baidu | token error: {rj['error_description']}")
             return ""
 
         self.token_expires_at = rj["expires_in"] + datetime.now().timestamp()
@@ -152,9 +152,9 @@ async def complete(
             return "Model error, please try later", code
 
         if resp.get("error_code") is not None:
-            logger.error(f"ReqId={req_id} | Baidu {model} | error: code={resp['error_code']} {resp['error_msg']}")
+            logger.error(f"rid='{req_id}' | Baidu {model} | error: code={resp['error_code']} {resp['error_msg']}")
             return resp["error_msg"], const.CodeEnum.LLM_SERVICE_ERROR
-        logger.info(f"ReqId={req_id} | Baidu {model} | usage: {resp['usage']}")
+        logger.info(f"rid='{req_id}' | Baidu {model} | usage: {resp['usage']}")
         return resp["result"], const.CodeEnum.OK
 
     async def stream_complete(
@@ -187,16 +187,16 @@ async def stream_complete(
                 try:
                     json_str = s[6:]
                 except IndexError:
-                    logger.error(f"ReqId={req_id} | Baidu {model} | stream error: string={s}")
+                    logger.error(f"rid='{req_id}' | Baidu {model} | stream error: string={s}")
                     continue
                 try:
                     json_data = json.loads(json_str)
                 except json.JSONDecodeError as e:
-                    logger.error(f"ReqId={req_id} | Baidu {model} | stream error: string={s}, error={e}")
+                    logger.error(f"rid='{req_id}' | Baidu {model} | stream error: string={s}, error={e}")
                     continue
 
                 if json_data["is_end"]:
-                    logger.info(f"ReqId={req_id} | Baidu {model} | usage: {json_data['usage']}")
+                    logger.info(f"rid='{req_id}' | Baidu {model} | usage: {json_data['usage']}")
                     break
                 txt += json_data["result"]
                 yield txt.encode("utf-8"), code
8 changes: 4 additions & 4 deletions src/retk/core/ai/llm/api/base.py
@@ -68,14 +68,14 @@ async def _complete(
             httpx.ConnectError,
             httpx.ReadTimeout,
         ) as e:
-            logger.error(f"ReqId={req_id} Model error: {e}")
+            logger.error(f"rid='{req_id}' Model error: {e}")
             return {}, const.CodeEnum.LLM_TIMEOUT
         except httpx.HTTPError as e:
-            logger.error(f"ReqId={req_id} Model error: {e}")
+            logger.error(f"rid='{req_id}' Model error: {e}")
             return {}, const.CodeEnum.LLM_SERVICE_ERROR
         if resp.status_code != 200:
             txt = resp.text.replace('\n', '')
-            logger.error(f"ReqId={req_id} Model error: {txt}")
+            logger.error(f"rid='{req_id}' Model error: {txt}")
             return {}, const.CodeEnum.LLM_SERVICE_ERROR
 
         rj = resp.json()
@@ -110,7 +110,7 @@ async def _stream_complete(
         ) as resp:
             if resp.status_code != 200:
                 await resp.aread()
-                logger.error(f"ReqId={req_id} Model error: {resp.text}")
+                logger.error(f"rid='{req_id}' Model error: {resp.text}")
                 yield resp.content, const.CodeEnum.LLM_SERVICE_ERROR
                 await client.aclose()
                 return
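
For orientation, base.py holds the shared request path that the vendor clients above build on, and this hunk shows how transport failures are mapped onto error codes. A condensed, self-contained sketch of that pattern, assuming a stand-in CodeEnum and print in place of the project's logger (neither is the project's actual definition):

import enum

import httpx

class CodeEnum(enum.Enum):  # stand-in for retk's const.CodeEnum
    OK = 0
    LLM_TIMEOUT = 1
    LLM_SERVICE_ERROR = 2

async def _complete(url: str, payload: dict, req_id: str, timeout: float = 60.0):
    # Connection failures and read timeouts map to LLM_TIMEOUT; every other
    # HTTP-level failure maps to LLM_SERVICE_ERROR, matching the hunk above.
    try:
        async with httpx.AsyncClient(timeout=timeout) as client:
            resp = await client.post(url, json=payload)
    except (httpx.ConnectError, httpx.ReadTimeout) as e:
        print(f"rid='{req_id}' Model error: {e}")  # logger.error in the real code
        return {}, CodeEnum.LLM_TIMEOUT
    except httpx.HTTPError as e:
        print(f"rid='{req_id}' Model error: {e}")
        return {}, CodeEnum.LLM_SERVICE_ERROR
    if resp.status_code != 200:
        print(f"rid='{req_id}' Model error: {resp.text}")
        return {}, CodeEnum.LLM_SERVICE_ERROR
    return resp.json(), CodeEnum.OK  # vendor clients parse this JSON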
6 changes: 3 additions & 3 deletions src/retk/core/ai/llm/api/openai.py
@@ -100,7 +100,7 @@ async def complete(
             return "", code
         if rj.get("error") is not None:
             return rj["error"]["message"], const.CodeEnum.LLM_SERVICE_ERROR
-        logger.info(f"ReqId={req_id} | {self.__class__.__name__} {model} | usage: {rj['usage']}")
+        logger.info(f"rid='{req_id}' | {self.__class__.__name__} {model} | usage: {rj['usage']}")
         return rj["choices"][0]["message"]["content"], code
 
     async def stream_complete(
@@ -126,15 +126,15 @@ async def stream_complete(
                 try:
                     json_data = json.loads(json_str)
                 except json.JSONDecodeError:
-                    logger.error(f"ReqId={req_id} | {self.__class__.__name__} {model} | stream error: json={json_str}")
+                    logger.error(f"rid='{req_id}' | {self.__class__.__name__} {model} | stream error: json={json_str}")
                     continue
                 choice = json_data["choices"][0]
                 if choice["finish_reason"] is not None:
                     try:
                         usage = json_data["usage"]
                     except KeyError:
                         usage = choice["usage"]
-                    logger.info(f"ReqId={req_id} | {self.__class__.__name__} {model} | usage: {usage}")
+                    logger.info(f"rid='{req_id}' | {self.__class__.__name__} {model} | usage: {usage}")
                     break
                 txt += choice["delta"]["content"]
                 yield txt.encode("utf-8"), code
10 changes: 5 additions & 5 deletions src/retk/core/ai/llm/api/tencent.py
@@ -139,7 +139,7 @@ def get_payload(self, model: Optional[str], messages: MessagesType, stream: bool
     def handle_err(req_id: str, error: Dict):
         msg = error.get("Message")
         code = error.get("Code")
-        logger.error(f"ReqId={req_id} | Tencent | error code={code}, msg={msg}")
+        logger.error(f"rid='{req_id}' | Tencent | error code={code}, msg={msg}")
         if code == 4001:
             ccode = const.CodeEnum.LLM_TIMEOUT
         elif code == "LimitExceeded":
@@ -155,7 +155,7 @@ def handle_normal_response(req_id: str, resp: Dict, stream: bool) -> Tuple[str,
             return "No response", const.CodeEnum.LLM_NO_CHOICE
         choice = choices[0]
         m = choice["Delta"] if stream else choice["Message"]
-        logger.info(f"ReqId={req_id} | Tencent | usage: {resp['Usage']}")
+        logger.info(f"rid='{req_id}' | Tencent | usage: {resp['Usage']}")
         return m["Content"], const.CodeEnum.OK
 
     async def complete(
@@ -212,16 +212,16 @@ async def stream_complete(
                 try:
                     json_str = s[6:]
                 except IndexError:
-                    logger.error(f"ReqId={req_id} | Tencent {model} | stream error: string={s}")
+                    logger.error(f"rid='{req_id}' | Tencent {model} | stream error: string={s}")
                     continue
                 try:
                     json_data = json.loads(json_str)
                 except json.JSONDecodeError as e:
-                    logger.error(f"ReqId={req_id} | Tencent {model} | stream error: string={s}, error={e}")
+                    logger.error(f"rid='{req_id}' | Tencent {model} | stream error: string={s}, error={e}")
                     continue
                 choice = json_data["Choices"][0]
                 if choice["FinishReason"] != "":
-                    logger.info(f"ReqId={req_id} | Tencent {model} | usage: {json_data['Usage']}")
+                    logger.info(f"rid='{req_id}' | Tencent {model} | usage: {json_data['Usage']}")
                     break
                 content = choice["Delta"]["Content"]
                 txt += content
8 changes: 4 additions & 4 deletions src/retk/core/ai/llm/api/xfyun.py
@@ -94,7 +94,7 @@ async def complete(
             return "", code
         if rj["code"] != 0:
             return rj["message"], const.CodeEnum.LLM_SERVICE_ERROR
-        logger.info(f"ReqId={req_id} | {self.__class__.__name__} {model} | usage: {rj['usage']}")
+        logger.info(f"rid='{req_id}' | {self.__class__.__name__} {model} | usage: {rj['usage']}")
         return rj["choices"][0]["message"]["content"], code
 
     async def stream_complete(
@@ -122,11 +122,11 @@ async def stream_complete(
                 try:
                     json_data = json.loads(json_str)
                 except json.JSONDecodeError:
-                    logger.error(f"ReqId={req_id} | {self.__class__.__name__} {model} | stream error: json={json_str}")
+                    logger.error(f"rid='{req_id}' | {self.__class__.__name__} {model} | stream error: json={json_str}")
                     continue
                 if json_data["code"] != 0:
                     logger.error(
-                        f"ReqId={req_id} | {self.__class__.__name__} {model} | error:"
+                        f"rid='{req_id}' | {self.__class__.__name__} {model} | error:"
                         f" code={json_data['code']} {json_data['message']}"
                     )
                     break
@@ -136,7 +136,7 @@ async def stream_complete(
                 except KeyError:
                     pass
                 else:
-                    logger.info(f"ReqId={req_id} | {self.__class__.__name__} {model} | usage: {usage}")
+                    logger.info(f"rid='{req_id}' | {self.__class__.__name__} {model} | usage: {usage}")
                     break
                 txt += choice["delta"]["content"]
                 yield txt.encode("utf-8"), code
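
Since the rename is mechanical across the six files above, a short sweep makes a handy regression check. A hypothetical verification script (the directory matches the paths shown in this diff; the script itself is not part of the commit):

from pathlib import Path

# Flag any surviving old-style "ReqId=" prefix under the touched package.
leftovers = [
    (str(path), lineno, line.strip())
    for path in Path("src/retk/core/ai/llm/api").glob("*.py")
    for lineno, line in enumerate(path.read_text(encoding="utf-8").splitlines(), start=1)
    if "ReqId=" in line
]
assert not leftovers, f"old log prefix still present: {leftovers}"
print("all log lines use the rid='...' prefix")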
