This repository has been archived by the owner on Jun 2, 2024. It is now read-only.

Commit

implement status to not trigger streamer fetch on monitoring request
- rework streamer fetching again
Nickwasused committed May 4, 2024
1 parent ef4c0d9 commit f91619b
Showing 2 changed files with 72 additions and 84 deletions.
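The substance of the change: the Twitch fetch in fetch.py now sits behind a module-level TTL cache from cachetools, and main.py gains a /status route that answers without touching it, so monitoring requests no longer trigger a streamer fetch. Below is a minimal sketch of that caching pattern, assuming cachetools is installed; the function name and values are illustrative, not the project's code.

import cachetools.func


@cachetools.func.ttl_cache(ttl=250)
def expensive_fetch() -> tuple:
    # stand-in for the Twitch Helix request in fetch.py
    print("hitting the API")
    return ("streamer_a", "streamer_b")


expensive_fetch()  # performs the fetch and caches the result
expensive_fetch()  # within 250 s: served from the cache, no API call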
138 changes: 65 additions & 73 deletions fetch.py
@@ -2,96 +2,88 @@
 from dotenv import load_dotenv
 from models import Streamer
 from auth import Auth
+import cachetools.func
 import urllib3
 import logging
-import time
 import os
 
 logger = logging.getLogger(__name__)
 load_dotenv()
 
 
-class Handler:
-    def __init__(self):
-        self.streamers = []
-        self.client_id = os.getenv("CLIENT_ID")
-        self.game_id = os.getenv("GAME_ID")
-        self.lang = os.getenv("TWITCH_LANG")
-        self.default_url = f"https://api.twitch.tv/helix/streams?first=100&type=live&language={self.lang}&game_id={self.game_id}"
-        self.auth = Auth()
+client_id = os.getenv("CLIENT_ID")
+game_id = os.getenv("GAME_ID")
+lang = os.getenv("TWITCH_LANG")
+default_url = f"https://api.twitch.tv/helix/streams?first=100&type=live&language={lang}&game_id={game_id}"
+auth = Auth()
 
-    def get_streamers(self) -> [int, float]:
-        start = time.perf_counter()
-        streamers: [Streamer] = []
-
-        with urllib3.PoolManager(num_pools=50) as http:
-            fetching = True
-            tmp_cursor = ""
-            http.headers = {
-                "Authorization": f"Bearer {self.auth.token}",
-                "content-type": "application/json",
-                "client-id": self.client_id,
-            }
+
+@cachetools.func.ttl_cache(ttl=250)
+def streamers() -> [Streamer]:
+    with urllib3.PoolManager(num_pools=50) as http:
+        fetching = True
+        tmp_cursor = ""
+        http.headers = {
+            "Authorization": f"Bearer {auth.token}",
+            "content-type": "application/json",
+            "client-id": client_id,
+        }
+        tmp_streamers = []
 
-            while fetching:
-                tmp_url = self.default_url
+        while fetching:
+            tmp_url = default_url
 
-                if tmp_cursor:
-                    tmp_url += f"&after={tmp_cursor}"
+            if tmp_cursor:
+                tmp_url += f"&after={tmp_cursor}"
 
-                try:
-                    stream_request = http.request("GET", tmp_url)
-                except Exception as e:
-                    logger.error(f"exception while fetching streams: {e}")
-                    fetching = False
-                    continue
+            try:
+                stream_request = http.request("GET", tmp_url)
+            except Exception as e:
+                logger.error(f"exception while fetching streams: {e}")
+                fetching = False
+                continue
 
-                if stream_request.status == 401:
-                    try:
-                        self.auth.get_token()
-                        continue
-                    except Exception as e:
-                        logger.error(f"exception while getting token: {e}")
-                        fetching = False
-                        continue
+            if stream_request.status == 401:
+                try:
+                    auth.get_token()
+                    continue
+                except Exception as e:
+                    logger.error(f"exception while getting token: {e}")
+                    fetching = False
+                    continue
 
-                tmp_fetched_streams = stream_request.json()
-                if not tmp_fetched_streams:
-                    logger.error("fetched streams but with error")
-                    fetching = False
-                else:
-                    streamers = [*streamers, *tmp_fetched_streams["data"]]
+            tmp_fetched_streams = stream_request.json()
+            if not tmp_fetched_streams:
+                logger.error("fetched streams but with error")
+                fetching = False
+            else:
+                tmp_streamers = [*tmp_streamers, *tmp_fetched_streams["data"]]
 
-                    if (
-                        not tmp_fetched_streams["pagination"]
-                        or tmp_fetched_streams["pagination"]["cursor"] == "IA"
-                    ):
-                        fetching = False
-                        continue
-                    else:
-                        tmp_cursor = tmp_fetched_streams["pagination"]["cursor"]
+                if (
+                    not tmp_fetched_streams["pagination"]
+                    or tmp_fetched_streams["pagination"]["cursor"] == "IA"
+                ):
+                    fetching = False
+                    continue
+                else:
+                    tmp_cursor = tmp_fetched_streams["pagination"]["cursor"]
 
-        tmp_converted_streams: [Streamer] = []
-        for item in streamers:
-            del item["tag_ids"]
-            tmp_converted_streams.append(Streamer(**item))
-
-        # deduplication
-        tmp_converted_streams = list(dict.fromkeys(tmp_converted_streams))
-        tmp_count = len(tmp_converted_streams)
-        self.streamers = tmp_converted_streams
-        end = time.perf_counter() - start
-        return tmp_count, end
-
-    def filter_streams(self, query: str):
-        query_words = query.split()
-        filtered_streamers = [
-            streamer
-            for streamer in self.streamers
-            if all(word.lower() in streamer.title.lower() for word in query_words)
-        ]
-
-        return filtered_streamers
-
-    def __exit__(self, exc_type, exc_val, exc_tb):
-        self.streamers = []
+    tmp_converted_streams: [Streamer] = []
+    for item in tmp_streamers:
+        del item["tag_ids"]
+        tmp_converted_streams.append(Streamer(**item))
+
+    # deduplication
+    tmp_converted_streams = list(dict.fromkeys(tmp_converted_streams))
+    return tmp_converted_streams
+
+
+def filter_streams(query: str):
+    query_words = query.split()
+    filtered_streamers = [
+        streamer
+        for streamer in streamers()
+        if all(word.lower() in streamer.title.lower() for word in query_words)
+    ]
+
+    return filtered_streamers
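With the Handler class gone, callers import the module-level functions from fetch.py directly; repeated calls within the 250-second TTL reuse the cached streamer list instead of re-querying Twitch. A rough usage sketch, assuming CLIENT_ID, GAME_ID, TWITCH_LANG and working Twitch credentials are configured via .env (the query string is illustrative):

from fetch import streamers, filter_streams

all_streams = streamers()       # first call hits the Twitch Helix API
matches = filter_streams("rp")  # reuses the cached list, filters by title words
print(len(all_streams), len(matches))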
18 changes: 7 additions & 11 deletions main.py
@@ -4,8 +4,7 @@
 from fastapi.encoders import jsonable_encoder
 from models import Streamer, ResponseModel
 from fastapi import FastAPI
-from fetch import Handler
-import cachetools.func
+from fetch import filter_streams
 import logging
 import uvicorn
 
@@ -21,20 +20,12 @@
 )
 
 
-@cachetools.func.ttl_cache(ttl=10 * 60)
-def get_data():
-    tmp_handler = Handler()
-    tmp_handler.get_streamers()
-    return tmp_handler
-
-
 @app.get("/search")
 def search(
     query: str | None = None,
 ) -> ResponseModel:
     if query:
-        tmp_handler = get_data()
-        tmp_streamers: list[Streamer] = tmp_handler.filter_streams(query)
+        tmp_streamers: list[Streamer] = filter_streams(query)
         return jsonable_encoder(
             {
                 "status": 200,
@@ -48,6 +39,11 @@ def search(
         )
 
 
+@app.get("/status")
+def status() -> ResponseModel:
+    return jsonable_encoder({"status": 200, "message": "ok", "data": []})
+
+
 @app.get("/")
 def index() -> RedirectResponse:
     return RedirectResponse("/docs", 301)
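The new /status route returns a static payload, so a monitoring probe like the sketch below never reaches streamers() or the Twitch API. This is a hedged example using FastAPI's TestClient, assuming httpx is installed and the .env credentials that fetch.py's Auth expects are present (importing main also imports fetch):

from fastapi.testclient import TestClient
from main import app

client = TestClient(app)

response = client.get("/status")  # lightweight health check, no streamer fetch
print(response.status_code, response.json())
# expected: 200 {"status": 200, "message": "ok", "data": []}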

0 comments on commit f91619b
