Commit

Support advanced search results
Nv7-GitHub committed Feb 11, 2022
1 parent 058505a commit be88ef6
Showing 2 changed files with 206 additions and 28 deletions.
152 changes: 152 additions & 0 deletions .gitignore
@@ -0,0 +1,152 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
82 changes: 54 additions & 28 deletions googlesearch/__init__.py
@@ -1,37 +1,63 @@
 from bs4 import BeautifulSoup
 from requests import get
 
-usr_agent = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
-
-def search(term, num_results=10, lang="en", proxy=None):
-    def fetch_results(search_term, number_results, language_code):
-        escaped_search_term = search_term.replace(' ', '+')
-
-        google_url = 'https://www.google.com/search?q={}&num={}&hl={}'.format(escaped_search_term, number_results+1,
-                                                                              language_code)
-        proxies = None
-        if proxy:
-            if proxy[:5]=="https":
-                proxies = {"https":proxy}
-            else:
-                proxies = {"http":proxy}
-
-        response = get(google_url, headers=usr_agent, proxies=proxies)
-        response.raise_for_status()
-
-        return response.text
-
-    def parse_results(raw_html):
-        soup = BeautifulSoup(raw_html, 'html.parser')
-        result_block = soup.find_all('div', attrs={'class': 'g'})
-        for result in result_block:
-            link = result.find('a', href=True)
-            title = result.find('h3')
-            if link and title:
-                yield link['href']
-
-    html = fetch_results(term, num_results, lang)
-    return list(parse_results(html))
+usr_agent = {
+    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) '
+                  'Chrome/61.0.3163.100 Safari/537.36'}
+
+def _req(term, results, lang, start, proxies):
+    resp = get(
+        url="https://www.google.com/search",
+        headers=usr_agent,
+        params=dict(
+            q = term,
+            num = results + 2,  # Prevents multiple requests
+            hl = lang,
+            start = start,
+        ),
+        proxies=proxies,
+    )
+    resp.raise_for_status()
+    return resp
+
+class SearchResult:
+    def __init__(self, url, title, description):
+        self.url = url
+        self.title = title
+        self.description = description
+
+    def __repr__(self):
+        return f"SearchResult(url={self.url}, title={self.title}, description={self.description})"
+
+def search(term, num_results=10, lang="en", proxy=None, advanced=False):
+    escaped_term = term.replace(' ', '+')
+
+    # Proxy
+    proxies = None
+    if proxy:
+        if proxy[:5]=="https":
+            proxies = {"https": proxy}
+        else:
+            proxies = {"http": proxy}
+
+    # Fetch
+    start = 0
+    while start < num_results:
+        # Send request
+        resp = _req(escaped_term, num_results-start, lang, start, proxies)
+
+        # Parse
+        soup = BeautifulSoup(resp.text, 'html.parser')
+        result_block = soup.find_all('div', attrs={'class': 'g'})
+        for result in result_block:
+            # Find link, title, description
+            link = result.find('a', href=True)
+            title = result.find('h3')
+            description_box = result.find('div', {'style': '-webkit-line-clamp:2'})
+            if description_box:
+                description = description_box.find('span')
+                if link and title and description:
+                    start += 1
+                    if advanced:
+                        yield SearchResult(link['href'], title.text, description.text)
+                    else:
+                        yield link['href']
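For reference, a minimal usage sketch of the API after this commit; the query string and result count below are illustrative, not part of the change. search is now a generator: by default it still yields plain URL strings, while the new advanced=True flag yields the SearchResult objects introduced above.

from googlesearch import search

# Default mode: yields result URLs as plain strings, as before.
for url in search("python web scraping", num_results=5):
    print(url)

# Advanced mode (new in this commit): yields SearchResult objects
# carrying the url, title, and description parsed from each result block.
for result in search("python web scraping", num_results=5, advanced=True):
    print(result.title, "->", result.url)

Note that the old implementation returned a list; callers that relied on that can wrap the call in list(...). Because results are now yielded as they are parsed, a caller can also break out of the loop early, and no further result pages are requested.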
