#!/usr/bin/env python3
# -*- coding: utf-8 -*-
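"""Extracts article links from a source page, downloads each article,
prints its readability summary, and saves the article text as speech
(an MP3 file) with gTTS."""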
import argparse
import logging
from newspaper import Article, build, Config
from bs4 import BeautifulSoup
from contextlib import closing
from requests import get, Response
from requests.exceptions import RequestException
from re import findall
from readability import Document
from gtts import gTTS
from datetime import datetime
from typing import Optional
class Argparser(object):
    def __init__(self):
        parser = argparse.ArgumentParser()
        parser.add_argument(
            "--source",
            type=str, help="the URL of the page to extract article links from")
        parser.add_argument("--bool", action="store_true",
                            help="a generic boolean flag (currently unused)",
                            default=False)
        self.args = parser.parse_args()

# TODO-maybe actually really do some logging
def logError(err: str) -> None:
    """Logs request errors."""
    logging.exception(err)

def isAGoodResponse(resp: Response) -> bool:
    """Checks whether the GET we sent got a 200 HTML response."""
    content_type = resp.headers.get("Content-Type", "").lower()
    return resp.status_code == 200 and "html" in content_type

def simpleGet(url: str) -> Optional[bytes]:
    """Issues a simple GET request to download a website."""
    try:
        with closing(get(url, stream=True)) as resp:
            if isAGoodResponse(resp):
                return resp.content
            return None
    except RequestException as e:
        logError("Error during requests to {0}: {1}".format(url, str(e)))
        return None

def getURLS(source: str) -> dict:
    """Extracts the URLs from a website."""
    result = dict()
    raw_ml = simpleGet(source)
    if raw_ml is None:
        return result
    ml = BeautifulSoup(raw_ml, "lxml")
    ml_str = repr(ml)
    # dump the markup to a scratch file, then scan it line by line for links
    with open("/tmp/riecher", "w") as tmp:
        tmp.write(ml_str)
    dump_list = []
    with open("/tmp/riecher", "r") as tmp:
        for line in tmp:
            dump_list += findall(
                r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*(),]|'
                r'(?:%[0-9a-fA-F][0-9a-fA-F]))+', line)
    for elem in dump_list:
        result[elem] = elem
    return result

def configNews(config: Config) -> None:
    """Configures newspaper."""
    config.fetch_images = False       # skip image downloads
    config.keep_article_html = True   # keep each article's HTML after parsing
    config.memoize_articles = False   # do not cache already-seen articles
    config.browser_user_agent = "Chrome/91.0.4464.5"

def main() -> None:
    argparser = Argparser()
    config = Config()
    configNews(config)
    urls = getURLS(argparser.args.source)
    for url in urls:
        parser = build(url, config=config)
        for article in parser.articles:
            a = Article(article.url, config=config)
            try:
                a.download()
                a.parse()
                doc = Document(a.html)
                print(doc.summary())
                if a.text != '':
                    tts = gTTS(a.text)
                    tts.save(datetime.today().strftime(
                        "%b-%d-%Y-%M-%S-%f") + ".mp3")
            except Exception as e:
                logging.exception(e)

if __name__ == "__main__":
    main()
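
# Example usage (the script name is illustrative):
#   python3 riecher.py --source https://example.com/links-page
# For every article discovered, the readability summary is printed and, if the
# article has any text, an MP3 of that text is saved in the working directory.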