web_reader_tool.py

import hashlib
import json
import os
import re
import site
import subprocess
import tempfile
import unicodedata
from contextlib import contextmanager

import requests
from bs4 import BeautifulSoup, CData, Comment, NavigableString
from newspaper import Article
from regex import regex

from core.rag.extractor import extract_processor
from core.rag.extractor.extract_processor import ExtractProcessor

FULL_TEMPLATE = """
TITLE: {title}
AUTHORS: {authors}
PUBLISH DATE: {publish_date}
TOP_IMAGE_URL: {top_image}
TEXT:
{text}
"""


def page_result(text: str, cursor: int, max_length: int) -> str:
    """Page through `text` and return a substring of `max_length` characters starting from `cursor`."""
    return text[cursor: cursor + max_length]
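
# A minimal paging sketch (variable names are hypothetical, not from the original source):
#   first_page = page_result(full_text, cursor=0, max_length=4000)
#   second_page = page_result(full_text, cursor=4000, max_length=4000)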


def get_url(url: str, user_agent: str = None) -> str:
    """Fetch URL and return the contents as a string."""
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36"
    }
    if user_agent:
        headers["User-Agent"] = user_agent

    supported_content_types = extract_processor.SUPPORT_URL_CONTENT_TYPES + ["text/html"]

    response = requests.get(url, headers=headers, allow_redirects=True, timeout=(5, 10))

    if response.status_code != 200:
        return "URL returned status code {}.".format(response.status_code)

    # check content-type; default to an empty string if the header is missing
    main_content_type = response.headers.get('Content-Type', '').split(';')[0].strip()
    if main_content_type not in supported_content_types:
        return "Unsupported content-type [{}] of URL.".format(main_content_type)

    if main_content_type in extract_processor.SUPPORT_URL_CONTENT_TYPES:
        return ExtractProcessor.load_from_url(url, return_text=True)

    a = extract_using_readabilipy(response.text)
    if not a['plain_text'] or not a['plain_text'].strip():
        return get_url_from_newspaper3k(url)

    res = FULL_TEMPLATE.format(
        title=a['title'],
        authors=a['byline'],
        publish_date=a['date'],
        top_image="",
        text=a['plain_text'] if a['plain_text'] else "",
    )
    return res
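
# Usage sketch (the URL below is illustrative only):
#   content = get_url("https://example.com/some-article")
#   print(page_result(content, cursor=0, max_length=2000))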


def get_url_from_newspaper3k(url: str) -> str:
    """Fallback extraction with newspaper3k, used when Readability returns no text."""
    a = Article(url)
    a.download()
    a.parse()

    res = FULL_TEMPLATE.format(
        title=a.title,
        authors=a.authors,
        publish_date=a.publish_date,
        top_image=a.top_image,
        text=a.text,
    )
    return res


def extract_using_readabilipy(html):
    with tempfile.NamedTemporaryFile(delete=False, mode='w+') as f_html:
        f_html.write(html)
        f_html.close()
    html_path = f_html.name

    # Call Mozilla's Readability.js Readability.parse() function via node, writing output to a temporary file
    article_json_path = html_path + ".json"
    jsdir = os.path.join(find_module_path('readabilipy'), 'javascript')
    with chdir(jsdir):
        subprocess.check_call(["node", "ExtractArticle.js", "-i", html_path, "-o", article_json_path])

    # Read output of call to Readability.parse() from JSON file and return as Python dictionary
    with open(article_json_path, encoding="utf-8") as json_file:
        input_json = json.loads(json_file.read())

    # Deleting files after processing
    os.unlink(article_json_path)
    os.unlink(html_path)

    article_json = {
        "title": None,
        "byline": None,
        "date": None,
        "content": None,
        "plain_content": None,
        "plain_text": None
    }
    # Populate article fields from readability fields where present
    if input_json:
        if "title" in input_json and input_json["title"]:
            article_json["title"] = input_json["title"]
        if "byline" in input_json and input_json["byline"]:
            article_json["byline"] = input_json["byline"]
        if "date" in input_json and input_json["date"]:
            article_json["date"] = input_json["date"]
        if "content" in input_json and input_json["content"]:
            article_json["content"] = input_json["content"]
            article_json["plain_content"] = plain_content(article_json["content"], False, False)
            article_json["plain_text"] = extract_text_blocks_as_plain_text(article_json["plain_content"])
        if "textContent" in input_json and input_json["textContent"]:
            article_json["plain_text"] = input_json["textContent"]
            article_json["plain_text"] = re.sub(r'\n\s*\n', '\n', article_json["plain_text"])

    return article_json
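
# Illustrative usage (assumes Node.js and readabilipy's bundled ExtractArticle.js are available):
#   article = extract_using_readabilipy("<html>...</html>")
#   article["plain_text"]  # extracted body text, or None when nothing could be extracted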


def find_module_path(module_name):
    for package_path in site.getsitepackages():
        potential_path = os.path.join(package_path, module_name)
        if os.path.exists(potential_path):
            return potential_path

    return None


@contextmanager
def chdir(path):
    """Change directory in context and return to original on exit"""
    # From https://stackoverflow.com/a/37996581, couldn't find a built-in
    original_path = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(original_path)
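
# Minimal usage sketch (the directory is hypothetical):
#   with chdir("/tmp"):
#       print(os.getcwd())  # the temporary directory is the working directory here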


def extract_text_blocks_as_plain_text(paragraph_html):
    # Load article as DOM
    soup = BeautifulSoup(paragraph_html, 'html.parser')
    # Select all lists
    list_elements = soup.find_all(['ul', 'ol'])
    # Prefix text in all list items with "* " and make lists paragraphs
    for list_element in list_elements:
        plain_items = "".join(list(filter(None, [plain_text_leaf_node(li)["text"] for li in list_element.find_all('li')])))
        list_element.string = plain_items
        list_element.name = "p"
    # Select all text blocks
    text_blocks = [s.parent for s in soup.find_all(string=True)]
    text_blocks = [plain_text_leaf_node(block) for block in text_blocks]
    # Drop empty paragraphs
    text_blocks = list(filter(lambda p: p["text"] is not None, text_blocks))
    return text_blocks
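
# Illustrative call: extract_text_blocks_as_plain_text("<p>Hello <b>world</b></p>")
# returns a list of {"text": ...} dicts, one per text-bearing block in the markup.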


def plain_text_leaf_node(element):
    # Extract all text, stripped of any child HTML elements and normalise it
    plain_text = normalise_text(element.get_text())
    if plain_text != "" and element.name == "li":
        plain_text = "* {}, ".format(plain_text)
    if plain_text == "":
        plain_text = None
    if "data-node-index" in element.attrs:
        plain = {"node_index": element["data-node-index"], "text": plain_text}
    else:
        plain = {"text": plain_text}
    return plain


def plain_content(readability_content, content_digests, node_indexes):
    # Load article as DOM
    soup = BeautifulSoup(readability_content, 'html.parser')
    # Make all elements plain
    elements = plain_elements(soup.contents, content_digests, node_indexes)
    if node_indexes:
        # Add node index attributes to nodes
        elements = [add_node_indexes(element) for element in elements]
    # Replace article contents with plain elements
    soup.contents = elements
    return str(soup)


def plain_elements(elements, content_digests, node_indexes):
    # Get plain content versions of all elements
    elements = [plain_element(element, content_digests, node_indexes)
                for element in elements]
    if content_digests:
        # Add content digest attribute to nodes
        elements = [add_content_digest(element) for element in elements]
    return elements


def plain_element(element, content_digests, node_indexes):
    # For lists, we make each item plain text
    if is_leaf(element):
        # For leaf node elements, extract the text content, discarding any HTML tags
        # 1. Get element contents as text
        plain_text = element.get_text()
        # 2. Normalise the extracted text string to a canonical representation
        plain_text = normalise_text(plain_text)
        # 3. Update element content to be plain text
        element.string = plain_text
    elif is_text(element):
        if is_non_printing(element):
            # The simplified HTML may have come from Readability.js so might
            # have non-printing text (e.g. Comment or CData). In this case, we
            # keep the structure, but ensure that the string is empty.
            element = type(element)("")
        else:
            plain_text = element.string
            plain_text = normalise_text(plain_text)
            element = type(element)(plain_text)
    else:
        # If not a leaf node or a text node, recurse into the children and
        # replace them with their plain versions
        element.contents = plain_elements(element.contents, content_digests, node_indexes)
    return element


def add_node_indexes(element, node_index="0"):
    # Can't add attributes to string types
    if is_text(element):
        return element
    # Add index to current element
    element["data-node-index"] = node_index
    # Add index to child elements
    for local_idx, child in enumerate(
            [c for c in element.contents if not is_text(c)], start=1):
        # Can't add attributes to leaf string types
        child_index = "{stem}.{local}".format(
            stem=node_index, local=local_idx)
        add_node_indexes(child, node_index=child_index)
    return element
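
# Indexes form dotted paths: the root element gets "0", its element children
# "0.1", "0.2", ..., grandchildren "0.1.1", and so on (text nodes are skipped).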


def normalise_text(text):
    """Normalise unicode and whitespace."""
    # Normalise unicode first to try and standardise whitespace characters as much as possible before normalising them
    text = strip_control_characters(text)
    text = normalise_unicode(text)
    text = normalise_whitespace(text)
    return text


def strip_control_characters(text):
    """Strip out unicode control characters which might break the parsing."""
    # Unicode control characters
    #   [Cc]: Other, Control [includes new lines]
    #   [Cf]: Other, Format
    #   [Cn]: Other, Not Assigned
    #   [Co]: Other, Private Use
    #   [Cs]: Other, Surrogate
    control_chars = {'Cc', 'Cf', 'Cn', 'Co', 'Cs'}
    retained_chars = ['\t', '\n', '\r', '\f']

    # Remove non-printing control characters
    return "".join(["" if (unicodedata.category(char) in control_chars) and (char not in retained_chars) else char for char in text])


def normalise_unicode(text):
    """Normalise unicode such that things that are visually equivalent map to the same unicode string where possible."""
    normal_form = "NFKC"
    text = unicodedata.normalize(normal_form, text)
    return text


def normalise_whitespace(text):
    """Replace runs of whitespace characters with a single space as this is what happens when HTML text is displayed."""
    text = regex.sub(r"\s+", " ", text)
    # Remove leading and trailing whitespace
    text = text.strip()
    return text
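
# Example: normalise_text("Caf\u00e9\u00a0  menu\r\n") == "Café menu"
# (NFKC folds the non-breaking space to a plain space, then the whitespace run
# collapses to a single space and the result is trimmed).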


def is_leaf(element):
    return (element.name in ['p', 'li'])


def is_text(element):
    return isinstance(element, NavigableString)


def is_non_printing(element):
    return any(isinstance(element, _e) for _e in [Comment, CData])


def add_content_digest(element):
    if not is_text(element):
        element["data-content-digest"] = content_digest(element)
    return element


def content_digest(element):
    if is_text(element):
        # Hash
        trimmed_string = element.string.strip()
        if trimmed_string == "":
            digest = ""
        else:
            digest = hashlib.sha256(trimmed_string.encode('utf-8')).hexdigest()
    else:
        contents = element.contents
        num_contents = len(contents)
        if num_contents == 0:
            # No hash when no child elements exist
            digest = ""
        elif num_contents == 1:
            # If single child, use digest of child
            digest = content_digest(contents[0])
        else:
            # Build content digest from the "non-empty" digests of child nodes
            digest = hashlib.sha256()
            child_digests = list(
                filter(lambda x: x != "", [content_digest(content) for content in contents]))
            for child in child_digests:
                digest.update(child.encode('utf-8'))
            digest = digest.hexdigest()
    return digest
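
# Example: an element with a single text child hashes to the SHA-256 of its trimmed text, e.g.
#   content_digest(BeautifulSoup("<p>hello</p>", "html.parser").p)
#   == hashlib.sha256(b"hello").hexdigest()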