author     Bryan Newbold <bnewbold@archive.org>  2020-11-08 12:27:31 -0800
committer  Bryan Newbold <bnewbold@archive.org>  2020-11-08 12:27:31 -0800
commit     189eac07c2b559827fcd70c330821a940e65301d (patch)
tree       3d5c837b34bf0fd106dc96153e0e10dc1868e9e6 /python
parent     65d9bf7abe6def492bb14b9632d1a30f9980ff7f (diff)
html: handle no-capture for sub-resources
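
For orientation, a minimal self-contained sketch of the flow this commit introduces: a dedicated NoCaptureError for HTML sub-resources with no Wayback capture, which the ingest worker maps to an 'html-resource-no-capture' status. The helper names (fetch_sub_resource, ingest_html) and the lookup callable are hypothetical stand-ins, not sandcrawler functions; only the exception name and status string come from the diff below.

# Hypothetical stand-ins for the sandcrawler helpers changed in this commit.

class NoCaptureError(Exception):
    pass

def fetch_sub_resource(lookup, url):
    # `lookup` stands in for CdxApiClient.lookup_best / WaybackClient.lookup_resource
    row = lookup(url)
    if not row:
        raise NoCaptureError(f"HTML sub-resource not found: {url}")
    return row

def ingest_html(resources, lookup):
    result = {"status": "success"}
    try:
        result["resources"] = [fetch_sub_resource(lookup, r["url"]) for r in resources]
    except NoCaptureError as e:
        # mirrors IngestFileWorker: convert the exception into a terminal status
        result["status"] = "html-resource-no-capture"
        result["error_message"] = str(e)[:1600]
    return result

if __name__ == "__main__":
    print(ingest_html([{"url": "https://example.com/fig1.png"}], lambda url: None))
    # {'status': 'html-resource-no-capture', 'error_message': 'HTML sub-resource not found: ...'}
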
Diffstat (limited to 'python')
-rw-r--r--  python/sandcrawler/html_ingest.py  13
-rw-r--r--  python/sandcrawler/ia.py            3
-rw-r--r--  python/sandcrawler/ingest.py        6
3 files changed, 13 insertions(+), 9 deletions(-)
diff --git a/python/sandcrawler/html_ingest.py b/python/sandcrawler/html_ingest.py
index f2819c2..6b71115 100644
--- a/python/sandcrawler/html_ingest.py
+++ b/python/sandcrawler/html_ingest.py
@@ -11,7 +11,7 @@ import trafilatura
 import pydantic
 from selectolax.parser import HTMLParser

-from sandcrawler.ia import WaybackClient, CdxApiClient, ResourceResult, cdx_to_dict, fix_transfer_encoding
+from sandcrawler.ia import WaybackClient, CdxApiClient, ResourceResult, cdx_to_dict, fix_transfer_encoding, NoCaptureError, WaybackError
 from sandcrawler.misc import gen_file_metadata, parse_cdx_datetime, datetime_to_cdx
 from sandcrawler.html_metadata import BiblioMetadata, html_extract_resources, html_extract_biblio, load_adblock_rules
@@ -124,11 +124,9 @@ def quick_fetch_html_resources(resources: List[dict], cdx_client: CdxApiClient,
     for resource in resources:
         cdx_row = cdx_client.lookup_best(resource['url'], closest=closest)
         if not cdx_row:
-            raise Exception("CDX lookup failed")
+            raise NoCaptureError(f"HTML sub-resource not found: {resource['url']}")
         if cdx_row.url != resource['url']:
-            pass
-            #raise Exception(
-            #    f"CDX lookup URL mismatch: {cdx_row.url} != {resource['url']}")
+            print(f" WARN: CDX fuzzy match: {cdx_row.url} != {resource['url']}", file=sys.stderr)
         full.append(WebResource(
             surt=cdx_row.surt,
             timestamp=cdx_row.datetime,
@@ -157,11 +155,10 @@ def fetch_html_resources(resources: List[dict], wayback_client: WaybackClient, w
     for resource in resources:
         wayback_resp = wayback_client.lookup_resource(resource['url'], closest=closest)
         if not wayback_resp or wayback_resp.status != 'success':
-            # TODO: raise a specific exception so we can catch it elsewhere?
-            raise Exception("wayback lookup failed")
+            raise NoCaptureError(f"HTML sub-resource not found: {resource['url']}")
         file_meta = gen_file_metadata(wayback_resp.body)
         if file_meta['sha1hex'] != wayback_resp.cdx.sha1hex:
-            raise Exception("wayback payload sha1hex mismatch")
+            raise WaybackError(f"wayback payload sha1hex mismatch: {wayback_resp.cdx.url}")
         full.append(WebResource(
             surt=wayback_resp.cdx.surt,
             timestamp=parse_cdx_datetime(wayback_resp.cdx.datetime),
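
The second raise above guards payload integrity rather than availability: the SHA-1 of the fetched body must match the sha1hex carried in the CDX row. A self-contained illustration of that check (compute_sha1hex is a local helper for this sketch, not a sandcrawler function):

import hashlib

def compute_sha1hex(body: bytes) -> str:
    # hex-encoded SHA-1, the same form the diff compares against (cdx.sha1hex)
    return hashlib.sha1(body).hexdigest()

body = b"<html><body>hello</body></html>"
cdx_sha1hex = compute_sha1hex(body)           # what the index recorded at crawl time
assert compute_sha1hex(body) == cdx_sha1hex   # a mismatch triggers WaybackError above
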
diff --git a/python/sandcrawler/ia.py b/python/sandcrawler/ia.py
index da667b6..639fab8 100644
--- a/python/sandcrawler/ia.py
+++ b/python/sandcrawler/ia.py
@@ -310,6 +310,9 @@ class WaybackContentError(Exception):
 class PetaboxError(Exception):
     pass

+class NoCaptureError(Exception):
+    pass
+
 class WaybackClient:

     def __init__(self, cdx_client=None, **kwargs):
diff --git a/python/sandcrawler/ingest.py b/python/sandcrawler/ingest.py
index 2f9c523..aedf2ff 100644
--- a/python/sandcrawler/ingest.py
+++ b/python/sandcrawler/ingest.py
@@ -10,7 +10,7 @@ from http.server import BaseHTTPRequestHandler, HTTPServer
 from collections import namedtuple
 from selectolax.parser import HTMLParser

-from sandcrawler.ia import SavePageNowClient, CdxApiClient, WaybackClient, WaybackError, WaybackContentError, SavePageNowError, CdxApiError, PetaboxError, cdx_to_dict, ResourceResult, fix_transfer_encoding
+from sandcrawler.ia import SavePageNowClient, CdxApiClient, WaybackClient, WaybackError, WaybackContentError, SavePageNowError, CdxApiError, PetaboxError, cdx_to_dict, ResourceResult, fix_transfer_encoding, NoCaptureError
 from sandcrawler.grobid import GrobidClient
 from sandcrawler.pdfextract import process_pdf, PdfExtractResult
 from sandcrawler.misc import gen_file_metadata, clean_url, parse_cdx_datetime
@@ -393,6 +393,10 @@ class IngestFileWorker(SandcrawlerWorker):
                 partial_result['status'] = 'wayback-content-error'
                 partial_result['error_message'] = str(e)[:1600]
                 return partial_result
+            except NoCaptureError as e:
+                partial_result['status'] = 'html-resource-no-capture'
+                partial_result['error_message'] = str(e)[:1600]
+                return partial_result
         if self.htmlteixml_sink and html_body['status'] == "success":
             self.htmlteixml_sink.push_record(html_body, key=file_meta['sha1hex'])
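
Downstream, 'html-resource-no-capture' is just another terminal ingest status; a hedged sketch (file name and record shape are illustrative, not part of this commit) of tallying it from a JSON-lines dump of ingest results:

import json
from collections import Counter

counts = Counter()
with open("ingest_results.json") as f:   # placeholder file name
    for line in f:
        record = json.loads(line)
        counts[record.get("status", "unknown")] += 1

print(counts["html-resource-no-capture"])
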