 proposals/20201012_no_capture.md | 36 ++++++++++++++++++++++++++++++++++++
 python/sandcrawler/ia.py         |  2 +-
 python/sandcrawler/ingest.py     |  4 ++--
 3 files changed, 39 insertions(+), 3 deletions(-)
diff --git a/proposals/20201012_no_capture.md b/proposals/20201012_no_capture.md
new file mode 100644
index 0000000..bb47ea2
--- /dev/null
+++ b/proposals/20201012_no_capture.md
@@ -0,0 +1,36 @@
+
+status: in-progress
+
+Storing no-capture missing URLs in `terminal_url`
+=================================================
+
+Currently, when the bulk-mode ingest code terminates with a `no-capture`
+status, the missing URL (which is not in GWB CDX) is not stored in
+sandcrawler-db. This proposal is to store that URL in the existing
+`terminal_url` database column, leaving the `terminal_status_code` and
+`terminal_dt` columns empty.
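+
+For illustration, a bulk-mode ingest result for a `no-capture` would then carry
+a terminal block along these lines (hypothetical URL; only `terminal_url` is
+populated, the other terminal fields stay empty):
+
+```python
+result = {
+    "status": "no-capture",
+    "terminal": {
+        # the actual missing URL, after following any redirects
+        "terminal_url": "https://journal.example.org/article/123.pdf",
+        # no capture exists, so no capture datetime or HTTP status code
+        "terminal_dt": None,
+        "terminal_status_code": None,
+    },
+}
+```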
+
+The implementation is rather simple:
+
+- CDX lookup code path should save the *actual* final missing URL (`next_url`
+ after redirects) in the result object's `terminal_url` field
+- ensure that this field gets passed through all the way to the database on the
+  `no-capture` code path (sketched below)
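+
+A rough sketch of those two changes, condensed from the patch accompanying this
+proposal (the `ResourceResult` constructor name and the surrounding control
+flow are assumed from the current code; only the relevant fields are shown):
+
+```python
+# sandcrawler/ia.py: on the no-capture path, record the actual missing URL
+# (next_url, after redirects) instead of leaving terminal_url empty
+return ResourceResult(
+    start_url=start_url,
+    hit=False,
+    status="no-capture",
+    terminal_url=next_url,       # previously: terminal_url=None
+    terminal_dt=None,
+    terminal_status_code=None,
+    body=None,
+    # ... remaining fields unchanged
+)
+
+# sandcrawler/ingest.py: populate the 'terminal' block whenever a terminal_url
+# is known, rather than only when a capture datetime/status code exists
+if resource.terminal_url:
+    result['terminal'] = {
+        "terminal_url": resource.terminal_url,
+        "terminal_dt": resource.terminal_dt,
+        # ...
+    }
+```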
+
+This does change the semantics of the `terminal_url` field somewhat, and could
+break existing assumptions, which is why it is documented in this proposal.
+
+
+## Alternatives
+
+The status quo is to store the missing URL as the last element in the "hops"
+field of the JSON structure. We could keep doing that and build a convoluted
+pipeline that reads from the Kafka feed and extracts these URLs, but it would
+be messy. For example, re-ingesting would not update the old Kafka messages, so
+we would need some accounting of consumer group offsets after which missing
+URLs are truly missing.
+
+We could add a new `missing_url` database column and a matching field in the
+JSON schema for this specific use case. This seems like unnecessary extra work.
+
diff --git a/python/sandcrawler/ia.py b/python/sandcrawler/ia.py
index 7b623bc..2bc52ce 100644
--- a/python/sandcrawler/ia.py
+++ b/python/sandcrawler/ia.py
@@ -589,7 +589,7 @@ class WaybackClient:
                 start_url=start_url,
                 hit=False,
                 status="no-capture",
-                terminal_url=None,
+                terminal_url=next_url,
                 terminal_dt=None,
                 terminal_status_code=None,
                 body=None,
diff --git a/python/sandcrawler/ingest.py b/python/sandcrawler/ingest.py
index e8e517a..5ab7e13 100644
--- a/python/sandcrawler/ingest.py
+++ b/python/sandcrawler/ingest.py
@@ -387,7 +387,7 @@ class IngestFileWorker(SandcrawlerWorker):
         if not resource.hit:
             result['status'] = resource.status
-            if resource.terminal_dt and resource.terminal_status_code:
+            if resource.terminal_url:
                 result['terminal'] = {
                     "terminal_url": resource.terminal_url,
                     "terminal_dt": resource.terminal_dt,
@@ -465,7 +465,7 @@ class IngestFileWorker(SandcrawlerWorker):
result['status'] = "max-hops-exceeded"
return result
- if resource.terminal_dt:
+ if resource.terminal_url:
result['terminal'] = {
"terminal_url": resource.terminal_url,
"terminal_dt": resource.terminal_dt,