| field | value | date |
|---|---|---|
| author | Bryan Newbold <bnewbold@archive.org> | 2020-11-06 18:32:35 -0800 |
| committer | Bryan Newbold <bnewbold@archive.org> | 2020-11-06 18:32:35 -0800 |
| commit | 175019c96fced3e21d0f60ea1a4a37da6b8872ac | |
| tree | f42fbbe9c8ac06ae9eb06373ab9eec96d2b3a177 /sql | |
| parent | b0b66c20c6ffb9d8acc626068964d7dfd5d3bcdc | |
| parent | 47ca1a273912c8836630b0930b71a4e66fd2c85b | |
| download | sandcrawler-175019c96fced3e21d0f60ea1a4a37da6b8872ac.tar.gz, sandcrawler-175019c96fced3e21d0f60ea1a4a37da6b8872ac.zip | |
Merge branch 'bnewbold-html-ingest'
Diffstat (limited to 'sql')
| mode | file | lines added |
|---|---|---|
| -rw-r--r-- | sql/dump_unmatched_glutton_pdf.sql | 19 |
| -rw-r--r-- | sql/migrations/2019-12-19-060141_init/up.sql | 15 |
| -rw-r--r-- | sql/monitoring_queries.md | 26 |

3 files changed, 60 insertions(+), 0 deletions(-)
diff --git a/sql/dump_unmatched_glutton_pdf.sql b/sql/dump_unmatched_glutton_pdf.sql
new file mode 100644
index 0000000..d089c7e
--- /dev/null
+++ b/sql/dump_unmatched_glutton_pdf.sql
@@ -0,0 +1,19 @@
+
+-- Run like:
+--   psql sandcrawler < THING.sql > THING.2019-09-23.json
+
+BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE;
+
+COPY (
+  SELECT row_to_json(grobid)
+  FROM grobid
+  LEFT JOIN fatcat_file ON grobid.sha1hex = fatcat_file.sha1hex
+  WHERE fatcat_file.sha1hex IS NULL
+  AND grobid.fatcat_release IS NOT NULL
+  LIMIT 1000
+)
+TO '/grande/snapshots/dump_unmatched_glutton_pdf.2020-06-30.json';
+--TO STDOUT
+--WITH NULL '';
+
+ROLLBACK;
diff --git a/sql/migrations/2019-12-19-060141_init/up.sql b/sql/migrations/2019-12-19-060141_init/up.sql
index 59423dd..73bd7f1 100644
--- a/sql/migrations/2019-12-19-060141_init/up.sql
+++ b/sql/migrations/2019-12-19-060141_init/up.sql
@@ -114,6 +114,20 @@ CREATE TABLE IF NOT EXISTS pdf_meta (
     --    encrypted
 );
 
+CREATE TABLE IF NOT EXISTS html_meta (
+    sha1hex             TEXT PRIMARY KEY CHECK (octet_length(sha1hex) = 40),
+    updated             TIMESTAMP WITH TIME ZONE DEFAULT now() NOT NULL,
+    status              TEXT CHECK (octet_length(status) >= 1) NOT NULL,
+    scope               TEXT CHECK (octet_length(scope) >= 1),
+    has_teixml          BOOLEAN NOT NULL,
+    has_thumbnail       BOOLEAN NOT NULL,
+    word_count          INT CHECK (word_count >= 0),
+    biblio              JSONB,
+    resources           JSONB
+    -- biblio JSON fields are similar to fatcat release schema
+    -- resources JSON object is a list of objects with keys like webcapture CDX schema
+);
+
 CREATE TABLE IF NOT EXISTS ingest_request (
     link_source             TEXT NOT NULL CHECK (octet_length(link_source) >= 1),
     link_source_id          TEXT NOT NULL CHECK (octet_length(link_source_id) >= 1),
@@ -128,6 +142,7 @@ CREATE TABLE IF NOT EXISTS ingest_request (
     --   ext_ids (source/source_id sometimes enough)
     --   fatcat_release (if ext_ids and source/source_id not specific enough; eg SPN)
     --   edit_extra
+    -- ingest type can be: pdf, xml, html
 
     PRIMARY KEY (link_source, link_source_id, ingest_type, base_url)
 );
diff --git a/sql/monitoring_queries.md b/sql/monitoring_queries.md
index 1738731..1c872cc 100644
--- a/sql/monitoring_queries.md
+++ b/sql/monitoring_queries.md
@@ -39,6 +39,32 @@ Broken domains, past 30 days:
     ORDER BY COUNT DESC
     LIMIT 25;
 
+Summary of significant domains and status, past 7 days:
+
+    SELECT domain, status, count
+    FROM (
+        SELECT domain, status, COUNT((domain, status)) as count
+        FROM (
+            SELECT
+                ingest_file_result.ingest_type,
+                ingest_file_result.status,
+                substring(ingest_file_result.terminal_url FROM '[^/]+://([^/]*)') AS domain
+            FROM ingest_file_result
+            LEFT JOIN ingest_request
+                ON ingest_file_result.ingest_type = ingest_request.ingest_type
+                AND ingest_file_result.base_url = ingest_request.base_url
+            WHERE
+                ingest_file_result.updated >= NOW() - '7 day'::INTERVAL
+                AND ingest_request.ingest_type = 'pdf'
+                AND ingest_request.ingest_request_source = 'fatcat-changelog'
+        ) t1
+        WHERE t1.domain != ''
+        GROUP BY CUBE (domain, status)
+    ) t2
+    WHERE count > 500
+    ORDER BY domain ASC, count DESC;
+
+
 Throughput per day, and success, for past 30 days:
 
     SELECT ingest_request.ingest_type,
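
Notes on the new `sql/dump_unmatched_glutton_pdf.sql`: as committed, `COPY ... TO '/grande/snapshots/...'` writes server-side to a fixed path, while the header comment describes redirecting psql output to a file. The commented-out `TO STDOUT` lines are the variant that matches the header comment; a minimal sketch of that form (same query, output sent to the client):

    -- Sketch only: client-side variant of the committed script, so that
    --   psql sandcrawler < dump_unmatched_glutton_pdf.sql > out.json
    -- works as the header comment describes.
    BEGIN TRANSACTION ISOLATION LEVEL SERIALIZABLE READ ONLY DEFERRABLE;

    COPY (
      SELECT row_to_json(grobid)
      FROM grobid
      LEFT JOIN fatcat_file ON grobid.sha1hex = fatcat_file.sha1hex
      WHERE fatcat_file.sha1hex IS NULL
      AND grobid.fatcat_release IS NOT NULL
      LIMIT 1000
    )
    TO STDOUT;

    ROLLBACK;

The `SERIALIZABLE READ ONLY DEFERRABLE` transaction plus the final `ROLLBACK` lets the dump run without writing anything or risking serialization conflicts with concurrent workers.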
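
For the new `html_meta` table, the `biblio` and `resources` JSONB columns are characterized only by the in-schema comments. A hypothetical example row, with invented JSON keys purely to illustrate the intended shapes (not the actual fatcat release or webcapture CDX schemas):

    -- Hypothetical row; the JSON keys below are illustrative guesses.
    INSERT INTO html_meta
        (sha1hex, status, scope, has_teixml, has_thumbnail, word_count, biblio, resources)
    VALUES (
        '0123456789abcdef0123456789abcdef01234567',
        'success',
        'article-fulltext',
        true,
        false,
        4200,
        '{"title": "An Example Article", "doi": "10.1234/example"}',
        '[{"url": "https://example.org/figure1.png", "mimetype": "image/png"}]'
    );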
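
A monitoring-style breakdown for the new table, in the same style as the queries in `sql/monitoring_queries.md` (a sketch, assuming `html_meta` is being populated by the HTML ingest pipeline):

    SELECT status, scope, COUNT(*) AS count
    FROM html_meta
    GROUP BY status, scope
    ORDER BY count DESC
    LIMIT 25;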
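
The new domain/status summary query relies on `GROUP BY CUBE (domain, status)`, which returns per-(domain, status) counts plus subtotal rows where one or both grouping columns are NULL. A self-contained illustration of the semantics (demo table, not part of the sandcrawler schema):

    -- CUBE(a, b) groups by all four grouping sets: (a, b), (a), (b), ().
    CREATE TEMPORARY TABLE demo (domain TEXT, status TEXT);
    INSERT INTO demo VALUES
        ('example.org', 'success'),
        ('example.org', 'no-pdf-link'),
        ('example.com', 'success');

    SELECT domain, status, COUNT(*) AS count
    FROM demo
    GROUP BY CUBE (domain, status);
    -- Rows with domain IS NULL or status IS NULL are the subtotals; in the
    -- committed query, the outer WHERE count > 500 prunes insignificant ones.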
