-rw-r--r--   commands.md                  | 42
-rw-r--r--   notes/missing_2020-03-20.md  |  4
2 files changed, 41 insertions(+), 5 deletions(-)
diff --git a/commands.md b/commands.md
index 7508842..4ef7231 100644
--- a/commands.md
+++ b/commands.md
@@ -16,7 +16,8 @@ Fetch and transform metadata:
 
 Existing fatcat ES transform:
 
-    cat /srv/covid19.fatcat.wiki/src/metadata/cord19.2020-03-27.enrich.json | jq .fatcat_release -c | rg -v '^null$' | ./fatcat_transform.py elasticsearch-releases - - | pv -l > cord19.2020-03-27.fatcat_es.json
+    # in fatcat python directory, pipenv shell
+    cat /srv/covid19.fatcat.wiki/src/metadata/cord19.2020-03-27.enrich.json | jq .fatcat_release -c | rg -v '^null$' | ./fatcat_transform.py elasticsearch-releases - - | pv -l > /srv/covid19.fatcat.wiki/src/metadata/cord19.2020-03-27.fatcat_es.json
 
 Download fulltext from wayback:
 
@@ -25,14 +26,49 @@ Download fulltext from wayback:
 
 Extract text from PDFs:
 
     ls fulltext_web/pdf/ | parallel mkdir -p fulltext_web/pdftotext/{}
-    fd .pdf fulltext_web/pdf/ | cut -c18-60 | parallel -j10 pdftotext fulltext_web/pdf/{}.pdf fulltext_web/pdftotext/{}.txt
+    fd -I .pdf fulltext_web/pdf/ | cut -c18-60 | parallel -j10 pdftotext fulltext_web/pdf/{}.pdf fulltext_web/pdftotext/{}.txt
+
+Create thumbnails:
+
+    ls fulltext_web/pdf/ | parallel mkdir -p fulltext_web/thumbnail/{}
+    fd -I .pdf fulltext_web/pdf/ | cut -c18-60 | parallel -j10 pdftocairo -png -singlefile -scale-to-x 400 -scale-to-y -1 fulltext_web/pdf/{}.pdf fulltext_web/thumbnail/{}
+
+Fetch GROBID:
+
+Convert GROBID XML to JSON:
+
+    ls fulltext_web/pdf/ | parallel mkdir -p fulltext_web/grobid/{}
+    fd -I .xml fulltext_web/grobid/ | cut -c18-60 | parallel -j10 "bin/grobid2json.py fulltext_web/grobid/{}.xml > fulltext_web/grobid/{}.json"
+
+Create large derivatives file (including extracted fulltext):
+
+    ./cord19_fatcat_derivatives.py metadata/cord19.2020-03-27.enrich.json --base-dir fulltext_web/ | pv -l > fulltext.json
+
+    cat fulltext.json | jq .fulltext_status -r | sort | uniq -c | sort -nr
+
 
 ## ES Indices
 
 Create and index existing `fatcat_release` schema:
 
-    http put :9200/covid19_fatcat_release < release_schema.json
+    http put :9200/covid19_fatcat_release < schema/release_schema_v03b.json
     # in fatcat python directory, pipenv shell
     export LC_ALL=C.UTF-8
     cat /srv/covid19.fatcat.wiki/src/metadata/cord19.2020-03-27.enrich.json | jq .fatcat_release -c | rg -v '^null$' | pv -l | ./fatcat_transform.py elasticsearch-releases - - | esbulk -verbose -size 1000 -id ident -w 8 -index covid19_fatcat_release -type release
+
+Create fulltext index:
+
+    http put :9200/covid19_fatcat_fulltext < schema/fulltext_schema_v00.json
+
+Transform to ES schema and index:
+
+    ./elastic_transform.py cord19.2020-03-27.fulltext.json | pv -l | esbulk -verbose -size 1000 -id fatcat_ident -w 8 -index covid19_fatcat_fulltext -type release
+
+## GROBID Processing
+
+    zip -r fulltext_web.zip fulltext_web
+
+    # on GROBID worker, in sandcrawler repo and pipenv
+    ./grobid_tool.py --grobid-host http://localhost:8070 -j 24 extract-zipfile /srv/sandcrawler/tasks/fulltext_web.zip | pv -l > /srv/sandcrawler/tasks/fulltext_web.grobid.json
+
diff --git a/notes/missing_2020-03-20.md b/notes/missing_2020-03-20.md
index 2576a8e..4eaaa4e 100644
--- a/notes/missing_2020-03-20.md
+++ b/notes/missing_2020-03-20.md
@@ -290,11 +290,11 @@ Interesting sites to crawl or translate:
 
     cat missing_doi_status.tsv | rg '404$' | cut -f1 > unregistered_doi.tsv
 
-    cat missing_doi_status.tsv | rg '302$' | cut -f1 | parallel http --json get "https://api.crossref.org/v1/works/http://dx.doi.org/{}" | jq .message -c | pv -l > missing_doi_crossref.json
+    cat missing_doi_status.tsv | rg '302$' | cut -f1 | parallel -j1 'http --json get "https://api.crossref.org/v1/works/http://dx.doi.org/{}" mailto==webservices@archive.org' | rg '^\{' | jq .message -c | pv -l > missing_doi_crossref.json
 
     mkdir -p pubmed
-    cat missing_pmcid.tsv | parallel -j1 'http get "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id={}&rettype=pubmed" > pubmed/{}.xml'
     cat missing_pmid.tsv | parallel -j1 'http get "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pubmed&id={}&rettype=pubmed" > pubmed/{}.xml'
+    cat missing_pmcid.tsv | parallel -j1 'http get "https://eutils.ncbi.nlm.nih.gov/entrez/eutils/efetch.fcgi?db=pmc&id={}&rettype=pubmed" > pubmed/{}.xml'
 
     cat pubmed/*.xml | rg -v '^<\?xml version' | rg -v '^<!DOCTYPE' | rg -v '^<PubmedArticleSet>' | rg -v '^</PubmedArticleSet>' > pubmed_combined.xml
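After the `esbulk` loads above, a quick sanity check on document counts can catch a truncated load early. This is not part of the commit; just a sketch using the stock Elasticsearch `_count` and `_cat/indices` APIs, assuming the indices live on localhost:9200 as in the commands:

    # document counts per index (jq as used elsewhere in these notes)
    http get :9200/covid19_fatcat_release/_count | jq .count
    http get :9200/covid19_fatcat_fulltext/_count | jq .count

    # size/health overview for both covid19 indices
    http get ':9200/_cat/indices/covid19_fatcat_*?v'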