author | Bryan Newbold <bnewbold@archive.org> | 2021-12-06 16:17:10 -0800
---|---|---
committer | Bryan Newbold <bnewbold@archive.org> | 2021-12-06 16:17:10 -0800
commit | aa72b77ba0268adac1583b96eb4ac40dbdfc9e4c (patch) |
tree | 464273cbce516596b339c09107594d2c1fde154a |
parent | 5b8a58ab37d5187dcf750b498d5e91b91f0c758a (diff) |
fmt and comments
-rw-r--r-- | fatcat_scholar/sim_pipeline.py | 4
-rw-r--r-- | fatcat_scholar/work_pipeline.py | 43

2 files changed, 32 insertions, 15 deletions
diff --git a/fatcat_scholar/sim_pipeline.py b/fatcat_scholar/sim_pipeline.py
index 34aaae3..65fb3bd 100644
--- a/fatcat_scholar/sim_pipeline.py
+++ b/fatcat_scholar/sim_pipeline.py
@@ -23,7 +23,9 @@ def truncate_pub_meta(full: Dict[str, Any]) -> Dict[str, Any]:
     """
     full.pop("files")
     if "ulrichs" in full and full["ulrichs"]:
-        full["ulrichs"] = [full["ulrichs"][0],]
+        full["ulrichs"] = [
+            full["ulrichs"][0],
+        ]
         full["ulrichs"][0].pop("reviews_mfl")
         full["ulrichs"][0].pop("editorial_description")
diff --git a/fatcat_scholar/work_pipeline.py b/fatcat_scholar/work_pipeline.py
index f17d69d..93e7aa2 100644
--- a/fatcat_scholar/work_pipeline.py
+++ b/fatcat_scholar/work_pipeline.py
@@ -19,11 +19,20 @@ from fatcat_scholar.sandcrawler import (
     SandcrawlerMinioClient,
     SandcrawlerPostgrestClient,
 )
-from fatcat_scholar.schema import DocType, IntermediateBundle
+from fatcat_scholar.schema import DocType, IntermediateBundle, clean_str
 from fatcat_scholar.sim_pipeline import truncate_issue_meta, truncate_pub_meta


 def parse_pages(raw: str) -> Tuple[Optional[int], Optional[int]]:
+    """
+    Takes a string representing page numbers, and tries to turn it into a span
+    of page numbers as integers.
+
+    Handles common syntax like "466-7" to mean "466 to 467".
+
+    If there is only a single page number, returns the first page as the last
+    page as well.
+    """
     first_raw = raw.split("-")[0]
     if not first_raw.isdigit():
         return (None, None)
@@ -228,11 +237,13 @@ class WorkPipeline:
         Fetches (cached) crossref metadata JSON from sandcrawler-db via
         postgrest HTTP interface.

-        Returns a JSON object on success, or None if not found.
+        Returns a dict object on success, or None if not found.

-        release_ident: Optional[str]
-        doi: Optional[str]
-        record: Optional[str]
+        Dict keys:
+
+        release_ident: Optional[str]
+        doi: Optional[str]
+        record: Optional[str]
         """
         if not re.ext_ids.doi:
             # can't do lookup without a DOI
@@ -280,15 +291,19 @@ class WorkPipeline:
         release_ident: str,
     ) -> Optional[Any]:
         """
-        issue_item
-        pages: str
-        page_texts: list
-            page_num
-            leaf_num
-            raw_text
-        release_ident: Optional[str]
-        pub_item_metadata
-        issue_item_metadata
+        Returns a dict with keys:
+
+        issue_item
+        pages: str
+        page_texts: list
+            page_num
+            leaf_num
+            raw_text
+        release_ident: Optional[str]
+        pub_item_metadata
+        issue_item_metadata
+
+        Or None if not found.
         """
         first_page, last_page = parse_pages(pages)
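The new `parse_pages` docstring documents span handling like "466-7" meaning pages 466 to 467, but only the first few lines of the function body appear in this hunk. The following is a minimal sketch of how that documented behavior could be implemented; everything after the `isdigit()` check is an assumption based on the docstring, not taken from this diff.

```python
from typing import Optional, Tuple


def parse_pages(raw: str) -> Tuple[Optional[int], Optional[int]]:
    """
    Turns a raw page string into an (int, int) span.

    "466-7"  -> (466, 467)    truncated second number expanded
    "123"    -> (123, 123)    single page: first == last
    "iv-x"   -> (None, None)  non-numeric pages can't be parsed
    """
    first_raw = raw.split("-")[0]
    if not first_raw.isdigit():
        return (None, None)
    first = int(first_raw)
    if "-" not in raw:
        # single page number: use it as both first and last page
        return (first, first)
    last_raw = raw.split("-")[-1]
    if not last_raw.isdigit():
        return (first, first)
    last = int(last_raw)
    if last < first:
        # expand truncated spans: "466-7" keeps the shared prefix "46"
        last = int(first_raw[: len(first_raw) - len(last_raw)] + last_raw)
    if last < first:
        return (first, first)
    return (first, last)
```

Under this sketch, `parse_pages("466-7")` returns `(466, 467)` and `parse_pages("123")` returns `(123, 123)`, matching the examples in the docstring added by this commit.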