author    Martin Czygan <martin@archive.org>  2021-07-02 16:18:51 +0000
committer Martin Czygan <martin@archive.org>  2021-07-02 16:18:51 +0000
commit    a7b35c48612427d763c1db348b7a7f4083a0410b (patch)
tree      1e2148cdeaef8283f174621327c4d000d171f277
parent    0d5535742786fe78f6509b6606ca381912ed8bc7 (diff)
parent    7b900c757a4306be0aeb90c1b13ccb9fa266e621 (diff)
Merge branch 'bnewbold-verify-improvements' into 'master'

verify improvements

See merge request webgroup/fuzzycat!4
-rw-r--r--  fuzzycat/cluster.py              15
-rw-r--r--  fuzzycat/grobid_unstructured.py   3
-rw-r--r--  fuzzycat/simple.py                3
-rw-r--r--  fuzzycat/utils.py                32
-rw-r--r--  fuzzycat/verify.py               10
-rw-r--r--  tests/test_utils.py              24
6 files changed, 73 insertions(+), 14 deletions(-)
diff --git a/fuzzycat/cluster.py b/fuzzycat/cluster.py
index 4e70bdd..c8384c1 100644
--- a/fuzzycat/cluster.py
+++ b/fuzzycat/cluster.py
@@ -151,10 +151,20 @@ SANDCRAWLER_CHAR_MAP = {
     '\N{Latin capital letter T with stroke}': 'T',
     '\N{Latin small letter t with stroke}': 't',
-    # bnewbold additions
+    # bnewbold additions; mostly Latin-ish OCR ambiguous
     '\N{MICRO SIGN}': 'u',
     '\N{LATIN SMALL LETTER C}': 'c',
     '\N{LATIN SMALL LETTER F WITH HOOK}': 'f',
+    '\N{Greek Small Letter Alpha}': 'a',
+    '\N{Greek Small Letter Beta}': 'b',
+    '\N{Greek Small Letter Iota}': 'i',
+    '\N{Greek Small Letter Kappa}': 'k',
+    '\N{Greek Small Letter Chi}': 'x',
+    '\N{Greek Small Letter Upsilon}': 'u',
+    '\N{Greek Small Letter Nu}': 'v',
+    '\N{Greek Small Letter Gamma}': 'y',
+    '\N{Greek Small Letter Tau}': 't',
+    '\N{Greek Small Letter Omicron}': 'o',
     # bnewbold map-to-null (for non-printing stuff not in the regex)
     '\N{PARTIAL DIFFERENTIAL}': '',
     '\N{LATIN LETTER INVERTED GLOTTAL STOP}': '',
@@ -193,7 +203,7 @@ def sandcrawler_slugify(raw: str) -> str:
     slug = slug.replace("&apos;", "'")
     # iterate over all chars and replace from map, if in map; then lower-case again
-    slug = ''.join([SANDCRAWLER_CHAR_MAP.get(c, c) for c in slug])
+    slug = ''.join([SANDCRAWLER_CHAR_MAP.get(c, c) for c in slug]).lower()
     # early bailout before executing regex
     if not slug:
@@ -217,6 +227,7 @@ def test_sandcrawler_slugify() -> None:
("علمية", "علمية"),
("期刊的数字", "期刊的数字"),
("les pré-impressions explorées à partir", "lespreimpressionsexploreesapartir"),
+ ("γ-Globulin", "yglobulin"),
# "MICRO SIGN"
("\xb5meter", "umeter"),
diff --git a/fuzzycat/grobid_unstructured.py b/fuzzycat/grobid_unstructured.py
index 79c39d3..5462ae1 100644
--- a/fuzzycat/grobid_unstructured.py
+++ b/fuzzycat/grobid_unstructured.py
@@ -18,6 +18,7 @@ from fatcat_openapi_client import ReleaseContrib, ReleaseEntity, ReleaseExtIds
 from fuzzycat.config import settings
 from fuzzycat.grobid2json import biblio_info
+from fuzzycat.utils import clean_doi

 GROBID_API_BASE = settings.get("GROBID_API_BASE", "https://grobid.qa.fatcat.wiki")
@@ -89,7 +90,7 @@ def grobid_ref_to_release(ref: dict) -> ReleaseEntity:
         issue=ref.get("issue"),
         pages=ref.get("pages"),
         ext_ids=ReleaseExtIds(
-            doi=ref.get("doi"),
+            doi=clean_doi(ref.get("doi")),
             pmid=ref.get("pmid"),
             pmcid=ref.get("pmcid"),
             arxiv=ref.get("arxiv_id"),
diff --git a/fuzzycat/simple.py b/fuzzycat/simple.py
index c78ac28..8b206b1 100644
--- a/fuzzycat/simple.py
+++ b/fuzzycat/simple.py
@@ -26,6 +26,7 @@ from fuzzycat.entities import entity_to_dict
 from fuzzycat.grobid_unstructured import grobid_parse_unstructured
 from fuzzycat.matching import match_release_fuzzy
 from fuzzycat.verify import verify
+from fuzzycat.utils import clean_doi


 @dataclass
@@ -184,7 +185,7 @@ def biblio_to_release(biblio: dict) -> ReleaseEntity:
     release = ReleaseEntity(
         title=biblio.get("title"),
         ext_ids=ReleaseExtIds(
-            doi=biblio.get("doi"),
+            doi=clean_doi(biblio.get("doi")),
             pmid=biblio.get("pmid"),
             pmcid=biblio.get("pmcid"),
             arxiv=biblio.get("arxiv_id"),
diff --git a/fuzzycat/utils.py b/fuzzycat/utils.py
index d37ee32..a1c5124 100644
--- a/fuzzycat/utils.py
+++ b/fuzzycat/utils.py
@@ -6,6 +6,7 @@ import re
 import string
 import subprocess
 import tempfile
+from typing import Optional

 import requests
 from glom import PathAccessError, glom
@@ -35,20 +36,32 @@ def es_compat_hits_total(resp):
 def parse_page_string(s):
     """
-    Parse typical page strings, e.g. 150-180.
+    Parse typical page strings, e.g. 150-180 or p123.
+
+    If only a single page number is found, returns that first page and None
+    for the end page and count. If two numbers are found and they form a
+    consistent range, returns start, end, and count.
+
+    Does not handle lists of page numbers, Roman numerals, or various other
+    patterns.
     """
     if not s:
         raise ValueError('page parsing: empty string')
+    if s[0].lower() in ('p', 'e'):
+        s = s[1:]
     if s.isnumeric():
-        return ParsedPages(start=int(s), end=int(s), count=1)
+        return ParsedPages(start=int(s), end=None, count=None)
     page_pattern = re.compile("([0-9]{1,})-([0-9]{1,})")
     match = page_pattern.match(s)
     if not match:
         raise ValueError('cannot parse page pattern from {}'.format(s))
     start, end = match.groups()
     if len(end) == 1 and start and start[-1] < end:
-        # 261-5, odd, but happens
+        # '261-5', odd, but happens
         end = start[:-1] + end
+    elif len(end) == 2 and start and start[-2:] < end:
+        # '577-89', also happens
+        end = start[:-2] + end
     a, b = int(start), int(end)
     if a > b:
         raise ValueError('invalid page range: {}'.format(s))
@@ -68,6 +81,19 @@ def dict_key_exists(doc, path):
     else:
         return True


+def clean_doi(raw: Optional[str]) -> Optional[str]:
+    if not raw:
+        return None
+    raw = raw.strip().lower()
+    if raw.startswith("doi:"):
+        raw = raw[4:]
+    if "10." not in raw:
+        return None
+    if not raw.startswith("10."):
+        raw = raw[raw.find("10."):]
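+    # undo double-slash typos like "10.1037//0002-9432.72.1.50"; only the
+    # seven-character-prefix case (e.g. "10.1037") is handled here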
+    if raw[7:9] == "//":
+        raw = raw[:8] + raw[9:]
+    return raw


 def doi_prefix(v):
     """
diff --git a/fuzzycat/verify.py b/fuzzycat/verify.py
index 45a809e..1eeea40 100644
--- a/fuzzycat/verify.py
+++ b/fuzzycat/verify.py
@@ -92,7 +92,7 @@ from fuzzycat.data import (CONTAINER_NAME_BLACKLIST, PUBLISHER_BLACKLIST, TITLE_
 from fuzzycat.entities import entity_to_dict
 from fuzzycat.utils import (author_similarity_score, contains_chemical_formula, dict_key_exists,
                             doi_prefix, has_doi_prefix, jaccard, num_project, parse_page_string,
-                            slugify_string)
+                            slugify_string, clean_doi)

 Verify = collections.namedtuple("Verify", "status reason")
@@ -167,8 +167,8 @@ def verify(a: Dict, b: Dict, min_title_length=5) -> Tuple[str, str]:
     # A few items have the same DOI.
     try:
-        a_doi = glom(a, "ext_ids.doi")
-        b_doi = glom(b, "ext_ids.doi")
+        a_doi = clean_doi(glom(a, "ext_ids.doi"))
+        b_doi = clean_doi(glom(b, "ext_ids.doi"))
         if a_doi is not None and a_doi == b_doi:
             return Verify(Status.EXACT, Reason.DOI)
     except PathAccessError:
@@ -597,7 +597,9 @@ def verify(a: Dict, b: Dict, min_title_length=5) -> Tuple[str, str]:
     try:
         a_parsed_pages = parse_page_string(glom(a, "pages"))
         b_parsed_pages = parse_page_string(glom(b, "pages"))
-        if abs(a_parsed_pages.count - b_parsed_pages.count) > 5:
+        if (a_parsed_pages.count is not None
+                and b_parsed_pages.count is not None
+                and abs(a_parsed_pages.count - b_parsed_pages.count) > 5):
             return Verify(Status.DIFFERENT, Reason.PAGE_COUNT)
     except (ValueError, PathAccessError):
         pass
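
With parse_page_string now returning count=None for single pages, the extra
guard keeps the page-count comparison from raising a TypeError (which the
surrounding except clause would not catch). A sketch using names from the
diff:

    a_parsed_pages = parse_page_string("p55")     # count is None
    b_parsed_pages = parse_page_string("50-100")  # count is 51
    # one count is None, so no PAGE_COUNT verdict is attempted
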
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 381c44e..21b85a4 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -3,7 +3,7 @@ import os
 from fuzzycat.utils import (author_similarity_score, cut, jaccard, nwise, slugify_string,
                             token_n_grams, tokenize_string, parse_page_string, dict_key_exists,
-                            zstdlines, es_compat_hits_total)
+                            zstdlines, es_compat_hits_total, clean_doi)


 def test_slugify_string():
@@ -77,15 +77,20 @@ def test_dict_key_exists():
 def test_page_page_string():
-    reject = ("", "123-2", "123-120", "123a-124", "-2-1")
+    reject = ("", "123-2", "123-120", "123a-124", "-2-1", "I-II", "xv-xvi", "p")
     for s in reject:
         with pytest.raises(ValueError):
             assert parse_page_string(s)
-    assert parse_page_string("123") == (123, 123, 1)
+    assert parse_page_string("123") == (123, None, None)
+    assert parse_page_string("90-90") == (90, 90, 1)
     assert parse_page_string("123-5") == (123, 125, 3)
     assert parse_page_string("123-125") == (123, 125, 3)
     assert parse_page_string("123-124a") == (123, 124, 2)
     assert parse_page_string("1-1000") == (1, 1000, 1000)
+    assert parse_page_string("p55") == (55, None, None)
+    assert parse_page_string("p55-65") == (55, 65, 11)
+    assert parse_page_string("e1234") == (1234, None, None)
+    assert parse_page_string("577-89") == (577, 589, 13)


 def test_zstdlines():
@@ -118,3 +123,16 @@ def test_es_compat_hits_total():
     )
     for r, expected in cases:
         assert es_compat_hits_total(r) == expected
+
+def test_clean_doi():
+    assert clean_doi(None) is None
+    assert clean_doi("blah") is None
+    assert clean_doi("10.1234/asdf ") == "10.1234/asdf"
+    assert clean_doi("10.1037//0002-9432.72.1.50") == "10.1037/0002-9432.72.1.50"
+    assert clean_doi("10.1037/0002-9432.72.1.50") == "10.1037/0002-9432.72.1.50"
+    assert clean_doi("http://doi.org/10.1234/asdf ") == "10.1234/asdf"
+    # GROBID-mangled DOI
+    assert clean_doi("21924DOI10.1234/asdf ") == "10.1234/asdf"
+    assert clean_doi("https://dx.doi.org/10.1234/asdf ") == "10.1234/asdf"
+    assert clean_doi("doi:10.1234/asdf ") == "10.1234/asdf"
+    assert clean_doi("10.7326/M20-6817") == "10.7326/m20-6817"