authorMartin Czygan <martin@archive.org>2021-09-21 18:34:31 +0000
committerMartin Czygan <martin@archive.org>2021-09-21 18:34:31 +0000
commita37404f30b2c1afa0b46ee30a5b59d7312c119d0 (patch)
tree8f38d396909049971559a5ab574561960d6d5f22
parentc587a084defe54103aa147b7ab91542a11a548b1 (diff)
parent5fa61d89320af880d5bf6b3231f6478887cfb6a6 (diff)
downloadfuzzycat-a37404f30b2c1afa0b46ee30a5b59d7312c119d0.tar.gz
fuzzycat-a37404f30b2c1afa0b46ee30a5b59d7312c119d0.zip
Merge branch 'wip-martin-review-cleanup' into 'master'
review notes and some cleanup

See merge request webgroup/fuzzycat!7
-rw-r--r--fuzzycat/config.py1
-rw-r--r--fuzzycat/matching.py77
-rw-r--r--fuzzycat/simple.py2
-rw-r--r--fuzzycat/utils.py2
-rw-r--r--fuzzycat/verify.py9
-rw-r--r--notes/2021_09_review.md129
-rw-r--r--notes/Makefile9
-rw-r--r--notes/known_issues.md17
-rw-r--r--notes/todo_manual_review.md (renamed from notes/todo.md)0
-rw-r--r--notes/workflow_schema.dot (renamed from notes/steps.dot)0
-rw-r--r--notes/workflow_schema.png (renamed from notes/steps.png)bin29268 -> 29268 bytes
-rw-r--r--tests/test_matching.py45
-rw-r--r--tests/test_utils.py1
13 files changed, 272 insertions, 20 deletions
diff --git a/fuzzycat/config.py b/fuzzycat/config.py
index 61050d1..d6989cf 100644
--- a/fuzzycat/config.py
+++ b/fuzzycat/config.py
@@ -1,4 +1,3 @@
-
from dynaconf import Dynaconf
settings = Dynaconf(envvar_prefix="FUZZYCAT")
diff --git a/fuzzycat/matching.py b/fuzzycat/matching.py
index c94a308..310dfc2 100644
--- a/fuzzycat/matching.py
+++ b/fuzzycat/matching.py
@@ -10,9 +10,9 @@ import requests
from fatcat_openapi_client import ContainerEntity, DefaultApi, ReleaseEntity
from fatcat_openapi_client.rest import ApiException
+from fuzzycat.config import settings
from fuzzycat.entities import entity_from_dict, entity_from_json
from fuzzycat.utils import es_compat_hits_total
-from fuzzycat.config import settings
FATCAT_API_URL = settings.get("FATCAT_API_URL", "https://api.fatcat.wiki/v0")
@@ -73,12 +73,83 @@ def match_release_fuzzy(
if r:
return [r]
+
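+    # First, try a stricter query: title and contrib names must both match.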
+ if release.title is not None and release.contribs is not None:
+ names = " ".join([c.raw_name for c in release.contribs])
+ body = {
+ "track_total_hits": True,
+ "query": {
+ "bool": {
+ "must": [
+ {
+ "match": {
+ "title": {
+ "query": release.title,
+ "operator": "AND",
+ "fuzziness": "AUTO",
+ },
+ }
+ },
+ {
+ "match": {
+ "contrib_names": {
+ "query": names,
+ "operator": "AND",
+ "fuzziness": "AUTO",
+ }
+ }
+ },
+ ],
+ },
+ },
+ "size": size,
+ }
+ resp = es.search(body=body, index="fatcat_release")
+ if es_compat_hits_total(resp) > 0:
+ return response_to_entity_list(resp, entity_type=ReleaseEntity, size=size, api=api)
+
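+    # Fall back to a bool/should query, where either field may match.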
+ body = {
+ "track_total_hits": True,
+ "query": {
+ "bool": {
+ "should": [
+ {
+ "match": {
+ "title": {
+ "query": release.title,
+ "operator": "AND",
+ "fuzziness": "AUTO",
+ },
+ }
+ },
+ {
+ "match": {
+ "contrib_names": {
+ "query": names,
+ "operator": "AND",
+ "fuzziness": "AUTO",
+ }
+ }
+ },
+ ],
+ },
+ },
+ "size": size,
+ }
+ resp = es.search(body=body, index="fatcat_release")
+ if es_compat_hits_total(resp) > 0:
+ return response_to_entity_list(resp, entity_type=ReleaseEntity, size=size, api=api)
+
+    # Note: If the title is short, we will get lots of results here; do we
+    # need to check for title length or result set length here?
body = {
+ "track_total_hits": True,
"query": {
"match": {
"title": {
"query": release.title,
- "operator": "AND"
+ "operator": "AND",
}
}
},
@@ -91,6 +162,7 @@ def match_release_fuzzy(
# Get fuzzy.
# https://www.elastic.co/guide/en/elasticsearch/reference/current/common-options.html#fuzziness
body = {
+ "track_total_hits": True,
"query": {
"match": {
"title": {
@@ -106,6 +178,7 @@ def match_release_fuzzy(
if es_compat_hits_total(resp) > 0:
return response_to_entity_list(resp, entity_type=ReleaseEntity, size=size, api=api)
+
# TODO: perform more queries on other fields.
return []
diff --git a/fuzzycat/simple.py b/fuzzycat/simple.py
index 8b206b1..ff59ba2 100644
--- a/fuzzycat/simple.py
+++ b/fuzzycat/simple.py
@@ -25,8 +25,8 @@ from fuzzycat.common import Reason, Status
from fuzzycat.entities import entity_to_dict
from fuzzycat.grobid_unstructured import grobid_parse_unstructured
from fuzzycat.matching import match_release_fuzzy
-from fuzzycat.verify import verify
from fuzzycat.utils import clean_doi
+from fuzzycat.verify import verify
@dataclass
diff --git a/fuzzycat/utils.py b/fuzzycat/utils.py
index a1c5124..303daf6 100644
--- a/fuzzycat/utils.py
+++ b/fuzzycat/utils.py
@@ -81,6 +81,7 @@ def dict_key_exists(doc, path):
else:
return True
+
def clean_doi(raw: Optional[str]) -> Optional[str]:
if not raw:
return None
@@ -95,6 +96,7 @@ def clean_doi(raw: Optional[str]) -> Optional[str]:
raw = raw[:8] + raw[9:]
return raw
+
def doi_prefix(v):
"""
Return the prefix of a DOI.
diff --git a/fuzzycat/verify.py b/fuzzycat/verify.py
index 1eeea40..5b90c47 100644
--- a/fuzzycat/verify.py
+++ b/fuzzycat/verify.py
@@ -90,9 +90,9 @@ from fuzzycat.common import Reason, Status
from fuzzycat.data import (CONTAINER_NAME_BLACKLIST, PUBLISHER_BLACKLIST, TITLE_BLACKLIST,
TITLE_FRAGMENT_BLACKLIST)
from fuzzycat.entities import entity_to_dict
-from fuzzycat.utils import (author_similarity_score, contains_chemical_formula, dict_key_exists,
- doi_prefix, has_doi_prefix, jaccard, num_project, parse_page_string,
- slugify_string, clean_doi)
+from fuzzycat.utils import (author_similarity_score, clean_doi, contains_chemical_formula,
+ dict_key_exists, doi_prefix, has_doi_prefix, jaccard, num_project,
+ parse_page_string, slugify_string)
Verify = collections.namedtuple("Verify", "status reason")
@@ -597,8 +597,7 @@ def verify(a: Dict, b: Dict, min_title_length=5) -> Tuple[str, str]:
try:
a_parsed_pages = parse_page_string(glom(a, "pages"))
b_parsed_pages = parse_page_string(glom(b, "pages"))
- if (a_parsed_pages.count != None
- and b_parsed_pages.count != None
+ if (a_parsed_pages.count != None and b_parsed_pages.count != None
and abs(a_parsed_pages.count - b_parsed_pages.count) > 5):
return Verify(Status.DIFFERENT, Reason.PAGE_COUNT)
except (ValueError, PathAccessError):
diff --git a/notes/2021_09_review.md b/notes/2021_09_review.md
new file mode 100644
index 0000000..f15ed5b
--- /dev/null
+++ b/notes/2021_09_review.md
@@ -0,0 +1,129 @@
+# Fuzzy matching review and retrospective
+
+> 2021-09-15
+
+Now that [refcat](https://gitlab.com/internetarchive/refcat) has reached a
+milestone, I'd like to review fuzzycat and fuzzy matching in general; this
+should help pave the way to a slight redesign of the overall approach.
+
+## TL;DR
+
+* performance matters at scale and a faster language (e.g. Go) is essential
+* for small scale, the API matters more than performance
+* a lot of the code is currently based on specific schemas (e.g. release, a
+  specific elasticsearch mapping, etc), so not that much code is generic or
+  reusable - it also seems overkill to try to abstract the schema away
+
+## Ideas
+
+* [ ] use pydantic or dataclass to make schema more explicit
+* [ ] extend type annotation coverage
+* [ ] remove bulk stuff, remove clustering etc; improve the verification part
+* [ ] use cases: work merging
+
+A few more things to revisit:
+
+* [ ] revisit journal matching; what is weak, strong?
+* [ ] refactor: author list or string comparison
+* [ ] go beyond title matching when querying elasticsearch
+* [ ] better container name matching
+
+Take a look at:
+
+> https://github.com/djudd/human-name
+
+----
+
+## Redesign ideas
+
+### Large scale processing
+
+JSON decoding and encoding does not seem to be the bottleneck, but working with
+various, often optional fields gets expensive in Python (whereas in Go, we can
+use a struct).
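+
+For illustration, the kind of defensive access that adds up in Python (a
+made-up snippet, not fuzzycat code):
+
+    # every optional field needs a guard; a typed Go struct with zero
+    # values would turn this into plain field accesses
+    contribs = doc.get("contribs") or []
+    raw_names = [c.get("raw_name") or "" for c in contribs]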
+
+The mapping stage in
+[refcat/skate](https://gitlab.com/internetarchive/refcat/-/blob/3a79551dfe54ba668f7eee9de88625a0d33d9c7f/skate/map.go#L109-111)
+is a simple operation (blob to fields) that can be implemented in isolation
+and then [added to the command
+line](https://gitlab.com/internetarchive/refcat/-/blob/3a79551dfe54ba668f7eee9de88625a0d33d9c7f/skate/cmd/skate-map/main.go#L67-87).
+In skate, we already have over a dozen mappers working on various types.
+There's even a bit of [map
+middleware](https://gitlab.com/internetarchive/refcat/-/blob/3a79551dfe54ba668f7eee9de88625a0d33d9c7f/skate/map.go#L152-161).
+
+In fuzzycat, the
+[Cluster](https://git.archive.org/webgroup/fuzzycat/-/blob/c587a084defe54103aa147b7ab91542a11a548b1/fuzzycat/cluster.py#L309-347)
+class does mapping, via
+[key](https://git.archive.org/webgroup/fuzzycat/-/blob/c587a084defe54103aa147b7ab91542a11a548b1/fuzzycat/cluster.py#L331),
+[sorting](https://git.archive.org/webgroup/fuzzycat/-/blob/c587a084defe54103aa147b7ab91542a11a548b1/fuzzycat/cluster.py#L406-426),
+and a specific
+[grouping](https://git.archive.org/webgroup/fuzzycat/-/blob/c587a084defe54103aa147b7ab91542a11a548b1/fuzzycat/cluster.py#L428-454)
+all in one go.
+
+For example, we no longer used the single cluster document in refcat/skate
+(there, we keep two separate files and use an extra
+[zipkey](https://gitlab.com/internetarchive/refcat/-/blob/3a79551dfe54ba668f7eee9de88625a0d33d9c7f/skate/zipkey/zipkey.go#L23-33)
+type, which is a slightly generalized
+[comm](https://en.wikipedia.org/wiki/Comm) that allows running a function
+over a cluster of documents coming from, currently, two streams).
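+
+A zipkey-style iteration is easy to sketch in Python, assuming both inputs
+are already sorted by key (an illustration, not the skate implementation):
+
+    import heapq
+    import itertools
+
+    def zipkey(a, b, key, f):
+        """Apply f to each cluster of items sharing a key, drawn from two
+        key-sorted iterables; a slightly generalized comm."""
+        for k, group in itertools.groupby(heapq.merge(a, b, key=key), key=key):
+            f(k, list(group))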
+
+A higher level command could encapsulate the whole pipeline, without needing an extra framework like luigi:
+
+ inputs A B
+ | |
+ mapped M1 M2
+ | |
+ sorted S1 S2
+ \ /
+ \ /
+ reduced V
+ |
+ |
+ C
+
+> Not sure if we need mappers here at all, if we already have them in refcat.
+
+A command could look like this:
+
+ $ fuzzycat pipeline -a A.json -b B.json --mapper-a "tn" --mapper-b "tn" --reduce "bref"
+
+It would be nice if this actually ran fast. It could also be run
+programmatically:
+
+ output = fuzzycat_pipeline(a="A.json", b="B.json", mapper_a="tn", mapper_b="tn", reduce="bref")
+
+Mappers should have a minimal scope; each mapper will have a format it can work
+on. Reducers will have two input types specified.
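+
+A minimal sketch of such a mapper in Python (hypothetical; in skate, mappers
+are plain Go functions, and fuzzycat does not ship this yet):
+
+    import json
+    from typing import Iterable, Tuple
+
+    def title_normalized(line: str) -> Iterable[Tuple[str, str]]:
+        """A "tn" style mapper sketch: key a release blob by its
+        normalized title, yielding (key, blob) pairs."""
+        doc = json.loads(line)
+        key = (doc.get("title") or "").strip().lower()
+        if key:
+            yield key, line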
+
+### Running continuously
+
+> With a couple of the inputs (metadata, extracted data, ...) getting updated
+> all the time, it might for the moment be simpler to rerun the derivation in
+> batch mode.
+
+A list of steps we would need to implement for continuous reference index updates:
+
+* a new metadata document arrives (e.g. via "changelog")
+* if the metadata contains outbound references, nice; if not, we try to download the associated PDF, run grobid and get the references out that way
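+
+A polling loop for the first of these steps could look roughly like this
+(sketch only; it assumes the fatcat `/changelog` endpoint returns entries
+carrying an `index` field):
+
+    import time
+    import requests
+
+    def poll_changelog(handler, since=0, api="https://api.fatcat.wiki/v0"):
+        """Hand every new changelog entry to a handler function."""
+        while True:
+            for entry in requests.get("{}/changelog".format(api)).json():
+                if entry["index"] > since:
+                    handler(entry)
+                    since = entry["index"]
+            time.sleep(60)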
+
+At this point, we have the easier part - outbound references - covered.
+
+Where do the outbound references of all existing docs live? In the database
+only, hence we cannot search for them currently.
+
+* [7ppmkfo5krb2zhefhwkwdp4mqe](https://search.fatcat.wiki/fatcat_release/_search?q=ident:7ppmkfo5krb2zhefhwkwdp4mqe)
+  says `ref_count` 12, but we can only get the list of refs via the
+  [api](https://api.fatcat.wiki/v0/release/7ppmkfo5krb2zhefhwkwdp4mqe)
+
+We could add another elasticsearch index just for the raw refs. E.g. every
+time an item is updated, this index gets updated as well (taking refs from
+the API and putting them into ES). We can then query for any ID we find in a
+reference, for any string match, etc. Once we find e.g. ten documents that
+have the document in question in their reference list, we can update the
+reference index for each of these documents.
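+
+A rough sketch of such an update step (the `fatcat_ref` index name and the
+per-ref document layout are made up; error handling is omitted):
+
+    import elasticsearch
+    import requests
+
+    def update_refs_index(es: elasticsearch.Elasticsearch, ident: str) -> None:
+        """Fetch refs for one release from the API and index them into ES."""
+        url = "https://api.fatcat.wiki/v0/release/{}".format(ident)
+        release = requests.get(url).json()
+        for i, ref in enumerate(release.get("refs") or []):
+            es.index(index="fatcat_ref", id="{}_{}".format(ident, i), body=ref)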
+
+We could keep a (weekly) refs snapshot file around that would be used for
+matching. The result would be the e.g. ten documents that refer to the
+document in question. We can take their ids and update the document to
+establish the link. The on-disk file (or files) should all be prepared, e.g.
+sorted by key, so the lookup will be fast.
+
diff --git a/notes/Makefile b/notes/Makefile
index 3a7dcaf..b4996f9 100644
--- a/notes/Makefile
+++ b/notes/Makefile
@@ -1,6 +1,11 @@
-steps.png: steps.dot
+TARGETS = workflow_schema.png
+
+.PHONY: all
+all: $(TARGETS)
+
+%.png: %.dot
dot -Tpng $^ > $@
.PHONY: clean
clean:
- rm -rf steps.png
+ rm -rf $(TARGETS)
diff --git a/notes/known_issues.md b/notes/known_issues.md
index 32194fd..991de5c 100644
--- a/notes/known_issues.md
+++ b/notes/known_issues.md
@@ -102,3 +102,20 @@ $ python -m fuzzycat verify_single | jq .
]
}
```
+
+## "jaccard authors" can be too weak
+
+```
+$ python -m fuzzycat verify_single | jq .
+{
+ "extra": {
+ "q": "https://fatcat.wiki/release/search?q=canes"
+ },
+ "a": "https://fatcat.wiki/release/ivhoiqvjt5cpxbdzzbuco7eciq",
+ "b": "https://fatcat.wiki/release/hprvn76ls5cbbkl2ypsyijojmu",
+ "r": [
+ "strong",
+ "jaccard_authors"
+ ]
+}
+```
diff --git a/notes/todo.md b/notes/todo_manual_review.md
index b3474f8..b3474f8 100644
--- a/notes/todo.md
+++ b/notes/todo_manual_review.md
diff --git a/notes/steps.dot b/notes/workflow_schema.dot
index 365163f..365163f 100644
--- a/notes/steps.dot
+++ b/notes/workflow_schema.dot
diff --git a/notes/steps.png b/notes/workflow_schema.png
index b321976..b321976 100644
--- a/notes/steps.png
+++ b/notes/workflow_schema.png
Binary files differ
diff --git a/tests/test_matching.py b/tests/test_matching.py
index 7ab7b11..90d1fee 100644
--- a/tests/test_matching.py
+++ b/tests/test_matching.py
@@ -9,7 +9,8 @@ from fatcat_openapi_client import ReleaseEntity
from fuzzycat.entities import entity_from_dict
from fuzzycat.matching import anything_to_entity, match_release_fuzzy
-warnings.filterwarnings("ignore") # InsecureRequestWarning: Unverified HTTPS request is being made to host ...
+warnings.filterwarnings(
+ "ignore") # InsecureRequestWarning: Unverified HTTPS request is being made to host ...
from fuzzycat.matching import anything_to_entity, match_release_fuzzy
from fuzzycat.config import settings
@@ -28,6 +29,7 @@ FATCAT_SEARCH_URL = settings.get("FATCAT_SEARCH_URL", "https://search.fatcat.wik
def is_not_reachable(url, timeout=3):
return not is_reachable(url)
+
def is_reachable(url, timeout=3):
"""
Return true, if URL is reachable and returns HTTP 200.
@@ -37,14 +39,21 @@ def is_reachable(url, timeout=3):
except Exception:
return False
+
@pytest.fixture
def es_client():
return elasticsearch.Elasticsearch([FATCAT_SEARCH_URL])
-@pytest.mark.skipif(is_not_reachable(FATCAT_SEARCH_URL),
- reason="{} not reachable, use e.g. FUZZYCAT_FATCAT_SEARCH_URL=localhost:9200 to override".format(FATCAT_SEARCH_URL))
+@pytest.mark.skipif(
+ is_not_reachable(FATCAT_SEARCH_URL),
+ reason="{} not reachable, use e.g. FUZZYCAT_FATCAT_SEARCH_URL=localhost:9200 to override".
+ format(FATCAT_SEARCH_URL))
def test_match_release_fuzzy(es_client, caplog):
+ """
+ This test is tied to the current index contents, so if that changes, this
+ test may fail as well.
+ """
cases = (
("wtv64ahbdzgwnan7rllwr3nurm", 1),
("eqcgtpav3na5jh56o5vjsvb4ei", 1),
@@ -63,17 +72,35 @@ def test_match_release_fuzzy(es_client, caplog):
"ext_ids": {}
}, 5),
({
- "title": "The Future of Digital Scholarship",
- "contribs": [{
- "raw_name": "Costantino Thanos"
- }],
+ "title": "unlikelytitle",
+ "ext_ids": {}
+ }, 0),
+ ({
+ "title": "Imminent dystopia",
+ "ext_ids": {}
+ }, 2),
+ ({
+ "title": "",
+ "contribs": [{"raw_name": "Aristoteles"}],
"ext_ids": {}
}, 5),
+ # ({
+ # "title": "Letter",
+ # "contribs": [{"raw_name": "Claudel"}],
+ # "ext_ids": {}
+ # }, 1),
+ # ({
+ # "title": "The Future of Digital Scholarship",
+ # "contribs": [{
+ # "raw_name": "Costantino Thanos"
+ # }],
+ # "ext_ids": {}
+ # }, 5),
)
for i, (doc, count) in enumerate(cases):
entity = entity_from_dict(doc, ReleaseEntity)
result = match_release_fuzzy(entity, es=es_client)
with caplog.at_level(logging.INFO):
- logging.info("[{}] given {}, found {}, {}".format(i, entity.title, len(result),
+ logging.info("[{}] given title '{}', found {}, {}".format(i, entity.title, len(result),
[v.title for v in result]))
- assert len(result) == count
+ assert len(result) == count, doc
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 21b85a4..957203f 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -124,6 +124,7 @@ def test_es_compat_hits_total():
for r, expected in cases:
assert es_compat_hits_total(r) == expected
+
def test_clean_doi():
assert clean_doi(None) == None
assert clean_doi("blah") == None