-rwxr-xr-x  python/fatcat_harvest.py                         2
-rwxr-xr-x  python/fatcat_import.py                          4
-rwxr-xr-x  python/fatcat_ingest.py                          2
-rw-r--r--  python/fatcat_tools/harvest/harvest_common.py    4
-rw-r--r--  python/fatcat_tools/importers/common.py          2
-rw-r--r--  python/fatcat_tools/importers/crossref.py        4
-rw-r--r--  python/fatcat_tools/importers/datacite.py        8
-rwxr-xr-x  python/fatcat_transform.py                       2
-rw-r--r--  python/fatcat_web/__init__.py                    2
-rw-r--r--  python/fatcat_web/entity_helpers.py              2
-rw-r--r--  python/fatcat_web/search.py                      2
-rw-r--r--  python/tests/web_entity_views.py                 2
-rw-r--r--  python/tests/web_search.py                       4
13 files changed, 20 insertions(+), 20 deletions(-)
diff --git a/python/fatcat_harvest.py b/python/fatcat_harvest.py
index 58bef9ca..efd78685 100755
--- a/python/fatcat_harvest.py
+++ b/python/fatcat_harvest.py
@@ -83,7 +83,7 @@ def main():
help="Kafka topic namespace to use (eg, prod, qa, dev)")
parser.add_argument('--start-date',
default=None, type=mkdate,
- help="begining of harvest period")
+ help="beginning of harvest period")
parser.add_argument('--end-date',
default=None, type=mkdate,
help="end of harvest period")
diff --git a/python/fatcat_import.py b/python/fatcat_import.py
index 843685aa..c70cb426 100755
--- a/python/fatcat_import.py
+++ b/python/fatcat_import.py
@@ -414,7 +414,7 @@ def main():
help="whether postproc_status column must be '200'")
sub_arabesque_match.add_argument('--extid-type',
default="doi",
- help="identifer type in the database (eg, 'doi', 'pmcid'")
+ help="identifier type in the database (eg, 'doi', 'pmcid'")
sub_arabesque_match.add_argument('--crawl-id',
help="crawl ID (optionally included in editgroup metadata)")
sub_arabesque_match.add_argument('--default-link-rel',
@@ -422,7 +422,7 @@ def main():
help="default URL rel for matches (eg, 'publisher', 'web')")
sub_ingest_file = subparsers.add_parser('ingest-file-results',
- help="add/update flie entities linked to releases based on sandcrawler ingest results")
+ help="add/update file entities linked to releases based on sandcrawler ingest results")
sub_ingest_file.set_defaults(
func=run_ingest_file,
auth_var="FATCAT_AUTH_WORKER_CRAWL",
diff --git a/python/fatcat_ingest.py b/python/fatcat_ingest.py
index 6fda74c5..6c3c8859 100755
--- a/python/fatcat_ingest.py
+++ b/python/fatcat_ingest.py
@@ -183,7 +183,7 @@ def main():
help="list of Kafka brokers (host/port) to use")
parser.add_argument('--elasticsearch-endpoint',
default="https://search.fatcat.wiki",
- help="elasticsearch API. internal endpoint prefered, but public is default")
+ help="elasticsearch API. internal endpoint preferred, but public is default")
parser.add_argument('--env',
default="dev",
help="Kafka topic namespace to use (eg, prod, qa, dev)")
diff --git a/python/fatcat_tools/harvest/harvest_common.py b/python/fatcat_tools/harvest/harvest_common.py
index 310366bd..5f7aa084 100644
--- a/python/fatcat_tools/harvest/harvest_common.py
+++ b/python/fatcat_tools/harvest/harvest_common.py
@@ -133,7 +133,7 @@ class HarvestState:
def fail_fast(err, msg):
if err:
raise KafkaException(err)
- print("Commiting status to Kafka: {}".format(kafka_topic), file=sys.stderr)
+ print("Committing status to Kafka: {}".format(kafka_topic), file=sys.stderr)
producer_conf = kafka_config.copy()
producer_conf.update({
'delivery.report.only.error': True,
@@ -164,7 +164,7 @@ class HarvestState:
raise KafkaException(err)
conf = kafka_config.copy()
conf.update({
- 'group.id': 'dummy_init_group', # should never be commited
+ 'group.id': 'dummy_init_group', # should never be committed
'enable.auto.commit': False,
'auto.offset.reset': 'earliest',
'session.timeout.ms': 10000,
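For context, a sketch of the fail-fast producer pattern this hunk touches, assuming the confluent-kafka client; the broker address and topic name below are placeholders, and the real HarvestState code differs in detail:

    from confluent_kafka import Producer, KafkaException

    kafka_config = {'bootstrap.servers': 'localhost:9092'}  # assumed broker address
    kafka_topic = 'fatcat-example.harvest-state'             # illustrative topic name

    def fail_fast(err, msg):
        # delivery callback: raise instead of silently losing the state message
        if err is not None:
            raise KafkaException(err)

    producer_conf = kafka_config.copy()
    producer_conf.update({
        'delivery.report.only.error': True,
        'default.topic.config': {'request.required.acks': -1},  # wait for all replicas
    })
    producer = Producer(producer_conf)
    producer.produce(kafka_topic, b'{"date": "2020-01-01"}', on_delivery=fail_fast)
    producer.flush()  # delivery callbacks (and thus fail_fast) run here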
diff --git a/python/fatcat_tools/importers/common.py b/python/fatcat_tools/importers/common.py
index a84ce90f..694ef359 100644
--- a/python/fatcat_tools/importers/common.py
+++ b/python/fatcat_tools/importers/common.py
@@ -751,7 +751,7 @@ class KafkaJsonPusher(RecordPusher):
while True:
# Note: this is batch-oriented, because underlying importer is
# often batch-oriented, but this doesn't confirm that entire batch
- # has been pushed to fatcat before commiting offset. Eg, consider
+ # has been pushed to fatcat before committing offset. Eg, consider
# case where there is one update and thousands of creates;
# update would be lingering in importer, and if importer crashed
# never created.
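The ordering concern in that comment, consume a batch, hand all of it to the importer, then commit offsets, looks roughly like this sketch (assumes a confluent-kafka Consumer created with auto-commit disabled; 'consumer' and 'importer' are illustrative names, not the actual KafkaJsonPusher internals):

    import json

    while True:
        batch = consumer.consume(num_messages=100, timeout=3.0)
        if not batch:
            continue
        for msg in batch:
            if msg.error():
                raise IOError(msg.error())
            importer.push_record(json.loads(msg.value().decode('utf-8')))
        # commit only after the whole batch was handed over; anything still
        # buffered inside the importer can be lost on crash, as noted above
        consumer.commit(message=batch[-1], asynchronous=False)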
diff --git a/python/fatcat_tools/importers/crossref.py b/python/fatcat_tools/importers/crossref.py
index d8abf3eb..18703a1a 100644
--- a/python/fatcat_tools/importers/crossref.py
+++ b/python/fatcat_tools/importers/crossref.py
@@ -9,7 +9,7 @@ import fatcat_openapi_client
from .common import EntityImporter, clean
-# The docs/guide should be the cannonical home for these mappings; update there
+# The docs/guide should be the canonical home for these mappings; update there
# first
# Can get a list of Crossref types (with counts) via API:
# https://api.crossref.org/works?rows=0&facet=type-name:*
@@ -180,7 +180,7 @@ class CrossrefImporter(EntityImporter):
self.counts['skip-release-type'] += 1
return None
- # Do require the 'title' keys to exsit, as release entities do
+ # Do require the 'title' keys to exist, as release entities do
if (not 'title' in obj) or (not obj['title']):
self.counts['skip-blank-title'] += 1
return None
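A compact sketch of the map-then-skip logic around these counters (the type map here is abbreviated and illustrative; the canonical mapping lives in the docs/guide as noted above):

    from collections import Counter

    CROSSREF_TYPE_MAP = {
        'journal-article': 'article-journal',
        'proceedings-article': 'paper-conference',
        'book': 'book',
    }
    counts = Counter()

    def map_release_type(obj):
        release_type = CROSSREF_TYPE_MAP.get(obj.get('type'))
        if release_type is None:
            counts['skip-release-type'] += 1
            return None
        # Crossref returns 'title' as a list; require it to exist and be non-empty
        if not obj.get('title'):
            counts['skip-blank-title'] += 1
            return None
        return release_type

    assert map_release_type({'type': 'journal-article', 'title': ['An Example']}) == 'article-journal'
    assert map_release_type({'type': 'component'}) is None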
diff --git a/python/fatcat_tools/importers/datacite.py b/python/fatcat_tools/importers/datacite.py
index 4e382348..9250fc5e 100644
--- a/python/fatcat_tools/importers/datacite.py
+++ b/python/fatcat_tools/importers/datacite.py
@@ -3,7 +3,7 @@ Prototype importer for datacite.org data.
Example input document: https://api.datacite.org/dois/10.7916/d8-f93n-rk51
-Datacite being an aggregator, the data is heterogenous and exposes a couple of
+Datacite being an aggregator, the data is heterogeneous and exposes a couple of
problems in content and structure. A few fields have their own parsing
functions (parse_datacite_...), which may help testing.
"""
@@ -36,7 +36,7 @@ CONTAINER_TYPE_MAP = {
'Book Series': 'book-series',
}
-# The docs/guide should be the cannonical home for these mappings; update there
+# The docs/guide should be the canonical home for these mappings; update there
# first. Map various datacite type types to CSL-ish types. None means TODO or
# remove.
DATACITE_TYPE_MAP = {
@@ -227,7 +227,7 @@ class DataciteImporter(EntityImporter):
def lookup_ext_ids(self, doi):
"""
- Return dictionary of identifiers refering to the same things as the given DOI.
+ Return dictionary of identifiers referring to the same things as the given DOI.
"""
if self.extid_map_db is None:
return dict(core_id=None,
@@ -577,7 +577,7 @@ class DataciteImporter(EntityImporter):
# Include certain relations from relatedIdentifiers. Keeping the
# original structure of data here, which is a list of dicts, with
- # relation type, identifer and identifier type (mostly).
+ # relation type, identifier and identifier type (mostly).
relations = []
for rel in relIds:
if rel.get('relationType') in ('IsPartOf', 'Reviews', 'Continues',
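An illustrative sketch of that relatedIdentifiers filtering, with made-up input (the real list of kept relation types is longer than shown here):

    rel_ids = [
        {'relationType': 'IsPartOf', 'relatedIdentifier': '10.1234/series-example',
         'relatedIdentifierType': 'DOI'},
        {'relationType': 'Cites', 'relatedIdentifier': '10.1234/cited-example',
         'relatedIdentifierType': 'DOI'},
    ]
    KEEP_RELATIONS = ('IsPartOf', 'Reviews', 'Continues')
    # keep the original dict structure (relation type, identifier, identifier
    # type), just filtered down to the relation types of interest
    relations = [rel for rel in rel_ids if rel.get('relationType') in KEEP_RELATIONS]
    assert [r['relationType'] for r in relations] == ['IsPartOf']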
diff --git a/python/fatcat_transform.py b/python/fatcat_transform.py
index 23a56109..14595670 100755
--- a/python/fatcat_transform.py
+++ b/python/fatcat_transform.py
@@ -1,7 +1,7 @@
#!/usr/bin/env python3
"""
-Utility script for doing bulk conversion/tranforms of entity JSON schema to
+Utility script for doing bulk conversion/transforms of entity JSON schema to
other formats
"""
diff --git a/python/fatcat_web/__init__.py b/python/fatcat_web/__init__.py
index 50757858..56a2e020 100644
--- a/python/fatcat_web/__init__.py
+++ b/python/fatcat_web/__init__.py
@@ -61,7 +61,7 @@ else:
print("No privileged token found")
priv_api = None
-# TODO: refactor integration so this doesn't always need to be definied. If
+# TODO: refactor integration so this doesn't always need to be defined. If
# key/secret are empty, library will not init; if init is skipped, get
# undefined errors elsewhere.
mwoauth = MWOAuth(
diff --git a/python/fatcat_web/entity_helpers.py b/python/fatcat_web/entity_helpers.py
index 591dda80..520bb832 100644
--- a/python/fatcat_web/entity_helpers.py
+++ b/python/fatcat_web/entity_helpers.py
@@ -164,7 +164,7 @@ def generic_get_editgroup_entity(editgroup, entity_type, ident):
edit = e
break
if not revision_id:
- # couldn't find relevent edit in this editgroup
+ # couldn't find relevant edit in this editgroup
abort(404)
entity = generic_get_entity_revision(entity_type, revision_id)
diff --git a/python/fatcat_web/search.py b/python/fatcat_web/search.py
index 6b2b9cc1..c1246d22 100644
--- a/python/fatcat_web/search.py
+++ b/python/fatcat_web/search.py
@@ -299,7 +299,7 @@ def get_elastic_container_histogram(ident):
"""
Fetches a stacked histogram of per-year release counts for a container.
- Filters to the past 500 years (at most), or about 1000 vaules.
+ Filters to the past 500 years (at most), or about 1000 values.
Returns a list of tuples:
(year, in_ia, count)
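A sketch of consuming that documented return shape, with made-up values, to show how the (year, in_ia, count) tuples stack per year:

    histogram = [
        (2018, False, 40),
        (2018, True, 310),
        (2019, False, 25),
        (2019, True, 290),
    ]
    by_year = {}
    for year, in_ia, count in histogram:
        total, preserved = by_year.get(year, (0, 0))
        by_year[year] = (total + count, preserved + (count if in_ia else 0))
    for year, (total, preserved) in sorted(by_year.items()):
        # e.g. "2018 350 89% preserved in IA"
        print(year, total, "{:.0%} preserved in IA".format(preserved / total))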
diff --git a/python/tests/web_entity_views.py b/python/tests/web_entity_views.py
index 23a2b33b..cc4c498f 100644
--- a/python/tests/web_entity_views.py
+++ b/python/tests/web_entity_views.py
@@ -63,7 +63,7 @@ def test_entity_basics(app):
# TODO: redirects and deleted entities
def test_web_deleted_release(app, api):
- # specific regresion test for view of a deleted release
+ # specific regression test for view of a deleted release
# create release
eg = quick_eg(api)
diff --git a/python/tests/web_search.py b/python/tests/web_search.py
index 19e2c29f..24b817dc 100644
--- a/python/tests/web_search.py
+++ b/python/tests/web_search.py
@@ -75,7 +75,7 @@ def test_stats(app):
json=elastic_resp3.copy(), status=200)
rv = app.get('/stats')
assert rv.status_code == 200
- # TODO: probe these reponses better
+ # TODO: probe these responses better
@responses.activate
def test_stats_json(app):
@@ -112,7 +112,7 @@ def test_container_stats(app):
json=elastic_resp, status=200)
rv = app.get('/container/issnl/1234-5678/stats.json')
assert rv.status_code == 200
- # TODO: probe this reponse better
+ # TODO: probe this response better
# TODO: container stats
# TODO: container ISSN-L query
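For the two "probe this response better" TODOs, a sketch of what a fuller assertion could look like; the mocked elasticsearch URL and the response fields asserted below are assumptions, following the app/elastic_resp fixtures already used in this test module:

    @responses.activate
    def test_container_stats_body(app):
        responses.add(responses.GET,
            'http://localhost:9200/fatcat_release/_search',
            json=elastic_resp.copy(), status=200)
        rv = app.get('/container/issnl/1234-5678/stats.json')
        assert rv.status_code == 200
        stats = rv.get_json()
        # go beyond the status code: check the body is JSON with expected keys
        assert isinstance(stats, dict)
        assert 'total' in stats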