path: root/python/fatcat_tools
author     Bryan Newbold <bnewbold@robocracy.org>  2021-11-02 17:55:15 -0700
committer  Bryan Newbold <bnewbold@robocracy.org>  2021-11-02 17:55:22 -0700
commit     6fa2d38be243531747241a3ae602069d507368d9 (patch)
tree       7cc81446a97a372640f6a189f09b88fa466e77ce /python/fatcat_tools
parent     367b06f64546e4533662017c9dbe72aca175a294 (diff)
download   fatcat-6fa2d38be243531747241a3ae602069d507368d9.tar.gz
           fatcat-6fa2d38be243531747241a3ae602069d507368d9.zip
lint: simple, safe inline lint fixes
'==' vs 'is'; 'not a in b' vs 'a not in b'; etc
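
For context, the two patterns the linter flags are equality comparison against the None/True/False singletons and negated membership tests. A minimal, hypothetical sketch of the before/after forms (not code from this repository):

    row = {"hit": True, "title": "example"}

    # '==' vs 'is': None/True/False are singletons, so identity comparison is preferred
    assert row.get("hit") == True   # the form the linter flags
    assert row.get("hit") is True   # the preferred form, equivalent for this value

    # 'not a in b' vs 'a not in b': same result, but the latter reads as a single operator
    assert not "missing" in row     # the form the linter flags
    assert "missing" not in row     # the preferred form
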
Diffstat (limited to 'python/fatcat_tools')
-rw-r--r--  python/fatcat_tools/cleanups/files.py               2
-rw-r--r--  python/fatcat_tools/harvest/harvest_common.py       2
-rw-r--r--  python/fatcat_tools/harvest/pubmed.py               2
-rw-r--r--  python/fatcat_tools/importers/arabesque.py          4
-rw-r--r--  python/fatcat_tools/importers/crossref.py           3
-rw-r--r--  python/fatcat_tools/importers/datacite.py           4
-rw-r--r--  python/fatcat_tools/importers/dblp_release.py       4
-rw-r--r--  python/fatcat_tools/importers/doaj_article.py       2
-rw-r--r--  python/fatcat_tools/importers/file_meta.py          2
-rw-r--r--  python/fatcat_tools/importers/fileset_generic.py    2
-rw-r--r--  python/fatcat_tools/importers/ingest.py            12
-rw-r--r--  python/fatcat_tools/importers/matched.py            4
-rw-r--r--  python/fatcat_tools/importers/orcid.py              2
-rw-r--r--  python/fatcat_tools/importers/pubmed.py             2
-rw-r--r--  python/fatcat_tools/importers/shadow.py             2
-rw-r--r--  python/fatcat_tools/normal.py                     104
-rw-r--r--  python/fatcat_tools/reviewers/review_common.py      2
-rw-r--r--  python/fatcat_tools/transforms/elasticsearch.py    10
18 files changed, 82 insertions, 83 deletions
diff --git a/python/fatcat_tools/cleanups/files.py b/python/fatcat_tools/cleanups/files.py
index a40e4a28..10dd45cc 100644
--- a/python/fatcat_tools/cleanups/files.py
+++ b/python/fatcat_tools/cleanups/files.py
@@ -27,7 +27,7 @@ class FileCleaner(EntityCleaner):
"""
# URL has ://web.archive.org/web/None/ link => delete URL
- entity.urls = [u for u in entity.urls if not '://web.archive.org/web/None/' in u.url]
+ entity.urls = [u for u in entity.urls if '://web.archive.org/web/None/' not in u.url]
# URL has ://archive.org/ link with rel=repository => rel=archive
for u in entity.urls:
diff --git a/python/fatcat_tools/harvest/harvest_common.py b/python/fatcat_tools/harvest/harvest_common.py
index bdae3054..5e7702d9 100644
--- a/python/fatcat_tools/harvest/harvest_common.py
+++ b/python/fatcat_tools/harvest/harvest_common.py
@@ -77,7 +77,7 @@ class HarvestState:
current = start_date
while current <= end_date:
- if not current in self.completed:
+ if current not in self.completed:
self.to_process.add(current)
current += datetime.timedelta(days=1)
diff --git a/python/fatcat_tools/harvest/pubmed.py b/python/fatcat_tools/harvest/pubmed.py
index 92798a99..ee55f4eb 100644
--- a/python/fatcat_tools/harvest/pubmed.py
+++ b/python/fatcat_tools/harvest/pubmed.py
@@ -301,7 +301,7 @@ def xmlstream(filename, tag, encoding='utf-8'):
Known vulnerabilities: https://docs.python.org/3/library/xml.html#xml-vulnerabilities
"""
def strip_ns(tag):
- if not '}' in tag:
+ if '}' not in tag:
return tag
return tag.split('}')[1]
diff --git a/python/fatcat_tools/importers/arabesque.py b/python/fatcat_tools/importers/arabesque.py
index 79fb10d3..ccf35446 100644
--- a/python/fatcat_tools/importers/arabesque.py
+++ b/python/fatcat_tools/importers/arabesque.py
@@ -62,13 +62,13 @@ class ArabesqueMatchImporter(EntityImporter):
def want(self, row):
if self.require_grobid and not row['postproc_status'] == "200":
return False
- if (row['hit'] == True
+ if (row['hit'] is True
and row['final_sha1']
and row['final_timestamp']
and row['final_timestamp'] != "-"
and len(row['final_timestamp']) == 14
and row['final_mimetype']
- and row['hit'] == True
+ and row['hit'] is True
and row['identifier']):
return True
else:
diff --git a/python/fatcat_tools/importers/crossref.py b/python/fatcat_tools/importers/crossref.py
index bd72a781..38c19a63 100644
--- a/python/fatcat_tools/importers/crossref.py
+++ b/python/fatcat_tools/importers/crossref.py
@@ -205,7 +205,7 @@ class CrossrefImporter(EntityImporter):
return None
# Do require the 'title' keys to exist, as release entities do
- if (not 'title' in obj) or (not obj['title']):
+ if ('title' not in obj) or (not obj['title']):
self.counts['skip-blank-title'] += 1
return None
@@ -429,7 +429,6 @@ class CrossrefImporter(EntityImporter):
release_year = raw_date[0]
release_date = None
-
original_title: Optional[str] = None
if obj.get('original-title'):
ot = obj.get('original-title')
diff --git a/python/fatcat_tools/importers/datacite.py b/python/fatcat_tools/importers/datacite.py
index eb49596f..1593e6f8 100644
--- a/python/fatcat_tools/importers/datacite.py
+++ b/python/fatcat_tools/importers/datacite.py
@@ -319,7 +319,7 @@ class DataciteImporter(EntityImporter):
# 17871 | translator
# 10870584 |
# (4 rows)
- #
+ #
# Related: https://guide.fatcat.wiki/entity_release.html -- role
# (string, of a set): the type of contribution, from a controlled
# vocabulary. TODO: vocabulary needs review.
@@ -1046,7 +1046,7 @@ def find_original_language_title(item, min_length=4, max_questionmarks=3):
Example input: {'title': 'Some title', 'original_language_title': 'Some title'}
"""
- if not 'original_language_title' in item:
+ if 'original_language_title' not in item:
return None
title = item.get('title')
if not title:
diff --git a/python/fatcat_tools/importers/dblp_release.py b/python/fatcat_tools/importers/dblp_release.py
index 670f190b..fa5cb842 100644
--- a/python/fatcat_tools/importers/dblp_release.py
+++ b/python/fatcat_tools/importers/dblp_release.py
@@ -93,7 +93,7 @@ class DblpReleaseImporter(EntityImporter):
return self._dblp_container_map.get(prefix)
def want(self, xml_elem):
- if not xml_elem.name in self.ELEMENT_TYPES:
+ if xml_elem.name not in self.ELEMENT_TYPES:
self.counts['skip-type'] += 1
return False
if not xml_elem.get('key'):
@@ -243,7 +243,7 @@ class DblpReleaseImporter(EntityImporter):
# dblp-specific extra
dblp_extra = dict(type=dblp_type)
note = clean_str(xml_elem.note and xml_elem.note.text)
- if note and not 'base-search.net' in note:
+ if note and 'base-search.net' not in note:
dblp_extra['note'] = note
if part_of_key:
dblp_extra['part_of_key'] = part_of_key
diff --git a/python/fatcat_tools/importers/doaj_article.py b/python/fatcat_tools/importers/doaj_article.py
index 191a65d8..833089ae 100644
--- a/python/fatcat_tools/importers/doaj_article.py
+++ b/python/fatcat_tools/importers/doaj_article.py
@@ -73,7 +73,7 @@ class DoajArticleImporter(EntityImporter):
}
"""
- if not obj or not isinstance(obj, dict) or not 'bibjson' in obj:
+ if not obj or not isinstance(obj, dict) or 'bibjson' not in obj:
self.counts['skip-empty'] += 1
return None
diff --git a/python/fatcat_tools/importers/file_meta.py b/python/fatcat_tools/importers/file_meta.py
index 9f4b9e06..3d9f5923 100644
--- a/python/fatcat_tools/importers/file_meta.py
+++ b/python/fatcat_tools/importers/file_meta.py
@@ -35,7 +35,7 @@ class FileMetaImporter(EntityImporter):
def parse_record(self, row):
# bezerk mode doesn't make sense for this importer
- assert self.bezerk_mode == False
+ assert self.bezerk_mode is False
file_meta = row
fe = fatcat_openapi_client.FileEntity(
diff --git a/python/fatcat_tools/importers/fileset_generic.py b/python/fatcat_tools/importers/fileset_generic.py
index f0ad5460..13352fb2 100644
--- a/python/fatcat_tools/importers/fileset_generic.py
+++ b/python/fatcat_tools/importers/fileset_generic.py
@@ -30,7 +30,7 @@ class FilesetImporter(EntityImporter):
**kwargs)
# bezerk mode doesn't make sense for this importer
- assert self.bezerk_mode == False
+ assert self.bezerk_mode is False
def want(self, row):
if not row.get('release_ids'):
diff --git a/python/fatcat_tools/importers/ingest.py b/python/fatcat_tools/importers/ingest.py
index cb663330..4d4efc0a 100644
--- a/python/fatcat_tools/importers/ingest.py
+++ b/python/fatcat_tools/importers/ingest.py
@@ -78,7 +78,7 @@ class IngestFileResultImporter(EntityImporter):
Sandcrawler ingest-specific part of want(). Generic across file and
webcapture ingest.
"""
- if row.get('hit') != True:
+ if row.get('hit') is not True:
self.counts['skip-hit'] += 1
return False
source = row['request'].get('ingest_request_source')
@@ -178,9 +178,9 @@ class IngestFileResultImporter(EntityImporter):
}
# work around old schema
- if not 'terminal_url' in terminal:
+ if 'terminal_url' not in terminal:
terminal['terminal_url'] = terminal['url']
- if not 'terminal_dt' in terminal:
+ if 'terminal_dt' not in terminal:
terminal['terminal_dt'] = terminal['dt']
# convert CDX-style digits to ISO-style timestamp
@@ -358,7 +358,7 @@ class SavePaperNowFileImporter(IngestFileResultImporter):
self.counts['skip-not-savepapernow'] += 1
return False
- if row.get('hit') != True:
+ if row.get('hit') is not True:
self.counts['skip-hit'] += 1
return False
@@ -459,7 +459,7 @@ class IngestWebResultImporter(IngestFileResultImporter):
for resource in row.get('html_resources', []):
timestamp = resource['timestamp']
- if not "+" in timestamp and not "Z" in timestamp:
+ if "+" not in timestamp and "Z" not in timestamp:
timestamp += "Z"
wc_cdx.append(fatcat_openapi_client.WebcaptureCdxLine(
surt=resource['surt'],
@@ -808,7 +808,7 @@ class SavePaperNowFilesetImporter(IngestFilesetResultImporter):
self.counts['skip-not-savepapernow'] += 1
return False
- if row.get('hit') != True:
+ if row.get('hit') is not True:
self.counts['skip-hit'] += 1
return False
diff --git a/python/fatcat_tools/importers/matched.py b/python/fatcat_tools/importers/matched.py
index e0e4fc3c..09807276 100644
--- a/python/fatcat_tools/importers/matched.py
+++ b/python/fatcat_tools/importers/matched.py
@@ -94,7 +94,7 @@ class MatchedImporter(EntityImporter):
urls = set()
for url in obj.get('urls', []):
url = make_rel_url(url, default_link_rel=self.default_link_rel)
- if url != None:
+ if url is not None:
urls.add(url)
for cdx in obj.get('cdx', []):
original = cdx['url']
@@ -104,7 +104,7 @@ class MatchedImporter(EntityImporter):
original)
urls.add(("webarchive", wayback))
url = make_rel_url(original, default_link_rel=self.default_link_rel)
- if url != None:
+ if url is not None:
urls.add(url)
urls = [fatcat_openapi_client.FileUrl(rel=rel, url=url) for (rel, url) in urls]
if len(urls) == 0:
diff --git a/python/fatcat_tools/importers/orcid.py b/python/fatcat_tools/importers/orcid.py
index 21feea9e..4412a46d 100644
--- a/python/fatcat_tools/importers/orcid.py
+++ b/python/fatcat_tools/importers/orcid.py
@@ -40,7 +40,7 @@ class OrcidImporter(EntityImporter):
returns a CreatorEntity
"""
- if not 'person' in obj:
+ if 'person' not in obj:
return False
name = obj['person']['name']
diff --git a/python/fatcat_tools/importers/pubmed.py b/python/fatcat_tools/importers/pubmed.py
index c9907c5e..00ad54d0 100644
--- a/python/fatcat_tools/importers/pubmed.py
+++ b/python/fatcat_tools/importers/pubmed.py
@@ -590,7 +590,7 @@ class PubmedImporter(EntityImporter):
orcid = orcid.replace("http://orcid.org/", "")
elif orcid.startswith("https://orcid.org/"):
orcid = orcid.replace("https://orcid.org/", "")
- elif not '-' in orcid:
+ elif '-' not in orcid:
orcid = "{}-{}-{}-{}".format(
orcid[0:4],
orcid[4:8],
diff --git a/python/fatcat_tools/importers/shadow.py b/python/fatcat_tools/importers/shadow.py
index fa9b4d10..77205cee 100644
--- a/python/fatcat_tools/importers/shadow.py
+++ b/python/fatcat_tools/importers/shadow.py
@@ -95,7 +95,7 @@ class ShadowLibraryImporter(EntityImporter):
urls = []
if obj.get('cdx'):
url = make_rel_url(obj['cdx']['url'], default_link_rel=self.default_link_rel)
- if url != None:
+ if url is not None:
urls.append(url)
wayback = "https://web.archive.org/web/{}/{}".format(
obj['cdx']['datetime'],
diff --git a/python/fatcat_tools/normal.py b/python/fatcat_tools/normal.py
index eb61c326..24c0bb0a 100644
--- a/python/fatcat_tools/normal.py
+++ b/python/fatcat_tools/normal.py
@@ -74,19 +74,19 @@ def test_clean_doi():
assert clean_doi("10.1234/asdf ") == "10.1234/asdf"
assert clean_doi("10.1037//0002-9432.72.1.50") == "10.1037/0002-9432.72.1.50"
assert clean_doi("10.1037/0002-9432.72.1.50") == "10.1037/0002-9432.72.1.50"
- assert clean_doi("10.23750/abm.v88i2 -s.6506") == None
- assert clean_doi("10.17167/mksz.2017.2.129–155") == None
+ assert clean_doi("10.23750/abm.v88i2 -s.6506") is None
+ assert clean_doi("10.17167/mksz.2017.2.129–155") is None
assert clean_doi("http://doi.org/10.1234/asdf ") == "10.1234/asdf"
assert clean_doi("https://dx.doi.org/10.1234/asdf ") == "10.1234/asdf"
assert clean_doi("doi:10.1234/asdf ") == "10.1234/asdf"
- assert clean_doi("doi:10.1234/ asdf ") == None
- assert clean_doi("10.4149/gpb¬_2017042") == None # "logical negation" character
- assert clean_doi("10.6002/ect.2020.häyry") == None # this example via pubmed (pmid:32519616)
- assert clean_doi("10.30466/vrf.2019.98547.2350\u200e") == None
- assert clean_doi("10.12016/j.issn.2096⁃1456.2017.06.014") == None
- assert clean_doi("10.4025/diálogos.v17i2.36030") == None
- assert clean_doi("10.19027/jai.10.106‒115") == None
- assert clean_doi("10.15673/атбп2312-3125.17/2014.26332") == None
+ assert clean_doi("doi:10.1234/ asdf ") is None
+ assert clean_doi("10.4149/gpb¬_2017042") is None # "logical negation" character
+ assert clean_doi("10.6002/ect.2020.häyry") is None # this example via pubmed (pmid:32519616)
+ assert clean_doi("10.30466/vrf.2019.98547.2350\u200e") is None
+ assert clean_doi("10.12016/j.issn.2096⁃1456.2017.06.014") is None
+ assert clean_doi("10.4025/diálogos.v17i2.36030") is None
+ assert clean_doi("10.19027/jai.10.106‒115") is None
+ assert clean_doi("10.15673/атбп2312-3125.17/2014.26332") is None
assert clean_doi("10.7326/M20-6817") == "10.7326/m20-6817"
@@ -129,17 +129,17 @@ def test_clean_arxiv_id():
assert clean_arxiv_id("arxiv:0806.2878v1") == "0806.2878v1"
assert clean_arxiv_id("arXiv:0806.2878v1") == "0806.2878v1"
- assert clean_arxiv_id("hep-TH/9901001v1") == None
- assert clean_arxiv_id("hßp-th/9901001v1") == None
- assert clean_arxiv_id("math.CA/06l1800v2") == None
- assert clean_arxiv_id("mßth.ca/0611800v2") == None
- assert clean_arxiv_id("MATH.CA/0611800v2") == None
+ assert clean_arxiv_id("hep-TH/9901001v1") is None
+ assert clean_arxiv_id("hßp-th/9901001v1") is None
+ assert clean_arxiv_id("math.CA/06l1800v2") is None
+ assert clean_arxiv_id("mßth.ca/0611800v2") is None
+ assert clean_arxiv_id("MATH.CA/0611800v2") is None
assert clean_arxiv_id("0806.2878v23") == "0806.2878v23" # ?
- assert clean_arxiv_id("0806.2878v") == None
+ assert clean_arxiv_id("0806.2878v") is None
assert clean_arxiv_id("0806.2878") == "0806.2878"
- assert clean_arxiv_id("006.2878v1") == None
- assert clean_arxiv_id("0806.v1") == None
- assert clean_arxiv_id("08062878v1") == None
+ assert clean_arxiv_id("006.2878v1") is None
+ assert clean_arxiv_id("0806.v1") is None
+ assert clean_arxiv_id("08062878v1") is None
def clean_wikidata_qid(raw):
if not raw:
@@ -155,13 +155,13 @@ def test_clean_wikidata_qid():
assert clean_wikidata_qid("Q1234") == "Q1234"
assert clean_wikidata_qid("Q1") == "Q1"
assert clean_wikidata_qid(" Q1234 ") == "Q1234"
- assert clean_wikidata_qid(" Q1 234 ") == None
- assert clean_wikidata_qid("q1234") == None
- assert clean_wikidata_qid("1234 ") == None
- assert clean_wikidata_qid("Q0123") == None
- assert clean_wikidata_qid("PMC123") == None
- assert clean_wikidata_qid("qfba3") == None
- assert clean_wikidata_qid("") == None
+ assert clean_wikidata_qid(" Q1 234 ") is None
+ assert clean_wikidata_qid("q1234") is None
+ assert clean_wikidata_qid("1234 ") is None
+ assert clean_wikidata_qid("Q0123") is None
+ assert clean_wikidata_qid("PMC123") is None
+ assert clean_wikidata_qid("qfba3") is None
+ assert clean_wikidata_qid("") is None
def clean_pmid(raw: str) -> Optional[str]:
if not raw:
@@ -176,9 +176,9 @@ def clean_pmid(raw: str) -> Optional[str]:
def test_clean_pmid():
assert clean_pmid("1234") == "1234"
assert clean_pmid("1234 ") == "1234"
- assert clean_pmid("PMC123") == None
- assert clean_pmid("qfba3") == None
- assert clean_pmid("") == None
+ assert clean_pmid("PMC123") is None
+ assert clean_pmid("qfba3") is None
+ assert clean_pmid("") is None
def clean_pmcid(raw: str) -> Optional[str]:
if not raw:
@@ -206,9 +206,9 @@ def clean_sha1(raw: str) -> Optional[str]:
def test_clean_sha1():
assert clean_sha1("0fba3fba0e1937aa0297de3836b768b5dfb23d7b") == "0fba3fba0e1937aa0297de3836b768b5dfb23d7b"
assert clean_sha1("0fba3fba0e1937aa0297de3836b768b5dfb23d7b ") == "0fba3fba0e1937aa0297de3836b768b5dfb23d7b"
- assert clean_sha1("fba3fba0e1937aa0297de3836b768b5dfb23d7b") == None
- assert clean_sha1("qfba3fba0e1937aa0297de3836b768b5dfb23d7b") == None
- assert clean_sha1("0fba3fb a0e1937aa0297de3836b768b5dfb23d7b") == None
+ assert clean_sha1("fba3fba0e1937aa0297de3836b768b5dfb23d7b") is None
+ assert clean_sha1("qfba3fba0e1937aa0297de3836b768b5dfb23d7b") is None
+ assert clean_sha1("0fba3fb a0e1937aa0297de3836b768b5dfb23d7b") is None
def clean_sha256(raw: str) -> Optional[str]:
raw = raw.strip().lower()
@@ -223,7 +223,7 @@ def clean_sha256(raw: str) -> Optional[str]:
def test_clean_sha256():
assert clean_sha256("6cc853f2ae75696b2e45f476c76b946b0fc2df7c52bb38287cb074aceb77bc7f") == "6cc853f2ae75696b2e45f476c76b946b0fc2df7c52bb38287cb074aceb77bc7f"
- assert clean_sha256("0fba3fba0e1937aa0297de3836b768b5dfb23d7b") == None
+ assert clean_sha256("0fba3fba0e1937aa0297de3836b768b5dfb23d7b") is None
ISSN_REGEX = re.compile(r"^\d{4}-\d{3}[0-9X]$")
@@ -240,8 +240,8 @@ def clean_issn(raw: str) -> Optional[str]:
def test_clean_issn():
assert clean_issn("1234-4567") == "1234-4567"
assert clean_issn("1234-456X") == "1234-456X"
- assert clean_issn("134-4567") == None
- assert clean_issn("123X-4567") == None
+ assert clean_issn("134-4567") is None
+ assert clean_issn("123X-4567") is None
ISBN13_REGEX = re.compile(r"^97(?:8|9)-\d{1,5}-\d{1,7}-\d{1,6}-\d$")
@@ -257,7 +257,7 @@ def test_clean_isbn13():
assert clean_isbn13("978-1-56619-909-4") == "978-1-56619-909-4"
assert clean_isbn13("978-1-4028-9462-6") == "978-1-4028-9462-6"
assert clean_isbn13("978-1-56619-909-4 ") == "978-1-56619-909-4"
- assert clean_isbn13("9781566199094") == None
+ assert clean_isbn13("9781566199094") is None
ORCID_REGEX = re.compile(r"^\d{4}-\d{4}-\d{4}-\d{3}[\dX]$")
@@ -273,8 +273,8 @@ def test_clean_orcid():
assert clean_orcid("0123-4567-3456-6789") == "0123-4567-3456-6789"
assert clean_orcid("0123-4567-3456-678X") == "0123-4567-3456-678X"
assert clean_orcid("0123-4567-3456-6789 ") == "0123-4567-3456-6789"
- assert clean_orcid("01234567-3456-6780") == None
- assert clean_orcid("0x23-4567-3456-6780") == None
+ assert clean_orcid("01234567-3456-6780") is None
+ assert clean_orcid("0x23-4567-3456-6780") is None
HDL_REGEX = re.compile(r"^\d+(\.\d+)*/\S+$")
@@ -304,10 +304,10 @@ def test_clean_hdl():
assert clean_hdl("http://hdl.handle.net/20.500.23456/ABC/DUMMY") == "20.500.23456/abc/dummy"
assert clean_hdl("21.1234/aksjdfh") == "21.1234/aksjdfh"
assert clean_hdl("2381/12775") == "2381/12775"
- assert clean_hdl("10.1234/aksjdfh") == None
- assert clean_hdl("20.1234") == None
- assert clean_hdl("20.1234/") == None
- assert clean_hdl("20./asdf") == None
+ assert clean_hdl("10.1234/aksjdfh") is None
+ assert clean_hdl("20.1234") is None
+ assert clean_hdl("20.1234/") is None
+ assert clean_hdl("20./asdf") is None
def clean_str(thing: Optional[str], force_xml: bool = False) -> Optional[str]:
@@ -337,9 +337,9 @@ def clean_str(thing: Optional[str], force_xml: bool = False) -> Optional[str]:
def test_clean_str():
- assert clean_str(None) == None
- assert clean_str('') == None
- assert clean_str('1') == None
+ assert clean_str(None) is None
+ assert clean_str('') is None
+ assert clean_str('1') is None
assert clean_str('123') == '123'
assert clean_str('a&amp;b') == 'a&b'
assert clean_str('<b>a&amp;b</b>') == '<b>a&amp;b</b>'
@@ -410,9 +410,9 @@ def parse_month(raw: Optional[str]) -> Optional[int]:
def test_parse_month() -> None:
- assert parse_month(None) == None
- assert parse_month("") == None
- assert parse_month("0") == None
+ assert parse_month(None) is None
+ assert parse_month("") is None
+ assert parse_month("0") is None
assert parse_month("10") == 10
assert parse_month("jan") == 1
assert parse_month("September") == 9
@@ -435,7 +435,7 @@ def detect_text_lang(raw: str) -> Optional[str]:
return None
def test_detect_text_lang() -> None:
- assert detect_text_lang("") == None
+ assert detect_text_lang("") is None
EN_SAMPLE = "this is a string of English text for testing"
assert detect_text_lang(EN_SAMPLE) == "en"
JA_SAMPLE = "モーラの種類は、以下に示すように111程度存在する。ただし、研究者により数え方が少しずつ異なる。"
@@ -465,9 +465,9 @@ def parse_lang_name(raw: Optional[str]) -> Optional[str]:
def test_parse_lang_name() -> None:
- assert parse_lang_name(None) == None
- assert parse_lang_name("") == None
- assert parse_lang_name("asdf ") == None
+ assert parse_lang_name(None) is None
+ assert parse_lang_name("") is None
+ assert parse_lang_name("asdf ") is None
assert parse_lang_name("english") == "en"
assert parse_lang_name("ENGLISH") == "en"
assert parse_lang_name("asdf blah") is None
diff --git a/python/fatcat_tools/reviewers/review_common.py b/python/fatcat_tools/reviewers/review_common.py
index 232dc57d..b4930c19 100644
--- a/python/fatcat_tools/reviewers/review_common.py
+++ b/python/fatcat_tools/reviewers/review_common.py
@@ -92,7 +92,7 @@ class ReviewBot:
return annotation
def run(self, since=None):
- if since == None:
+ if since is None:
since = datetime.datetime.utcnow()
while True:
# XXX: better isoformat conversion?
diff --git a/python/fatcat_tools/transforms/elasticsearch.py b/python/fatcat_tools/transforms/elasticsearch.py
index bfc18f83..ec5891c3 100644
--- a/python/fatcat_tools/transforms/elasticsearch.py
+++ b/python/fatcat_tools/transforms/elasticsearch.py
@@ -1,6 +1,6 @@
import datetime
-from typing import Dict, List, Any, Optional
+from typing import Dict, Any, Optional
import tldextract
@@ -166,7 +166,7 @@ def release_to_elasticsearch(entity: ReleaseEntity, force_bool: bool = True) ->
if extra.get('is_oa'):
# NOTE: not actually setting this anywhere... but could
t['is_oa'] = True
- if extra.get('is_work_alias') != None:
+ if extra.get('is_work_alias') is not None:
t['is_work_alias'] = bool(extra.get('is_work_alias'))
if extra.get('longtail_oa'):
# sometimes set by GROBID/matcher
@@ -214,7 +214,7 @@ def release_to_elasticsearch(entity: ReleaseEntity, force_bool: bool = True) ->
for k in ('crossref', 'datacite', 'jalc'):
if k in extra:
t['doi_registrar'] = k
- if not 'doi_registrar' in t:
+ if 'doi_registrar' not in t:
t['doi_registrar'] = 'crossref'
if t['doi']:
@@ -511,12 +511,12 @@ def container_to_elasticsearch(entity, force_bool=True, stats=None):
def _type_of_edit(edit: EntityEdit) -> str:
- if edit.revision == None and edit.redirect_ident == None:
+ if edit.revision is None and edit.redirect_ident is None:
return 'delete'
elif edit.redirect_ident:
# redirect
return 'update'
- elif edit.prev_revision == None and edit.redirect_ident == None and edit.revision:
+ elif edit.prev_revision is None and edit.redirect_ident is None and edit.revision:
return 'create'
else:
return 'update'