From 6fa2d38be243531747241a3ae602069d507368d9 Mon Sep 17 00:00:00 2001
From: Bryan Newbold
Date: Tue, 2 Nov 2021 17:55:15 -0700
Subject: lint: simple, safe inline lint fixes

'==' vs 'is'; 'not a in b' vs 'a not in b'; etc
---
 python/fatcat_tools/cleanups/files.py            |   2 +-
 python/fatcat_tools/harvest/harvest_common.py    |   2 +-
 python/fatcat_tools/harvest/pubmed.py            |   2 +-
 python/fatcat_tools/importers/arabesque.py       |   4 +-
 python/fatcat_tools/importers/crossref.py        |   3 +-
 python/fatcat_tools/importers/datacite.py        |   4 +-
 python/fatcat_tools/importers/dblp_release.py    |   4 +-
 python/fatcat_tools/importers/doaj_article.py    |   2 +-
 python/fatcat_tools/importers/file_meta.py       |   2 +-
 python/fatcat_tools/importers/fileset_generic.py |   2 +-
 python/fatcat_tools/importers/ingest.py          |  12 +-
 python/fatcat_tools/importers/matched.py         |   4 +-
 python/fatcat_tools/importers/orcid.py           |   2 +-
 python/fatcat_tools/importers/pubmed.py          |   2 +-
 python/fatcat_tools/importers/shadow.py          |   2 +-
 python/fatcat_tools/normal.py                    | 104 ++++++++++----------
 python/fatcat_tools/reviewers/review_common.py   |   2 +-
 python/fatcat_tools/transforms/elasticsearch.py  |  10 +-
 python/fatcat_web/editing_routes.py              |  14 +--
 python/fatcat_web/entity_helpers.py              |   4 +-
 python/fatcat_web/routes.py                      |   4 +-
 python/fatcat_web/search.py                      |   4 +-
 python/tests/api_filesets.py                     |   2 +-
 python/tests/citation_efficiency.py              |   4 +-
 python/tests/clean_files.py                      |   2 +-
 python/tests/import_arxiv.py                     |   8 +-
 python/tests/import_common.py                    |   6 +-
 python/tests/import_crossref.py                  |   2 +-
 python/tests/import_datacite.py                  |  18 ++--
 python/tests/import_doaj.py                      |  14 +--
 python/tests/import_grobid_metadata.py           |   6 +-
 python/tests/import_ingest.py                    |   2 +-
 python/tests/import_jalc.py                      |   4 +-
 python/tests/import_jstor.py                     |  14 +--
 python/tests/import_pubmed.py                    |  22 ++---
 python/tests/importer.py                         |  22 ++---
 python/tests/transform_elasticsearch.py          | 120 +++++++++++------------
 python/tests/web_citation_csl.py                 |   2 +-
 python/tests/web_entity_views.py                 |   2 +-
 39 files changed, 220 insertions(+), 221 deletions(-)
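The '==' vs 'is' rewrites below rely on None, True, and False being singletons in
Python: identity checks ('is') cannot be fooled by a custom __eq__, while equality
checks ('==') can. A minimal sketch of the difference — the Sneaky class is an
invented illustration, not code from this repository:

    class Sneaky:
        # Pathological __eq__ that claims equality with everything, including
        # None; '== None' is fooled by it, 'is None' is not.
        def __eq__(self, other):
            return True

    x = Sneaky()
    assert (x == None) is True   # dispatches to Sneaky.__eq__, which lies
    assert (x is None) is False  # identity: x is not the None singleton

For plain attribute checks like the ones in this patch the two spellings agree,
so swapping them is safe; 'is None' is simply the form that cannot break later.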
diff --git a/python/fatcat_tools/cleanups/files.py b/python/fatcat_tools/cleanups/files.py
index a40e4a28..10dd45cc 100644
--- a/python/fatcat_tools/cleanups/files.py
+++ b/python/fatcat_tools/cleanups/files.py
@@ -27,7 +27,7 @@ class FileCleaner(EntityCleaner):
         """
 
         # URL has ://web.archive.org/web/None/ link => delete URL
-        entity.urls = [u for u in entity.urls if not '://web.archive.org/web/None/' in u.url]
+        entity.urls = [u for u in entity.urls if '://web.archive.org/web/None/' not in u.url]
 
         # URL has ://archive.org/ link with rel=repository => rel=archive
         for u in entity.urls:
diff --git a/python/fatcat_tools/harvest/harvest_common.py b/python/fatcat_tools/harvest/harvest_common.py
index bdae3054..5e7702d9 100644
--- a/python/fatcat_tools/harvest/harvest_common.py
+++ b/python/fatcat_tools/harvest/harvest_common.py
@@ -77,7 +77,7 @@ class HarvestState:
 
         current = start_date
         while current <= end_date:
-            if not current in self.completed:
+            if current not in self.completed:
                 self.to_process.add(current)
             current += datetime.timedelta(days=1)
 
diff --git a/python/fatcat_tools/harvest/pubmed.py b/python/fatcat_tools/harvest/pubmed.py
index 92798a99..ee55f4eb 100644
--- a/python/fatcat_tools/harvest/pubmed.py
+++ b/python/fatcat_tools/harvest/pubmed.py
@@ -301,7 +301,7 @@ def xmlstream(filename, tag, encoding='utf-8'):
     Known vulnerabilities: https://docs.python.org/3/library/xml.html#xml-vulnerabilities
     """
     def strip_ns(tag):
-        if not '}' in tag:
+        if '}' not in tag:
             return tag
         return tag.split('}')[1]
 
diff --git a/python/fatcat_tools/importers/arabesque.py b/python/fatcat_tools/importers/arabesque.py
index 79fb10d3..ccf35446 100644
--- a/python/fatcat_tools/importers/arabesque.py
+++ b/python/fatcat_tools/importers/arabesque.py
@@ -62,13 +62,13 @@ class ArabesqueMatchImporter(EntityImporter):
     def want(self, row):
         if self.require_grobid and not row['postproc_status'] == "200":
             return False
-        if (row['hit'] == True
+        if (row['hit'] is True
             and row['final_sha1']
             and row['final_timestamp']
             and row['final_timestamp'] != "-"
             and len(row['final_timestamp']) == 14
             and row['final_mimetype']
-            and row['hit'] == True
+            and row['hit'] is True
             and row['identifier']):
             return True
         else:
diff --git a/python/fatcat_tools/importers/crossref.py b/python/fatcat_tools/importers/crossref.py
index bd72a781..38c19a63 100644
--- a/python/fatcat_tools/importers/crossref.py
+++ b/python/fatcat_tools/importers/crossref.py
@@ -205,7 +205,7 @@ class CrossrefImporter(EntityImporter):
             return None
 
         # Do require the 'title' keys to exist, as release entities do
-        if (not 'title' in obj) or (not obj['title']):
+        if ('title' not in obj) or (not obj['title']):
             self.counts['skip-blank-title'] += 1
             return None
 
@@ -429,7 +429,6 @@ class CrossrefImporter(EntityImporter):
             release_year = raw_date[0]
             release_date = None
 
-        original_title: Optional[str] = None
         if obj.get('original-title'):
             ot = obj.get('original-title')
diff --git a/python/fatcat_tools/importers/datacite.py b/python/fatcat_tools/importers/datacite.py
index eb49596f..1593e6f8 100644
--- a/python/fatcat_tools/importers/datacite.py
+++ b/python/fatcat_tools/importers/datacite.py
@@ -319,7 +319,7 @@ class DataciteImporter(EntityImporter):
        #  17871 | translator
        # 10870584 |
        # (4 rows)
-       # 
+       #
        # Related: https://guide.fatcat.wiki/entity_release.html -- role
        # (string, of a set): the type of contribution, from a controlled
        # vocabulary. TODO: vocabulary needs review.
@@ -1046,7 +1046,7 @@ def find_original_language_title(item, min_length=4, max_questionmarks=3):
 
     Example input: {'title': 'Some title', 'original_language_title': 'Some title'}
     """
-    if not 'original_language_title' in item:
+    if 'original_language_title' not in item:
         return None
     title = item.get('title')
     if not title:
diff --git a/python/fatcat_tools/importers/dblp_release.py b/python/fatcat_tools/importers/dblp_release.py
index 670f190b..fa5cb842 100644
--- a/python/fatcat_tools/importers/dblp_release.py
+++ b/python/fatcat_tools/importers/dblp_release.py
@@ -93,7 +93,7 @@ class DblpReleaseImporter(EntityImporter):
         return self._dblp_container_map.get(prefix)
 
     def want(self, xml_elem):
-        if not xml_elem.name in self.ELEMENT_TYPES:
+        if xml_elem.name not in self.ELEMENT_TYPES:
             self.counts['skip-type'] += 1
             return False
         if not xml_elem.get('key'):
@@ -243,7 +243,7 @@ class DblpReleaseImporter(EntityImporter):
         # dblp-specific extra
         dblp_extra = dict(type=dblp_type)
         note = clean_str(xml_elem.note and xml_elem.note.text)
-        if note and not 'base-search.net' in note:
+        if note and 'base-search.net' not in note:
             dblp_extra['note'] = note
         if part_of_key:
             dblp_extra['part_of_key'] = part_of_key
diff --git a/python/fatcat_tools/importers/doaj_article.py b/python/fatcat_tools/importers/doaj_article.py
index 191a65d8..833089ae 100644
--- a/python/fatcat_tools/importers/doaj_article.py
+++ b/python/fatcat_tools/importers/doaj_article.py
@@ -73,7 +73,7 @@ class DoajArticleImporter(EntityImporter):
         }
         """
 
-        if not obj or not isinstance(obj, dict) or not 'bibjson' in obj:
+        if not obj or not isinstance(obj, dict) or 'bibjson' not in obj:
             self.counts['skip-empty'] += 1
             return None
 
diff --git a/python/fatcat_tools/importers/file_meta.py b/python/fatcat_tools/importers/file_meta.py
index 9f4b9e06..3d9f5923 100644
--- a/python/fatcat_tools/importers/file_meta.py
+++ b/python/fatcat_tools/importers/file_meta.py
@@ -35,7 +35,7 @@ class FileMetaImporter(EntityImporter):
     def parse_record(self, row):
 
         # bezerk mode doesn't make sense for this importer
-        assert self.bezerk_mode == False
+        assert self.bezerk_mode is False
 
         file_meta = row
         fe = fatcat_openapi_client.FileEntity(
diff --git a/python/fatcat_tools/importers/fileset_generic.py b/python/fatcat_tools/importers/fileset_generic.py
index f0ad5460..13352fb2 100644
--- a/python/fatcat_tools/importers/fileset_generic.py
+++ b/python/fatcat_tools/importers/fileset_generic.py
@@ -30,7 +30,7 @@ class FilesetImporter(EntityImporter):
             **kwargs)
 
         # bezerk mode doesn't make sense for this importer
-        assert self.bezerk_mode == False
+        assert self.bezerk_mode is False
 
     def want(self, row):
         if not row.get('release_ids'):
diff --git a/python/fatcat_tools/importers/ingest.py b/python/fatcat_tools/importers/ingest.py
index cb663330..4d4efc0a 100644
--- a/python/fatcat_tools/importers/ingest.py
+++ b/python/fatcat_tools/importers/ingest.py
@@ -78,7 +78,7 @@ class IngestFileResultImporter(EntityImporter):
         Sandcrawler ingest-specific part of want(). Generic across file and
         webcapture ingest.
""" - if row.get('hit') != True: + if row.get('hit') is not True: self.counts['skip-hit'] += 1 return False source = row['request'].get('ingest_request_source') @@ -178,9 +178,9 @@ class IngestFileResultImporter(EntityImporter): } # work around old schema - if not 'terminal_url' in terminal: + if 'terminal_url' not in terminal: terminal['terminal_url'] = terminal['url'] - if not 'terminal_dt' in terminal: + if 'terminal_dt' not in terminal: terminal['terminal_dt'] = terminal['dt'] # convert CDX-style digits to ISO-style timestamp @@ -358,7 +358,7 @@ class SavePaperNowFileImporter(IngestFileResultImporter): self.counts['skip-not-savepapernow'] += 1 return False - if row.get('hit') != True: + if row.get('hit') is not True: self.counts['skip-hit'] += 1 return False @@ -459,7 +459,7 @@ class IngestWebResultImporter(IngestFileResultImporter): for resource in row.get('html_resources', []): timestamp = resource['timestamp'] - if not "+" in timestamp and not "Z" in timestamp: + if "+" not in timestamp and "Z" not in timestamp: timestamp += "Z" wc_cdx.append(fatcat_openapi_client.WebcaptureCdxLine( surt=resource['surt'], @@ -808,7 +808,7 @@ class SavePaperNowFilesetImporter(IngestFilesetResultImporter): self.counts['skip-not-savepapernow'] += 1 return False - if row.get('hit') != True: + if row.get('hit') is not True: self.counts['skip-hit'] += 1 return False diff --git a/python/fatcat_tools/importers/matched.py b/python/fatcat_tools/importers/matched.py index e0e4fc3c..09807276 100644 --- a/python/fatcat_tools/importers/matched.py +++ b/python/fatcat_tools/importers/matched.py @@ -94,7 +94,7 @@ class MatchedImporter(EntityImporter): urls = set() for url in obj.get('urls', []): url = make_rel_url(url, default_link_rel=self.default_link_rel) - if url != None: + if url is not None: urls.add(url) for cdx in obj.get('cdx', []): original = cdx['url'] @@ -104,7 +104,7 @@ class MatchedImporter(EntityImporter): original) urls.add(("webarchive", wayback)) url = make_rel_url(original, default_link_rel=self.default_link_rel) - if url != None: + if url is not None: urls.add(url) urls = [fatcat_openapi_client.FileUrl(rel=rel, url=url) for (rel, url) in urls] if len(urls) == 0: diff --git a/python/fatcat_tools/importers/orcid.py b/python/fatcat_tools/importers/orcid.py index 21feea9e..4412a46d 100644 --- a/python/fatcat_tools/importers/orcid.py +++ b/python/fatcat_tools/importers/orcid.py @@ -40,7 +40,7 @@ class OrcidImporter(EntityImporter): returns a CreatorEntity """ - if not 'person' in obj: + if 'person' not in obj: return False name = obj['person']['name'] diff --git a/python/fatcat_tools/importers/pubmed.py b/python/fatcat_tools/importers/pubmed.py index c9907c5e..00ad54d0 100644 --- a/python/fatcat_tools/importers/pubmed.py +++ b/python/fatcat_tools/importers/pubmed.py @@ -590,7 +590,7 @@ class PubmedImporter(EntityImporter): orcid = orcid.replace("http://orcid.org/", "") elif orcid.startswith("https://orcid.org/"): orcid = orcid.replace("https://orcid.org/", "") - elif not '-' in orcid: + elif '-' not in orcid: orcid = "{}-{}-{}-{}".format( orcid[0:4], orcid[4:8], diff --git a/python/fatcat_tools/importers/shadow.py b/python/fatcat_tools/importers/shadow.py index fa9b4d10..77205cee 100644 --- a/python/fatcat_tools/importers/shadow.py +++ b/python/fatcat_tools/importers/shadow.py @@ -95,7 +95,7 @@ class ShadowLibraryImporter(EntityImporter): urls = [] if obj.get('cdx'): url = make_rel_url(obj['cdx']['url'], default_link_rel=self.default_link_rel) - if url != None: + if url is not None: urls.append(url) 
diff --git a/python/fatcat_tools/normal.py b/python/fatcat_tools/normal.py
index eb61c326..24c0bb0a 100644
--- a/python/fatcat_tools/normal.py
+++ b/python/fatcat_tools/normal.py
@@ -74,19 +74,19 @@ def test_clean_doi():
     assert clean_doi("10.1234/asdf ") == "10.1234/asdf"
     assert clean_doi("10.1037//0002-9432.72.1.50") == "10.1037/0002-9432.72.1.50"
     assert clean_doi("10.1037/0002-9432.72.1.50") == "10.1037/0002-9432.72.1.50"
-    assert clean_doi("10.23750/abm.v88i2 -s.6506") == None
-    assert clean_doi("10.17167/mksz.2017.2.129–155") == None
+    assert clean_doi("10.23750/abm.v88i2 -s.6506") is None
+    assert clean_doi("10.17167/mksz.2017.2.129–155") is None
     assert clean_doi("http://doi.org/10.1234/asdf ") == "10.1234/asdf"
     assert clean_doi("https://dx.doi.org/10.1234/asdf ") == "10.1234/asdf"
     assert clean_doi("doi:10.1234/asdf ") == "10.1234/asdf"
-    assert clean_doi("doi:10.1234/ asdf ") == None
-    assert clean_doi("10.4149/gpb¬_2017042") == None # "logical negation" character
-    assert clean_doi("10.6002/ect.2020.häyry") == None # this example via pubmed (pmid:32519616)
-    assert clean_doi("10.30466/vrf.2019.98547.2350\u200e") == None
-    assert clean_doi("10.12016/j.issn.2096⁃1456.2017.06.014") == None
-    assert clean_doi("10.4025/diálogos.v17i2.36030") == None
-    assert clean_doi("10.19027/jai.10.106‒115") == None
-    assert clean_doi("10.15673/атбп2312-3125.17/2014.26332") == None
+    assert clean_doi("doi:10.1234/ asdf ") is None
+    assert clean_doi("10.4149/gpb¬_2017042") is None # "logical negation" character
+    assert clean_doi("10.6002/ect.2020.häyry") is None # this example via pubmed (pmid:32519616)
+    assert clean_doi("10.30466/vrf.2019.98547.2350\u200e") is None
+    assert clean_doi("10.12016/j.issn.2096⁃1456.2017.06.014") is None
+    assert clean_doi("10.4025/diálogos.v17i2.36030") is None
+    assert clean_doi("10.19027/jai.10.106‒115") is None
+    assert clean_doi("10.15673/атбп2312-3125.17/2014.26332") is None
     assert clean_doi("10.7326/M20-6817") == "10.7326/m20-6817"
 
 
@@ -129,17 +129,17 @@ def test_clean_arxiv_id():
     assert clean_arxiv_id("arxiv:0806.2878v1") == "0806.2878v1"
     assert clean_arxiv_id("arXiv:0806.2878v1") == "0806.2878v1"
-    assert clean_arxiv_id("hep-TH/9901001v1") == None
-    assert clean_arxiv_id("hßp-th/9901001v1") == None
-    assert clean_arxiv_id("math.CA/06l1800v2") == None
-    assert clean_arxiv_id("mßth.ca/0611800v2") == None
-    assert clean_arxiv_id("MATH.CA/0611800v2") == None
+    assert clean_arxiv_id("hep-TH/9901001v1") is None
+    assert clean_arxiv_id("hßp-th/9901001v1") is None
+    assert clean_arxiv_id("math.CA/06l1800v2") is None
+    assert clean_arxiv_id("mßth.ca/0611800v2") is None
+    assert clean_arxiv_id("MATH.CA/0611800v2") is None
     assert clean_arxiv_id("0806.2878v23") == "0806.2878v23" # ?
- assert clean_arxiv_id("0806.2878v") == None + assert clean_arxiv_id("0806.2878v") is None assert clean_arxiv_id("0806.2878") == "0806.2878" - assert clean_arxiv_id("006.2878v1") == None - assert clean_arxiv_id("0806.v1") == None - assert clean_arxiv_id("08062878v1") == None + assert clean_arxiv_id("006.2878v1") is None + assert clean_arxiv_id("0806.v1") is None + assert clean_arxiv_id("08062878v1") is None def clean_wikidata_qid(raw): if not raw: @@ -155,13 +155,13 @@ def test_clean_wikidata_qid(): assert clean_wikidata_qid("Q1234") == "Q1234" assert clean_wikidata_qid("Q1") == "Q1" assert clean_wikidata_qid(" Q1234 ") == "Q1234" - assert clean_wikidata_qid(" Q1 234 ") == None - assert clean_wikidata_qid("q1234") == None - assert clean_wikidata_qid("1234 ") == None - assert clean_wikidata_qid("Q0123") == None - assert clean_wikidata_qid("PMC123") == None - assert clean_wikidata_qid("qfba3") == None - assert clean_wikidata_qid("") == None + assert clean_wikidata_qid(" Q1 234 ") is None + assert clean_wikidata_qid("q1234") is None + assert clean_wikidata_qid("1234 ") is None + assert clean_wikidata_qid("Q0123") is None + assert clean_wikidata_qid("PMC123") is None + assert clean_wikidata_qid("qfba3") is None + assert clean_wikidata_qid("") is None def clean_pmid(raw: str) -> Optional[str]: if not raw: @@ -176,9 +176,9 @@ def clean_pmid(raw: str) -> Optional[str]: def test_clean_pmid(): assert clean_pmid("1234") == "1234" assert clean_pmid("1234 ") == "1234" - assert clean_pmid("PMC123") == None - assert clean_pmid("qfba3") == None - assert clean_pmid("") == None + assert clean_pmid("PMC123") is None + assert clean_pmid("qfba3") is None + assert clean_pmid("") is None def clean_pmcid(raw: str) -> Optional[str]: if not raw: @@ -206,9 +206,9 @@ def clean_sha1(raw: str) -> Optional[str]: def test_clean_sha1(): assert clean_sha1("0fba3fba0e1937aa0297de3836b768b5dfb23d7b") == "0fba3fba0e1937aa0297de3836b768b5dfb23d7b" assert clean_sha1("0fba3fba0e1937aa0297de3836b768b5dfb23d7b ") == "0fba3fba0e1937aa0297de3836b768b5dfb23d7b" - assert clean_sha1("fba3fba0e1937aa0297de3836b768b5dfb23d7b") == None - assert clean_sha1("qfba3fba0e1937aa0297de3836b768b5dfb23d7b") == None - assert clean_sha1("0fba3fb a0e1937aa0297de3836b768b5dfb23d7b") == None + assert clean_sha1("fba3fba0e1937aa0297de3836b768b5dfb23d7b") is None + assert clean_sha1("qfba3fba0e1937aa0297de3836b768b5dfb23d7b") is None + assert clean_sha1("0fba3fb a0e1937aa0297de3836b768b5dfb23d7b") is None def clean_sha256(raw: str) -> Optional[str]: raw = raw.strip().lower() @@ -223,7 +223,7 @@ def clean_sha256(raw: str) -> Optional[str]: def test_clean_sha256(): assert clean_sha256("6cc853f2ae75696b2e45f476c76b946b0fc2df7c52bb38287cb074aceb77bc7f") == "6cc853f2ae75696b2e45f476c76b946b0fc2df7c52bb38287cb074aceb77bc7f" - assert clean_sha256("0fba3fba0e1937aa0297de3836b768b5dfb23d7b") == None + assert clean_sha256("0fba3fba0e1937aa0297de3836b768b5dfb23d7b") is None ISSN_REGEX = re.compile(r"^\d{4}-\d{3}[0-9X]$") @@ -240,8 +240,8 @@ def clean_issn(raw: str) -> Optional[str]: def test_clean_issn(): assert clean_issn("1234-4567") == "1234-4567" assert clean_issn("1234-456X") == "1234-456X" - assert clean_issn("134-4567") == None - assert clean_issn("123X-4567") == None + assert clean_issn("134-4567") is None + assert clean_issn("123X-4567") is None ISBN13_REGEX = re.compile(r"^97(?:8|9)-\d{1,5}-\d{1,7}-\d{1,6}-\d$") @@ -257,7 +257,7 @@ def test_clean_isbn13(): assert clean_isbn13("978-1-56619-909-4") == "978-1-56619-909-4" assert 
clean_isbn13("978-1-4028-9462-6") == "978-1-4028-9462-6" assert clean_isbn13("978-1-56619-909-4 ") == "978-1-56619-909-4" - assert clean_isbn13("9781566199094") == None + assert clean_isbn13("9781566199094") is None ORCID_REGEX = re.compile(r"^\d{4}-\d{4}-\d{4}-\d{3}[\dX]$") @@ -273,8 +273,8 @@ def test_clean_orcid(): assert clean_orcid("0123-4567-3456-6789") == "0123-4567-3456-6789" assert clean_orcid("0123-4567-3456-678X") == "0123-4567-3456-678X" assert clean_orcid("0123-4567-3456-6789 ") == "0123-4567-3456-6789" - assert clean_orcid("01234567-3456-6780") == None - assert clean_orcid("0x23-4567-3456-6780") == None + assert clean_orcid("01234567-3456-6780") is None + assert clean_orcid("0x23-4567-3456-6780") is None HDL_REGEX = re.compile(r"^\d+(\.\d+)*/\S+$") @@ -304,10 +304,10 @@ def test_clean_hdl(): assert clean_hdl("http://hdl.handle.net/20.500.23456/ABC/DUMMY") == "20.500.23456/abc/dummy" assert clean_hdl("21.1234/aksjdfh") == "21.1234/aksjdfh" assert clean_hdl("2381/12775") == "2381/12775" - assert clean_hdl("10.1234/aksjdfh") == None - assert clean_hdl("20.1234") == None - assert clean_hdl("20.1234/") == None - assert clean_hdl("20./asdf") == None + assert clean_hdl("10.1234/aksjdfh") is None + assert clean_hdl("20.1234") is None + assert clean_hdl("20.1234/") is None + assert clean_hdl("20./asdf") is None def clean_str(thing: Optional[str], force_xml: bool = False) -> Optional[str]: @@ -337,9 +337,9 @@ def clean_str(thing: Optional[str], force_xml: bool = False) -> Optional[str]: def test_clean_str(): - assert clean_str(None) == None - assert clean_str('') == None - assert clean_str('1') == None + assert clean_str(None) is None + assert clean_str('') is None + assert clean_str('1') is None assert clean_str('123') == '123' assert clean_str('a&b') == 'a&b' assert clean_str('a&b') == 'a&b' @@ -410,9 +410,9 @@ def parse_month(raw: Optional[str]) -> Optional[int]: def test_parse_month() -> None: - assert parse_month(None) == None - assert parse_month("") == None - assert parse_month("0") == None + assert parse_month(None) is None + assert parse_month("") is None + assert parse_month("0") is None assert parse_month("10") == 10 assert parse_month("jan") == 1 assert parse_month("September") == 9 @@ -435,7 +435,7 @@ def detect_text_lang(raw: str) -> Optional[str]: return None def test_detect_text_lang() -> None: - assert detect_text_lang("") == None + assert detect_text_lang("") is None EN_SAMPLE = "this is a string of English text for testing" assert detect_text_lang(EN_SAMPLE) == "en" JA_SAMPLE = "モーラの種類は、以下に示すように111程度存在する。ただし、研究者により数え方が少しずつ異なる。" @@ -465,9 +465,9 @@ def parse_lang_name(raw: Optional[str]) -> Optional[str]: def test_parse_lang_name() -> None: - assert parse_lang_name(None) == None - assert parse_lang_name("") == None - assert parse_lang_name("asdf ") == None + assert parse_lang_name(None) is None + assert parse_lang_name("") is None + assert parse_lang_name("asdf ") is None assert parse_lang_name("english") == "en" assert parse_lang_name("ENGLISH") == "en" assert parse_lang_name("asdf blah") is None diff --git a/python/fatcat_tools/reviewers/review_common.py b/python/fatcat_tools/reviewers/review_common.py index 232dc57d..b4930c19 100644 --- a/python/fatcat_tools/reviewers/review_common.py +++ b/python/fatcat_tools/reviewers/review_common.py @@ -92,7 +92,7 @@ class ReviewBot: return annotation def run(self, since=None): - if since == None: + if since is None: since = datetime.datetime.utcnow() while True: # XXX: better isoformat conversion? 
diff --git a/python/fatcat_tools/transforms/elasticsearch.py b/python/fatcat_tools/transforms/elasticsearch.py
index bfc18f83..ec5891c3 100644
--- a/python/fatcat_tools/transforms/elasticsearch.py
+++ b/python/fatcat_tools/transforms/elasticsearch.py
@@ -1,6 +1,6 @@
 import datetime
 
-from typing import Dict, List, Any, Optional
+from typing import Dict, Any, Optional
 
 import tldextract
 
@@ -166,7 +166,7 @@ def release_to_elasticsearch(entity: ReleaseEntity, force_bool: bool = True) ->
     if extra.get('is_oa'):
         # NOTE: not actually setting this anywhere... but could
         t['is_oa'] = True
-    if extra.get('is_work_alias') != None:
+    if extra.get('is_work_alias') is not None:
         t['is_work_alias'] = bool(extra.get('is_work_alias'))
     if extra.get('longtail_oa'):
         # sometimes set by GROBID/matcher
@@ -214,7 +214,7 @@ def release_to_elasticsearch(entity: ReleaseEntity, force_bool: bool = True) ->
         for k in ('crossref', 'datacite', 'jalc'):
             if k in extra:
                 t['doi_registrar'] = k
-        if not 'doi_registrar' in t:
+        if 'doi_registrar' not in t:
             t['doi_registrar'] = 'crossref'
 
     if t['doi']:
@@ -511,12 +511,12 @@ def container_to_elasticsearch(entity, force_bool=True, stats=None):
 
 
 def _type_of_edit(edit: EntityEdit) -> str:
-    if edit.revision == None and edit.redirect_ident == None:
+    if edit.revision is None and edit.redirect_ident is None:
         return 'delete'
     elif edit.redirect_ident:
         # redirect
         return 'update'
-    elif edit.prev_revision == None and edit.redirect_ident == None and edit.revision:
+    elif edit.prev_revision is None and edit.redirect_ident is None and edit.revision:
         return 'create'
     else:
         return 'update'
diff --git a/python/fatcat_web/editing_routes.py b/python/fatcat_web/editing_routes.py
index d888735a..5a97dfc4 100644
--- a/python/fatcat_web/editing_routes.py
+++ b/python/fatcat_web/editing_routes.py
@@ -188,7 +188,7 @@ def generic_entity_edit(editgroup_id, entity_type, existing_ident, edit_template
         raise ae
 
     # check that editgroup is edit-able
-    if editgroup.changelog_index != None:
+    if editgroup.changelog_index is not None:
         abort(400, "Editgroup already merged")
 
     # fetch entity (if set) or 404
@@ -285,7 +285,7 @@ def generic_entity_edit(editgroup_id, entity_type, existing_ident, edit_template
             raise NotImplementedError
 
         editor_editgroups = api.get_editor_editgroups(session['editor']['editor_id'], limit=20)
-        potential_editgroups = [e for e in editor_editgroups if e.changelog_index == None and e.submitted == None]
+        potential_editgroups = [e for e in editor_editgroups if e.changelog_index is None and e.submitted is None]
 
         if not form.is_submitted():
             # default to most recent not submitted, fallback to "create new"
@@ -313,7 +313,7 @@ def generic_entity_toml_edit(editgroup_id, entity_type, existing_ident, edit_tem
         raise ae
 
     # check that editgroup is edit-able
-    if editgroup.changelog_index != None:
+    if editgroup.changelog_index is not None:
         flash("Editgroup already merged")
         abort(400)
 
@@ -381,7 +381,7 @@ def generic_entity_toml_edit(editgroup_id, entity_type, existing_ident, edit_tem
         form = EntityTomlForm.from_entity(existing)
 
     editor_editgroups = api.get_editor_editgroups(session['editor']['editor_id'], limit=20)
-    potential_editgroups = [e for e in editor_editgroups if e.changelog_index == None and e.submitted == None]
+    potential_editgroups = [e for e in editor_editgroups if e.changelog_index is None and e.submitted is None]
 
     if not form.is_submitted():
         # default to most recent not submitted, fallback to "create new"
@@ -410,7 +410,7 @@ def generic_entity_delete(editgroup_id: Optional[str], entity_type: str, existin
         raise ae
 
     # check that editgroup is edit-able
-    if editgroup.changelog_index != None:
+    if editgroup.changelog_index is not None:
         flash("Editgroup already merged")
         abort(400)
 
@@ -462,7 +462,7 @@ def generic_entity_delete(editgroup_id: Optional[str], entity_type: str, existin
         form = EntityTomlForm.from_entity(existing)
 
     editor_editgroups = api.get_editor_editgroups(session['editor']['editor_id'], limit=20)
-    potential_editgroups = [e for e in editor_editgroups if e.changelog_index == None and e.submitted == None]
+    potential_editgroups = [e for e in editor_editgroups if e.changelog_index is None and e.submitted is None]
 
     if not form.is_submitted():
         # default to most recent not submitted, fallback to "create new"
@@ -484,7 +484,7 @@ def generic_edit_delete(editgroup_id, entity_type, edit_id):
         abort(ae.status)
 
     # check that editgroup is edit-able
-    if editgroup.changelog_index != None:
+    if editgroup.changelog_index is not None:
         flash("Editgroup already merged")
         abort(400)
 
diff --git a/python/fatcat_web/entity_helpers.py b/python/fatcat_web/entity_helpers.py
index 26371341..5522f3b5 100644
--- a/python/fatcat_web/entity_helpers.py
+++ b/python/fatcat_web/entity_helpers.py
@@ -45,7 +45,7 @@ def enrich_fileset_entity(entity):
     if entity.state in ('redirect', 'deleted'):
         return entity
     entity._total_size = None
-    if entity.manifest != None:
+    if entity.manifest is not None:
         entity._total_size = sum([f.size for f in entity.manifest]) or 0
     return entity
 
@@ -93,7 +93,7 @@ def enrich_release_entity(entity):
         c.role in ('author', None) and
         (c.surname or c.raw_name or (c.creator and c.creator.surname))
     ]
-    entity._authors = sorted(authors, key=lambda c: (c.index == None and 99999999) or c.index)
+    entity._authors = sorted(authors, key=lambda c: (c.index is None and 99999999) or c.index)
     # need authors, title for citeproc to work
     entity._can_citeproc = bool(entity._authors) and bool(entity.title)
     if entity.abstracts:
diff --git a/python/fatcat_web/routes.py b/python/fatcat_web/routes.py
index 9795adf7..fc94da66 100644
--- a/python/fatcat_web/routes.py
+++ b/python/fatcat_web/routes.py
@@ -738,7 +738,7 @@ def release_save(ident):
 
 @app.route('/search', methods=['GET', 'POST'])
 def generic_search():
-    if not 'q' in request.args.keys():
+    if 'q' not in request.args.keys():
         return redirect('/release/search')
     query = request.args.get('q').strip()
 
@@ -1080,7 +1080,7 @@ def change_username():
     if not app.testing:
         app.csrf.protect()
     # show the user a list of login options
-    if not 'username' in request.form:
+    if 'username' not in request.form:
         abort(400)
     # on behalf of user...
     user_api = auth_api(session['api_token'])
diff --git a/python/fatcat_web/search.py b/python/fatcat_web/search.py
index 94c7431c..95f1f5c9 100644
--- a/python/fatcat_web/search.py
+++ b/python/fatcat_web/search.py
@@ -417,7 +417,7 @@ def get_elastic_search_coverage(query: ReleaseQuery) -> dict:
     preservation_bucket = agg_to_dict(resp.aggregations.preservation)
     preservation_bucket['total'] = _hits_total_int(resp.hits.total)
     for k in ('bright', 'dark', 'shadows_only', 'none'):
-        if not k in preservation_bucket:
+        if k not in preservation_bucket:
             preservation_bucket[k] = 0
     if app.config['FATCAT_MERGE_SHADOW_PRESERVATION']:
         preservation_bucket['none'] += preservation_bucket['shadows_only']
@@ -490,7 +490,7 @@ def get_elastic_container_stats(ident, issnl=None, es_client=None, es_index=None
     preservation_bucket = agg_to_dict(resp.aggregations.preservation)
     preservation_bucket['total'] = _hits_total_int(resp.hits.total)
     for k in ('bright', 'dark', 'shadows_only', 'none'):
-        if not k in preservation_bucket:
+        if k not in preservation_bucket:
             preservation_bucket[k] = 0
     if merge_shadows:
         preservation_bucket['none'] += preservation_bucket['shadows_only']
diff --git a/python/tests/api_filesets.py b/python/tests/api_filesets.py
index c69d567d..f351dc9f 100644
--- a/python/tests/api_filesets.py
+++ b/python/tests/api_filesets.py
@@ -80,7 +80,7 @@ def test_fileset_examples(api):
     assert fs3.urls[1].rel == 'archive'
     assert fs3.manifest[1].md5 == 'f4de91152c7ab9fdc2a128f962faebff'
     assert fs3.manifest[1].mimetype == 'application/gzip'
-    assert fs3.manifest[1].extra != None
+    assert fs3.manifest[1].extra is not None
     assert fs3.releases[0].ident
     assert fs3.releases[0].abstracts is None
     assert fs3.releases[0].refs is None
diff --git a/python/tests/citation_efficiency.py b/python/tests/citation_efficiency.py
index f8807db6..1d57dfeb 100644
--- a/python/tests/citation_efficiency.py
+++ b/python/tests/citation_efficiency.py
@@ -58,7 +58,7 @@ def test_citation_targets(api):
     assert r2.refs[1].key == "second"
     assert r2.refs[0].index == 0 # TODO: one-indexing?
     assert r2.refs[1].index == 1
-    assert r2.refs[0].target_release_id == None
+    assert r2.refs[0].target_release_id is None
     assert r2.refs[1].target_release_id == r1.ident
     assert len(r2.refs) == 2
 
@@ -81,7 +81,7 @@ def test_citation_empty_array(api):
     assert r1.refs == r2.refs
 
     r1b = api.get_release(r1.ident, hide="refs")
-    assert r1b.refs == None
+    assert r1b.refs is None
 
 def test_citation_encoding(api):
     # escape-only changes (eg, \u1234 whatever for ASCII)
diff --git a/python/tests/clean_files.py b/python/tests/clean_files.py
index ce1102be..f752bc2c 100644
--- a/python/tests/clean_files.py
+++ b/python/tests/clean_files.py
@@ -28,7 +28,7 @@ def test_url_cleanups(file_cleaner):
     # remove None wayback links
     assert len(f.urls) == 2
     for u in f.urls:
-        assert not 'web/None' in u.url
+        assert 'web/None' not in u.url
 
     assert f == file_cleaner.clean_entity(f)
     assert f == file_cleaner.clean_entity(copy.deepcopy(f))
diff --git a/python/tests/import_arxiv.py b/python/tests/import_arxiv.py
index 9306e67c..3ed1ab29 100644
--- a/python/tests/import_arxiv.py
+++ b/python/tests/import_arxiv.py
@@ -50,8 +50,8 @@ def test_arxiv_xml_parse(arxiv_importer):
     print(r2.extra)
     assert r1.work_id == r2.work_id
     assert r1.title == "Martingale theory for housekeeping heat"
-    assert r1.subtitle == None
-    assert r1.original_title == None
+    assert r1.subtitle is None
+    assert r1.original_title is None
     assert r1.release_type == "article"
     assert r1.release_stage == "submitted"
     assert r2.release_stage == "accepted"
@@ -60,7 +60,7 @@ def test_arxiv_xml_parse(arxiv_importer):
     assert r2.version == "v2"
     assert r1.ext_ids.arxiv == "1810.09584v1"
     assert r2.ext_ids.arxiv == "1810.09584v2"
-    assert r1.ext_ids.doi == None
+    assert r1.ext_ids.doi is None
     assert r2.ext_ids.doi == "10.1209/0295-5075/124/60006"
     assert r1.release_year == 2018
     assert str(r1.release_date) == "2018-10-22"
@@ -71,7 +71,7 @@ def test_arxiv_xml_parse(arxiv_importer):
     assert len(r1.contribs) == 4
     assert r1.extra['arxiv']['categories'] == ['cond-mat.stat-mech', 'physics.bio-ph', 'physics.data-an']
     assert r1.extra['arxiv']['base_id'] == '1810.09584'
-    assert r1.extra['superceded'] == True
+    assert r1.extra['superceded'] is True
     assert r1.contribs[0].raw_name == "Raphael Chetrite"
     assert r1.contribs[0].role == "author"
diff --git a/python/tests/import_common.py b/python/tests/import_common.py
index d0db014e..69e51432 100644
--- a/python/tests/import_common.py
+++ b/python/tests/import_common.py
@@ -37,7 +37,7 @@ def test_fuzzy_match_none(entity_importer, mocker) -> None:
     )
 
     resp = entity_importer.match_existing_release_fuzzy(release)
-    assert resp == None
+    assert resp is None
 
 def test_fuzzy_match_different(entity_importer, mocker) -> None:
     """
@@ -71,8 +71,8 @@ def test_fuzzy_match_different(entity_importer, mocker) -> None:
 
     match_raw.side_effect = [[r3]]
     resp = entity_importer.match_existing_release_fuzzy(r1)
-    assert resp == None
+    assert resp is None
 
     match_raw.side_effect = [[]]
     resp = entity_importer.match_existing_release_fuzzy(r1)
-    assert resp == None
+    assert resp is None
diff --git a/python/tests/import_crossref.py b/python/tests/import_crossref.py
index ebe6942f..a2c8681b 100644
--- a/python/tests/import_crossref.py
+++ b/python/tests/import_crossref.py
@@ -87,7 +87,7 @@ def test_crossref_dict_parse(crossref_importer):
     assert r.ext_ids.doi == "10.1002/(sici)1097-461x(1998)66:4<261::aid-qua1>3.0.co;2-t"
     assert r.ext_ids.isbn13 == "978-3-16-148410-0"
     assert r.language == "fr"
-    assert r.subtitle == None
+    assert r.subtitle is None
     assert 'subtitle' not in r.extra
     assert 'subtitle' not in r.extra['crossref']
     assert 'funder' not in r.extra
diff --git a/python/tests/import_datacite.py b/python/tests/import_datacite.py
index edbb6617..a92a732d 100644
--- a/python/tests/import_datacite.py
+++ b/python/tests/import_datacite.py
@@ -323,7 +323,7 @@ def test_datacite_spammy_title(datacite_importer):
             Heroes: Rising [2020]Full Movie Watch
             Online And Free Download""",
         "attributes": {"doi": "10.1234/1234"}})
-    assert r == False
+    assert r is False
 
 def test_datacite_importer(datacite_importer):
     last_index = datacite_importer.api.get_changelog(limit=1)[0].index
@@ -367,13 +367,13 @@ def test_datacite_dict_parse(datacite_importer):
         )
     assert r.release_type == "article"
     assert r.release_stage == "published"
-    assert r.license_slug == None
-    assert r.original_title == None
+    assert r.license_slug is None
+    assert r.original_title is None
     assert r.ext_ids.doi == "10.18730/8dym9"
-    assert r.ext_ids.isbn13 == None
+    assert r.ext_ids.isbn13 is None
     assert r.language == "en"
-    assert r.subtitle == None
-    assert r.release_date == None
+    assert r.subtitle is None
+    assert r.release_date is None
     assert r.release_year == 1986
     assert "subtitle" not in r.extra
     assert "subtitle" not in r.extra["datacite"]
@@ -388,10 +388,10 @@ def test_datacite_dict_parse(datacite_importer):
     assert len(r.abstracts[0].content) == 421
     assert len(r.contribs) == 2
     assert r.contribs[0].raw_name == "GLIS Of The ITPGRFA"
-    assert r.contribs[0].given_name == None
-    assert r.contribs[0].surname == None
+    assert r.contribs[0].given_name is None
+    assert r.contribs[0].surname is None
     assert len(r.refs) == 0
-    assert r.version == None
+    assert r.version is None
 
 
 def test_datacite_conversions(datacite_importer):
diff --git a/python/tests/import_doaj.py b/python/tests/import_doaj.py
index 17a23257..72a3acb8 100644
--- a/python/tests/import_doaj.py
+++ b/python/tests/import_doaj.py
@@ -128,16 +128,16 @@ def test_doaj_dict_parse(doaj_importer):
     assert r.release_type == "article-journal"
     assert r.release_stage == "published"
     assert r.license_slug == "cc-by-nc-nd"
-    assert r.original_title == None
+    assert r.original_title is None
     assert r.ext_ids.doi == "10.1016/j.matdes.2016.06.110"
     assert r.ext_ids.doaj == "e58f08a11ecb495ead55a44ad4f89808"
-    assert r.subtitle == None
-    assert r.release_date == None
+    assert r.subtitle is None
+    assert r.release_date is None
     assert r.release_year == 2016
     assert r.volume == "108"
-    assert r.number == None
+    assert r.number is None
     assert r.pages == "608-617"
-    assert r.version == None
+    assert r.version is None
     assert r.language == "en"
     # matched by ISSN, so wouldn't be defined normally
     assert r.extra['container_name'] == "Materials & Design"
@@ -145,8 +145,8 @@ def test_doaj_dict_parse(doaj_importer):
     assert len(r.abstracts[0].content) == 1033
     assert len(r.contribs) == 5
     assert r.contribs[0].raw_name == "Xinfeng Li"
-    assert r.contribs[0].given_name == None
-    assert r.contribs[0].surname == None
+    assert r.contribs[0].given_name is None
+    assert r.contribs[0].surname is None
     assert not r.refs
 
     #print(r.extra)
diff --git a/python/tests/import_grobid_metadata.py b/python/tests/import_grobid_metadata.py
index 52284b89..87cb8ef0 100644
--- a/python/tests/import_grobid_metadata.py
+++ b/python/tests/import_grobid_metadata.py
@@ -28,9 +28,9 @@ def test_grobid_metadata_parse(grobid_metadata_importer):
     assert re.contribs[0].raw_name == "Wahyu Ary"
     assert re.contribs[0].given_name == "Wahyu"
     assert re.contribs[0].surname == "Ary"
-    assert re.publisher == None
+    assert re.publisher is None
     if re.extra:
-        assert re.extra.get('container_name') == None
+        assert re.extra.get('container_name') is None
     assert len(re.refs) == 27
 
 def test_file_metadata_parse(grobid_metadata_importer):
@@ -43,7 +43,7 @@ def test_file_metadata_parse(grobid_metadata_importer):
         random_sha1, json.loads(raw[1]), raw[2], int(raw[3]))
     assert fe
     #assert fe.sha1 == "d4a841744719518bf8bdd5d91576ccedc55efbb5" # "sha1:2SUEC5CHDFIYX6F52XMRK5WM5XCV565V"
-    assert fe.md5 == None
+    assert fe.md5 is None
     assert fe.mimetype == "application/pdf"
     assert fe.size == 142710
     assert fe.urls[1].url.startswith("http://via.library.depaul.edu")
diff --git a/python/tests/import_ingest.py b/python/tests/import_ingest.py
index 92539f1a..955c97cb 100644
--- a/python/tests/import_ingest.py
+++ b/python/tests/import_ingest.py
@@ -155,7 +155,7 @@ def test_ingest_dict_parse_old(ingest_importer):
 
     # ancient ingest requests had no type; skip them
     f = ingest_importer.parse_record(raw)
-    assert f == None
+    assert f is None
    raw['request']['ingest_type'] = 'pdf'
 
     f = ingest_importer.parse_record(raw)
diff --git a/python/tests/import_jalc.py b/python/tests/import_jalc.py
index ff757e51..010dfce8 100644
--- a/python/tests/import_jalc.py
+++ b/python/tests/import_jalc.py
@@ -77,12 +77,12 @@ def test_jalc_xml_parse(jalc_importer):
     print(r.extra)
 
     assert r.title == "New carbides in the Ni-Ti-Mo-C system"
-    assert r.subtitle == None
+    assert r.subtitle is None
     assert r.original_title == "Ni-Ti-Mo-C系に出現する新炭化物相について"
     assert r.publisher == "Japan Society of Powder and Powder Metallurgy"
     assert r.release_type == "article-journal"
     assert r.release_stage == "published"
-    assert r.license_slug == None
+    assert r.license_slug is None
     assert r.ext_ids.doi == "10.2497/jjspm.36.898"
     assert r.language == "ja"
     assert r.volume == "36"
diff --git a/python/tests/import_jstor.py b/python/tests/import_jstor.py
index 8494ffb2..25a904a0 100644
--- a/python/tests/import_jstor.py
+++ b/python/tests/import_jstor.py
@@ -50,20 +50,20 @@ def test_jstor_xml_parse(jstor_importer):
     print(r.extra)
 
     assert r.title == "On the Universal Law of Attraction, Including that of Gravitation, as a Particular Case of Approximation Deducible from the Principle that Equal and Similar Particles of Matter Move Similarly, Relatively to Each other. [Abstract]"
-    assert r.subtitle == None
-    assert r.original_title == None
+    assert r.subtitle is None
+    assert r.original_title is None
     assert r.publisher == "The Royal Society"
     assert r.release_type == "abstract"
     assert r.release_stage == "published"
-    assert r.license_slug == None
-    assert r.ext_ids.doi == None
+    assert r.license_slug is None
+    assert r.ext_ids.doi is None
     assert r.ext_ids.jstor == "111039"
     assert r.language == "en"
     assert r.volume == "5"
-    assert r.issue == None
+    assert r.issue is None
     assert r.pages == "831-832"
     # None because jan 1st
-    assert r.release_date == None
+    assert r.release_date is None
     assert r.release_year == 1843
     # matched by ISSN, so shouldn't be in there?
     #assert extra['container_name'] == "Abstracts of the Papers Communicated to the Royal Society of London"
 
@@ -74,4 +74,4 @@ def test_jstor_xml_parse(jstor_importer):
     assert r.contribs[0].given_name == "John Kinnersley"
     assert r.contribs[0].surname == "Smythies"
 
-    assert r.refs == None
+    assert r.refs is None
diff --git a/python/tests/import_pubmed.py b/python/tests/import_pubmed.py
index 10ded3fc..8b5ff3e5 100644
--- a/python/tests/import_pubmed.py
+++ b/python/tests/import_pubmed.py
@@ -50,19 +50,19 @@ def test_pubmed_xml_parse(pubmed_importer):
     r2 = pubmed_importer.parse_record(soup.find_all("PubmedArticle")[-1])
 
     assert r1.title == "Hospital debt management and cost reimbursement"
-    assert r1.subtitle == None
-    assert r1.original_title == None
-    assert r1.publisher == None
+    assert r1.subtitle is None
+    assert r1.original_title is None
+    assert r1.publisher is None
     assert r1.release_type == "article-journal"
     assert r1.release_stage == "published"
-    assert r1.license_slug == None
-    assert r1.ext_ids.doi == None
+    assert r1.license_slug is None
+    assert r1.ext_ids.doi is None
     assert r1.ext_ids.pmid == "973217"
     assert r1.language == "en"
     assert r1.volume == "3"
     assert r1.issue == "1"
     assert r1.pages == "69-81"
-    assert r1.release_date == None # not "1976-12-03", which is medline ingest date
+    assert r1.release_date is None # not "1976-12-03", which is medline ingest date
     assert r1.release_year == 1976
     # matched by ISSN, so shouldn't be in there?
     #assert extra['container_name'] == "Abstracts of the Papers Communicated to the Royal Society of London"
@@ -77,18 +77,18 @@ def test_pubmed_xml_parse(pubmed_importer):
     assert not r1.refs
 
     assert r2.title == "Synthesis and Antibacterial Activity of Metal(loid) Nanostructures by Environmental Multi-Metal(loid) Resistant Bacteria and Metal(loid)-Reducing Flavoproteins"
-    assert r2.subtitle == None
-    assert r2.original_title == None
-    assert r2.publisher == None
+    assert r2.subtitle is None
+    assert r2.original_title is None
+    assert r2.publisher is None
     assert r2.release_type == "article-journal"
     assert r2.release_stage == "published"
-    assert r2.license_slug == None
+    assert r2.license_slug is None
     assert r2.ext_ids.doi == "10.3389/fmicb.2018.00959"
     assert r2.ext_ids.pmid == "29869640"
     assert r2.ext_ids.pmcid == "PMC5962736"
     assert r2.language == "en"
     assert r2.volume == "9"
-    assert r2.issue == None
+    assert r2.issue is None
     assert r2.pages == "959"
     assert str(r2.release_date) == "2018-05-15"
     assert r2.release_year == 2018
diff --git a/python/tests/importer.py b/python/tests/importer.py
index a412b247..40bd8cba 100644
--- a/python/tests/importer.py
+++ b/python/tests/importer.py
@@ -9,24 +9,24 @@ def test_issnl_mapping_lookup(api):
     assert fi.issn2issnl('0000-0027') == '0002-0027'
     assert fi.issn2issnl('0002-0027') == '0002-0027'
-    assert fi.issn2issnl('9999-0027') == None
+    assert fi.issn2issnl('9999-0027') is None
 
-    assert fi.lookup_issnl('9999-9999') == None
+    assert fi.lookup_issnl('9999-9999') is None
 
 
 def test_identifiers(api):
 
     with open('tests/files/ISSN-to-ISSN-L.snip.txt', 'r') as issn_file:
         ci = CrossrefImporter(api, issn_map_file=issn_file)
 
-    assert ci.is_issnl("1234-5678") == True
-    assert ci.is_issnl("1234-5678.") == False
-    assert ci.is_issnl("12345678") == False
-    assert ci.is_issnl("1-2345678") == False
+    assert ci.is_issnl("1234-5678") is True
+    assert ci.is_issnl("1234-5678.") is False
+    assert ci.is_issnl("12345678") is False
+    assert ci.is_issnl("1-2345678") is False
 
     oi = OrcidImporter(api)
-    assert oi.is_orcid("0000-0003-3118-6591") == True
-    assert oi.is_orcid("0000-0003-3953-765X") == True
-    assert oi.is_orcid("0000-00x3-3118-659") == False
-    assert oi.is_orcid("0000-00033118-659") == False
-    assert oi.is_orcid("0000-0003-3118-659.") == False
+    assert oi.is_orcid("0000-0003-3118-6591") is True
+    assert oi.is_orcid("0000-0003-3953-765X") is True
+    assert oi.is_orcid("0000-00x3-3118-659") is False
+    assert oi.is_orcid("0000-00033118-659") is False
+    assert oi.is_orcid("0000-0003-3118-659.") is False
diff --git a/python/tests/transform_elasticsearch.py b/python/tests/transform_elasticsearch.py
index c11c5972..cee37867 100644
--- a/python/tests/transform_elasticsearch.py
+++ b/python/tests/transform_elasticsearch.py
@@ -83,15 +83,15 @@ def test_rich_elasticsearch_convert():
     assert es['ref_linked_count'] == 1
 
     assert es['preservation'] == "bright"
-    assert es['is_oa'] == True
-    assert es['is_longtail_oa'] == False
-    assert es['is_preserved'] == True
-    assert es['in_web'] == True
-    assert es['in_dweb'] == True
-    assert es['in_ia'] == True
-    assert es['in_ia_sim'] == False
-    assert es['in_kbart'] == True
-    assert es['in_jstor'] == True
+    assert es['is_oa'] is True
+    assert es['is_longtail_oa'] is False
+    assert es['is_preserved'] is True
+    assert es['in_web'] is True
+    assert es['in_dweb'] is True
+    assert es['in_ia'] is True
+    assert es['in_ia_sim'] is False
+    assert es['in_kbart'] is True
+    assert es['in_jstor'] is True
 
 def test_elasticsearch_release_from_json():
     r = entity_from_json(open('./tests/files/release_etodop5banbndg3faecnfm6ozi.json', 'r').read(), ReleaseEntity)
@@ -103,18 +103,18 @@ def test_elasticsearch_release_from_json():
     assert es['first_page'] == "1404"
     assert es['issue'] == "11"
     assert es['volume'] == "118"
-    assert es['number'] == None
+    assert es['number'] is None
 
     assert es['preservation'] == "dark"
-    assert es['is_oa'] == False
-    assert es['is_longtail_oa'] == False
-    assert es['is_preserved'] == True
-    assert es['in_web'] == False
-    assert es['in_dweb'] == False
-    assert es['in_ia'] == False
-    assert es['in_ia_sim'] == True
-    assert es['in_kbart'] == True
-    assert es['in_jstor'] == False
+    assert es['is_oa'] is False
+    assert es['is_longtail_oa'] is False
+    assert es['is_preserved'] is True
+    assert es['in_web'] is False
+    assert es['in_dweb'] is False
+    assert es['in_ia'] is False
+    assert es['in_ia_sim'] is True
+    assert es['in_kbart'] is True
+    assert es['in_jstor'] is False
 
     # this release has a fileset, and no file
     r = entity_from_json(open('./tests/files/release_3mssw2qnlnblbk7oqyv2dafgey.json', 'r').read(), ReleaseEntity)
@@ -127,15 +127,15 @@ def test_elasticsearch_release_from_json():
     assert es['webcapture_count'] == 0
 
     assert es['preservation'] == "dark"
-    assert es['is_oa'] == True
-    assert es['is_longtail_oa'] == False
-    assert es['is_preserved'] == True
-    assert es['in_web'] == True
-    assert es['in_dweb'] == True
-    assert es['in_ia'] == False
-    assert es['in_ia_sim'] == False
-    assert es['in_kbart'] == False
-    assert es['in_jstor'] == False
+    assert es['is_oa'] is True
+    assert es['is_longtail_oa'] is False
+    assert es['is_preserved'] is True
+    assert es['in_web'] is True
+    assert es['in_dweb'] is True
+    assert es['in_ia'] is False
+    assert es['in_ia_sim'] is False
+    assert es['in_kbart'] is False
+    assert es['in_jstor'] is False
 
     # this release has a web capture, and no file (edited the JSON to remove file)
     r = entity_from_json(open('./tests/files/release_mjtqtuyhwfdr7j2c3l36uor7uy.json', 'r').read(), ReleaseEntity)
@@ -148,15 +148,15 @@ def test_elasticsearch_release_from_json():
     assert es['webcapture_count'] == 1
 
     assert es['preservation'] == "bright"
-    assert es['is_oa'] == True
-    assert es['is_longtail_oa'] == False
-    assert es['is_preserved'] == True
-    assert es['in_web'] == True
-    assert es['in_dweb'] == False
-    assert es['in_ia'] == True
-    assert es['in_ia_sim'] == False
-    assert es['in_kbart'] == False
-    assert es['in_jstor'] == False
+    assert es['is_oa'] is True
+    assert es['is_longtail_oa'] is False
+    assert es['is_preserved'] is True
+    assert es['in_web'] is True
+    assert es['in_dweb'] is False
+    assert es['in_ia'] is True
+    assert es['in_ia_sim'] is False
+    assert es['in_kbart'] is False
+    assert es['in_jstor'] is False
 
 def test_elasticsearch_container_transform(journal_metadata_importer):
     with open('tests/files/journal_metadata.sample.json', 'r') as f:
@@ -200,7 +200,7 @@ def test_elasticsearch_container_transform(journal_metadata_importer):
     assert es['name'] == c2.name
     assert es['publisher'] == c2.publisher
     assert es['keepers'] == list(c2.extra['kbart'].keys()) == ["portico"]
-    assert es['any_kbart'] == True
+    assert es['any_kbart'] is True
 
 
 def test_elasticsearch_file_transform():
@@ -219,7 +219,7 @@ def test_elasticsearch_file_transform():
     assert es['md5'] == fe.md5 # pylint: disable=no-member
     assert es['size_bytes'] == fe.size # pylint: disable=no-member
     assert es['mimetype'] == fe.mimetype # pylint: disable=no-member
-    assert es['in_ia'] == True
+    assert es['in_ia'] is True
     assert 'web' in es['rels']
 
     assert 'www.zhros.ru' in es['hosts']
@@ -227,7 +227,7 @@ def test_elasticsearch_file_transform():
     assert 'archive.org' in (es['hosts'] + es['domains'])
     assert 'web.archive.org' in (es['hosts'] + es['domains'])
     # old regression
-    assert not '.archive.org' in (es['hosts'] + es['domains'])
+    assert '.archive.org' not in (es['hosts'] + es['domains'])
 
 def test_elasticsearch_changelog_transform():
     ce = entity_from_json(open('./tests/files/changelog_3469683.json', 'r').read(), ChangelogEntry)
@@ -238,8 +238,8 @@ def test_elasticsearch_changelog_transform():
     assert es['timestamp'][:19] == "2020-01-30T05:04:39.738601Z"[:19]
     assert es['editor_id'] == "scmbogxw25evtcesfcab5qaboa"
     assert es['username'] == "crawl-bot"
-    assert es['is_bot'] == True
-    assert es['is_admin'] == True
+    assert es['is_bot'] is True
+    assert es['is_admin'] is True
     assert es['agent'] == "fatcat_tools.IngestFileResultImporter"
 
     assert es['total'] == 50
@@ -279,15 +279,15 @@ def test_elasticsearch_release_kbart_year():
     assert es['release_year'] == this_year
 
     assert es['preservation'] == "none"
-    assert es['is_oa'] == True
-    assert es['is_longtail_oa'] == False
-    assert es['is_preserved'] == False
-    assert es['in_web'] == False
-    assert es['in_dweb'] == False
-    assert es['in_ia'] == False
-    assert es['in_ia_sim'] == False
-    assert es['in_kbart'] == False
-    assert es['in_jstor'] == False
+    assert es['is_oa'] is True
+    assert es['is_longtail_oa'] is False
+    assert es['is_preserved'] is False
+    assert es['in_web'] is False
+    assert es['in_dweb'] is False
+    assert es['in_ia'] is False
+    assert es['in_ia_sim'] is False
+    assert es['in_kbart'] is False
+    assert es['in_jstor'] is False
 
     r.container = ContainerEntity(
         name="dummy journal",
@@ -303,12 +303,12 @@ def test_elasticsearch_release_kbart_year():
     assert es['release_year'] == this_year
 
     assert es['preservation'] == "dark"
-    assert es['is_oa'] == True
-    assert es['is_longtail_oa'] == False
-    assert es['is_preserved'] == True
-    assert es['in_web'] == False
-    assert es['in_dweb'] == False
-    assert es['in_ia'] == False
-    assert es['in_ia_sim'] == False
-    assert es['in_kbart'] == True
-    assert es['in_jstor'] == False
+    assert es['is_oa'] is True
+    assert es['is_longtail_oa'] is False
+    assert es['is_preserved'] is True
+    assert es['in_web'] is False
+    assert es['in_dweb'] is False
+    assert es['in_ia'] is False
+    assert es['in_ia_sim'] is False
+    assert es['in_kbart'] is True
+    assert es['in_jstor'] is False
diff --git a/python/tests/web_citation_csl.py b/python/tests/web_citation_csl.py
index 50a2d6e8..a72742cb 100644
--- a/python/tests/web_citation_csl.py
+++ b/python/tests/web_citation_csl.py
@@ -47,7 +47,7 @@ def test_release_bibtex(app, api):
 
     rv = app.get('/release/{}'.format(r1edit.ident))
     assert rv.status_code == 200
-    assert not b'BibTeX' in rv.data
+    assert b'BibTeX' not in rv.data
     with pytest.raises(ValueError):
         rv = app.get('/release/{}.bib'.format(r1edit.ident))
 
diff --git a/python/tests/web_entity_views.py b/python/tests/web_entity_views.py
index 4068a0c7..45fd3e9a 100644
--- a/python/tests/web_entity_views.py
+++ b/python/tests/web_entity_views.py
@@ -332,7 +332,7 @@ def test_web_release_login(full_app, app_admin):
     rv = app_admin.post('/release/create', data=form.data, follow_redirects=True)
     assert rv.status_code == 400
     assert b'My Research: Missing Some Stuff' in rv.data
-    assert not b'already' in rv.data
+    assert b'already' not in rv.data
 
     with full_app.test_request_context():
         form = ReleaseEntityForm()
-- 
cgit v1.2.3