author     Martin Czygan <martin@archive.org>    2020-11-24 19:29:07 +0000
committer  Martin Czygan <martin@archive.org>    2020-11-24 19:29:07 +0000
commit     cfd13852d7cb58fcc3387373960adaf3680f0faf (patch)
tree       675954b8b34324fe22fc5a00f3fbb99a21a77a21 /python
parent     fcfcd3224a113fa90da2045a3c7fe90127088ebe (diff)
parent     1fca5a9822944d0646d2dcba6cf54f27a0ffe5c0 (diff)
Merge branch 'bnewbold-doaj-metadata' into 'master'
DOAJ article metadata import

See merge request webgroup/fatcat!89
Diffstat (limited to 'python')
-rw-r--r--  python/README_import.md | 7
-rwxr-xr-x  python/fatcat_import.py | 37
-rw-r--r--  python/fatcat_tools/importers/__init__.py | 1
-rw-r--r--  python/fatcat_tools/importers/common.py | 158
-rw-r--r--  python/fatcat_tools/importers/crossref.py | 2
-rw-r--r--  python/fatcat_tools/importers/datacite.py | 2
-rw-r--r--  python/fatcat_tools/importers/doaj_article.py | 358
-rw-r--r--  python/fatcat_tools/normal.py | 332
-rw-r--r--  python/fatcat_tools/transforms/elasticsearch.py | 3
-rw-r--r--  python/fatcat_web/templates/release_view.html | 8
-rw-r--r--  python/tests/files/example_doaj_articles.json | 5
-rw-r--r--  python/tests/import_doaj.py | 142
12 files changed, 895 insertions, 160 deletions
diff --git a/python/README_import.md b/python/README_import.md
index 65c08f8b..71b15eee 100644
--- a/python/README_import.md
+++ b/python/README_import.md
@@ -126,3 +126,10 @@ Run import in parallel:
zcat /srv/fatcat/datasets/crossref-pre-1923-scholarly-works.matched.json.gz | time parallel -j12 --round-robin --pipe ./fatcat_import.py matched - --default-mime 'application/pdf'
+## DOAJ
+
+Takes a few hours.
+
+ export FATCAT_API_AUTH_TOKEN=... (FATCAT_AUTH_WORKER_DOAJ)
+
+ zcat /srv/fatcat/datasets/doaj_article_data_2020-11-13_all.json.gz | pv -l | parallel -j12 --round-robin --pipe ./fatcat_import.py doaj-article --issn-map-file /srv/fatcat/datasets/ISSN-to-ISSN-L.txt -
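The `doaj-article` subcommand added in `fatcat_import.py` below can also consume records from Kafka instead of a file or stdin. A minimal sketch of such an invocation (the trailing `-` satisfies the positional `json_file` argument, which is ignored in Kafka mode), assuming the usual worker token and the default Kafka hosts/env for the deployment:

    export FATCAT_API_AUTH_TOKEN=...   # FATCAT_AUTH_WORKER_DOAJ

    ./fatcat_import.py doaj-article --kafka-mode --issn-map-file /srv/fatcat/datasets/ISSN-to-ISSN-L.txt -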
diff --git a/python/fatcat_import.py b/python/fatcat_import.py
index 19cf43ec..ff6c94dc 100755
--- a/python/fatcat_import.py
+++ b/python/fatcat_import.py
@@ -256,6 +256,24 @@ def run_datacite(args):
else:
JsonLinePusher(dci, args.json_file).run()
+def run_doaj_article(args):
+ dai = DoajArticleImporter(args.api,
+ args.issn_map_file,
+ edit_batch_size=args.batch_size,
+ do_updates=args.do_updates,
+ )
+ if args.kafka_mode:
+ KafkaJsonPusher(
+ dai,
+ args.kafka_hosts,
+ args.kafka_env,
+ "api-doaj",
+ "fatcat-{}-import-doaj".format(args.kafka_env),
+ consume_batch_size=args.batch_size,
+ ).run()
+ else:
+ JsonLinePusher(dai, args.json_file).run()
+
def run_file_meta(args):
# do_updates defaults to true for this importer
fmi = FileMetaImporter(args.api,
@@ -606,6 +624,25 @@ def main():
auth_var="FATCAT_AUTH_WORKER_DATACITE",
)
+ sub_doaj_article = subparsers.add_parser('doaj-article',
+ help="import doaj.org article metadata")
+ sub_doaj_article.add_argument('json_file',
+ help="File with JSON lines from DOAJ API (or bulk dump) to import from",
+ default=sys.stdin, type=argparse.FileType('r'))
+ sub_doaj_article.add_argument('--issn-map-file',
+ help="ISSN to ISSN-L mapping file",
+ default=None, type=argparse.FileType('r'))
+ sub_doaj_article.add_argument('--kafka-mode',
+ action='store_true',
+ help="consume from kafka topic (not stdin)")
+ sub_doaj_article.add_argument('--do-updates',
+ action='store_true',
+ help="update any pre-existing release entities")
+ sub_doaj_article.set_defaults(
+ func=run_doaj_article,
+ auth_var="FATCAT_AUTH_WORKER_DOAJ",
+ )
+
sub_file_meta = subparsers.add_parser('file-meta',
help="simple update-only importer for file metadata")
sub_file_meta.set_defaults(
diff --git a/python/fatcat_tools/importers/__init__.py b/python/fatcat_tools/importers/__init__.py
index c08e04c2..d2928d09 100644
--- a/python/fatcat_tools/importers/__init__.py
+++ b/python/fatcat_tools/importers/__init__.py
@@ -30,3 +30,4 @@ from .cdl_dash_dat import auto_cdl_dash_dat
from .ingest import IngestFileResultImporter, SavePaperNowFileImporter, IngestWebResultImporter
from .shadow import ShadowLibraryImporter
from .file_meta import FileMetaImporter
+from .doaj_article import DoajArticleImporter
diff --git a/python/fatcat_tools/importers/common.py b/python/fatcat_tools/importers/common.py
index 14415683..3c810391 100644
--- a/python/fatcat_tools/importers/common.py
+++ b/python/fatcat_tools/importers/common.py
@@ -3,12 +3,9 @@ import re
import sys
import csv
import json
-import ftfy
-import base64
import sqlite3
import datetime
import subprocess
-import unicodedata
from collections import Counter
from confluent_kafka import Consumer, KafkaException
import xml.etree.ElementTree as ET
@@ -18,162 +15,13 @@ from bs4 import BeautifulSoup
import fatcat_openapi_client
from fatcat_openapi_client.rest import ApiException
+# TODO: refactor to remove the need for this (re-imports kept for backwards compatibility)
+from fatcat_tools.normal import (clean_str as clean, is_cjk, b32_hex, LANG_MAP_MARC) # noqa: F401
DATE_FMT = "%Y-%m-%d"
SANE_MAX_RELEASES = 200
SANE_MAX_URLS = 100
-# These are very close, but maybe not exactly 1-to-1 with 639-2? Some mix of
-# 2/T and 2/B?
-# PubMed/MEDLINE and JSTOR use these MARC codes
-# https://www.loc.gov/marc/languages/language_name.html
-LANG_MAP_MARC = {
- 'afr': 'af',
- 'alb': 'sq',
- 'amh': 'am',
- 'ara': 'ar',
- 'arm': 'hy',
- 'aze': 'az',
- 'ben': 'bn',
- 'bos': 'bs',
- 'bul': 'bg',
- 'cat': 'ca',
- 'chi': 'zh',
- 'cze': 'cs',
- 'dan': 'da',
- 'dut': 'nl',
- 'eng': 'en',
- 'epo': 'eo',
- 'est': 'et',
- 'fin': 'fi',
- 'fre': 'fr',
- 'geo': 'ka',
- 'ger': 'de',
- 'gla': 'gd',
- 'gre': 'el',
- 'heb': 'he',
- 'hin': 'hi',
- 'hrv': 'hr',
- 'hun': 'hu',
- 'ice': 'is',
- 'ind': 'id',
- 'ita': 'it',
- 'jpn': 'ja',
- 'kin': 'rw',
- 'kor': 'ko',
- 'lat': 'la',
- 'lav': 'lv',
- 'lit': 'lt',
- 'mac': 'mk',
- 'mal': 'ml',
- 'mao': 'mi',
- 'may': 'ms',
- 'nor': 'no',
- 'per': 'fa',
- 'per': 'fa',
- 'pol': 'pl',
- 'por': 'pt',
- 'pus': 'ps',
- 'rum': 'ro',
- 'rus': 'ru',
- 'san': 'sa',
- 'slo': 'sk',
- 'slv': 'sl',
- 'spa': 'es',
- 'srp': 'sr',
- 'swe': 'sv',
- 'tha': 'th',
- 'tur': 'tr',
- 'ukr': 'uk',
- 'urd': 'ur',
- 'vie': 'vi',
- 'wel': 'cy',
-
-# additions
- 'gle': 'ga', # "Irish" (Gaelic)
- 'jav': 'jv', # Javanese
- 'welsh': 'cy', # Welsh
- 'oci': 'oc', # Occitan
-
-# Don't have ISO 639-1 codes
- 'grc': 'el', # Ancient Greek; map to modern greek
- 'map': None, # Austronesian (collection)
- 'syr': None, # Syriac, Modern
- 'gem': None, # Old Saxon
- 'non': None, # Old Norse
- 'emg': None, # Eastern Meohang
- 'neg': None, # Negidal
- 'mul': None, # Multiple languages
- 'und': None, # Undetermined
-}
-
-
-def clean(thing, force_xml=False):
- """
- This function is appropriate to be called on any random, non-markup string,
- such as author names, titles, etc.
-
- It will try to clean up common unicode mangles, HTML characters, etc.
-
- This will detect XML/HTML and "do the right thing" (aka, not remove
- entities like '&amp' if there are tags in the string), unless you pass the
- 'force_xml' parameter, which might be appropriate for, eg, names and
- titles, which generally should be projected down to plain text.
-
- Also strips extra whitespace.
- """
- if not thing:
- return None
- fix_entities = 'auto'
- if force_xml:
- fix_entities = True
- fixed = ftfy.fix_text(thing, fix_entities=fix_entities).strip()
- if not fixed or len(fixed) <= 1:
- # wasn't zero-length before, but is now; return None
- return None
- return fixed
-
-def test_clean():
-
- assert clean(None) == None
- assert clean('') == None
- assert clean('1') == None
- assert clean('123') == '123'
- assert clean('a&amp;b') == 'a&b'
- assert clean('<b>a&amp;b</b>') == '<b>a&amp;b</b>'
- assert clean('<b>a&amp;b</b>', force_xml=True) == '<b>a&b</b>'
-
-def b32_hex(s):
- s = s.strip().split()[0].lower()
- if s.startswith("sha1:"):
- s = s[5:]
- if len(s) != 32:
- return s
- return base64.b16encode(base64.b32decode(s.upper())).lower().decode('utf-8')
-
-def is_cjk(s):
- if not s:
- return False
- for c in s:
- if c.isalpha():
- lang_prefix = unicodedata.name(c).split()[0]
- return lang_prefix in ('CJK', 'HIRAGANA', 'KATAKANA', 'HANGUL')
- return False
-
-def test_is_cjk():
- assert is_cjk(None) is False
- assert is_cjk('') is False
- assert is_cjk('blah') is False
- assert is_cjk('岡, 鹿, 梨, 阜, 埼') is True
- assert is_cjk('[岡, 鹿, 梨, 阜, 埼]') is True
- assert is_cjk('菊') is True
- assert is_cjk('岡, 鹿, 梨, 阜, 埼 with eng after') is True
- assert is_cjk('水道') is True
- assert is_cjk('オウ, イク') is True # kanji
- assert is_cjk('ひヒ') is True
- assert is_cjk('き゚ゅ') is True
- assert is_cjk('ㄴ, ㄹ, ㅁ, ㅂ, ㅅ') is True
-
DOMAIN_REL_MAP = {
"archive.org": "archive",
# LOCKSS, Portico, DuraSpace, etc would also be "archive"
@@ -444,6 +292,7 @@ class EntityImporter:
raise NotImplementedError
def is_orcid(self, orcid):
+ # TODO: replace with clean_orcid() from fatcat_tools.normal
return self._orcid_regex.match(orcid) is not None
def lookup_orcid(self, orcid):
@@ -464,6 +313,7 @@ class EntityImporter:
return creator_id
def is_doi(self, doi):
+ # TODO: replace with clean_doi() from fatcat_tools.normal
return doi.startswith("10.") and doi.count("/") >= 1
def lookup_doi(self, doi):
diff --git a/python/fatcat_tools/importers/crossref.py b/python/fatcat_tools/importers/crossref.py
index 71f08952..e77fa65e 100644
--- a/python/fatcat_tools/importers/crossref.py
+++ b/python/fatcat_tools/importers/crossref.py
@@ -487,8 +487,6 @@ class CrossrefImporter(EntityImporter):
except fatcat_openapi_client.rest.ApiException as err:
if err.status != 404:
raise err
- # doesn't exist, need to update
- return True
# eventually we'll want to support "updates", but for now just skip if
# entity already exists
diff --git a/python/fatcat_tools/importers/datacite.py b/python/fatcat_tools/importers/datacite.py
index 5cdc5577..70f8db86 100644
--- a/python/fatcat_tools/importers/datacite.py
+++ b/python/fatcat_tools/importers/datacite.py
@@ -781,8 +781,6 @@ class DataciteImporter(EntityImporter):
except fatcat_openapi_client.rest.ApiException as err:
if err.status != 404:
raise err
- # doesn't exist, need to update
- return True
# eventually we'll want to support "updates", but for now just skip if
# entity already exists
diff --git a/python/fatcat_tools/importers/doaj_article.py b/python/fatcat_tools/importers/doaj_article.py
new file mode 100644
index 00000000..03752484
--- /dev/null
+++ b/python/fatcat_tools/importers/doaj_article.py
@@ -0,0 +1,358 @@
+"""
+Importer for DOAJ article-level metadata, schema v1.
+
+DOAJ API schema and docs: https://doaj.org/api/v1/docs
+"""
+
+import warnings
+import datetime
+from typing import List, Optional
+
+import fatcat_openapi_client
+from fatcat_tools.normal import (clean_doi, clean_str, parse_month,
+ clean_orcid, detect_text_lang, parse_lang_name, parse_country_name,
+ clean_pmid, clean_pmcid)
+from fatcat_tools.importers.common import EntityImporter
+
+# Cutoff length for abstracts.
+MAX_ABSTRACT_LENGTH = 2048
+
+
+class DoajArticleImporter(EntityImporter):
+
+ def __init__(self,
+ api,
+ issn_map_file,
+ **kwargs):
+
+ eg_desc = kwargs.get(
+ 'editgroup_description',
+ "Automated import of DOAJ article metadata, harvested from REST API or bulk dumps"
+ )
+ eg_extra = kwargs.get('editgroup_extra', dict())
+ eg_extra['agent'] = eg_extra.get('agent',
+ 'fatcat_tools.DoajArticleImporter')
+ # ensure default is to not do updates with this worker (override super() default)
+ kwargs['do_updates'] = kwargs.get("do_updates", False)
+ super().__init__(api,
+ issn_map_file=issn_map_file,
+ editgroup_description=eg_desc,
+ editgroup_extra=eg_extra,
+ **kwargs)
+
+ self.this_year = datetime.datetime.now().year
+ self.read_issn_map_file(issn_map_file)
+
+ def want(self, obj):
+ return True
+
+ def parse_record(self, obj):
+ """
+ bibjson {
+ abstract (string, optional),
+ author (Array[bibjson.author], optional),
+ identifier (Array[bibjson.identifier]),
+ journal (bibjson.journal, optional),
+ keywords (Array[string], optional),
+ link (Array[bibjson.link], optional),
+ month (string, optional),
+ subject (Array[bibjson.subject], optional),
+ title (string),
+ year (string, optional)
+ }
+ bibjson.journal {
+ country (string, optional),
+ end_page (string, optional),
+ language (Array[string], optional),
+ license (Array[bibjson.journal.license], optional),
+ number (string, optional),
+ publisher (string, optional),
+ start_page (string, optional),
+ title (string, optional),
+ volume (string, optional)
+ }
+ """
+
+        if not obj or not isinstance(obj, dict) or 'bibjson' not in obj:
+ self.counts['skip-empty'] += 1
+ return None
+
+ bibjson = obj['bibjson']
+
+ title = clean_str(bibjson.get('title'), force_xml=True)
+ if not title:
+ self.counts['skip-title'] += 1
+ return False
+
+ container_name = clean_str(bibjson['journal']['title'])
+ container_id = None
+ # NOTE: 'issns' not documented in API schema
+ for issn in bibjson['journal']['issns']:
+ issnl = self.issn2issnl(issn)
+ if issnl:
+ container_id = self.lookup_issnl(self.issn2issnl(issn))
+ if container_id:
+ # don't store container_name when we have an exact match
+ container_name = None
+ break
+
+ volume = clean_str(bibjson['journal'].get('volume'))
+ # NOTE: this schema seems to use "number" as "issue number"
+ issue = clean_str(bibjson['journal'].get('number'))
+ publisher = clean_str(bibjson['journal'].get('publisher'))
+
+ try:
+ release_year = int(bibjson.get('year'))
+ except (TypeError, ValueError):
+ release_year = None
+ release_month = parse_month(clean_str(bibjson.get('month')))
+
+ # block bogus far-future years/dates
+ if release_year is not None and (release_year > (self.this_year + 5) or release_year < 1000):
+ release_month = None
+ release_year = None
+
+ license_slug = self.doaj_license_slug(bibjson['journal'].get('license'))
+ country = parse_country_name(bibjson['journal'].get('country'))
+ language = None
+ for raw in bibjson['journal'].get('language') or []:
+ language = parse_lang_name(raw)
+ if language:
+ break
+
+ # pages
+ # NOTE: error in API docs? seems like start_page not under 'journal' object
+ start_page = clean_str(bibjson['journal'].get('start_page')) or clean_str(bibjson.get('start_page'))
+ end_page = clean_str(bibjson['journal'].get('end_page')) or clean_str(bibjson.get('end_page'))
+ pages: Optional[str] = None
+ if start_page and end_page:
+ pages = f"{start_page}-{end_page}"
+ elif start_page:
+ pages = start_page
+
+ doaj_article_id = obj['id'].lower()
+ ext_ids = self.doaj_ext_ids(bibjson['identifier'], doaj_article_id)
+ abstracts = self.doaj_abstracts(bibjson)
+ contribs = self.doaj_contribs(bibjson.get('author') or [])
+
+ # DOAJ-specific extra
+ doaj_extra = dict()
+ if bibjson.get('subject'):
+ doaj_extra['subject'] = bibjson.get('subject')
+ if bibjson.get('keywords'):
+ doaj_extra['keywords'] = [k for k in [clean_str(s) for s in bibjson.get('keywords')] if k]
+
+ # generic extra
+ extra = dict()
+ if country:
+ extra['country'] = country
+ if not container_id and container_name:
+ extra['container_name'] = container_name
+ if release_year and release_month:
+ # TODO: schema migration
+ extra['release_month'] = release_month
+
+ if doaj_extra:
+ extra['doaj'] = doaj_extra
+ if not extra:
+ extra = None
+
+ re = fatcat_openapi_client.ReleaseEntity(
+ work_id=None,
+ container_id=container_id,
+ release_type='article-journal',
+ release_stage='published',
+ title=title,
+ release_year=release_year,
+ #release_date,
+ publisher=publisher,
+ ext_ids=ext_ids,
+ contribs=contribs,
+ volume=volume,
+ issue=issue,
+ pages=pages,
+ language=language,
+ abstracts=abstracts,
+ extra=extra,
+ license_slug=license_slug,
+ )
+ re = self.biblio_hacks(re)
+ return re
+
+ @staticmethod
+ def biblio_hacks(re):
+ """
+ This function handles known special cases. For example,
+ publisher-specific or platform-specific workarounds.
+ """
+ return re
+
+ def try_update(self, re):
+
+ # lookup existing release by DOAJ article id
+ existing = None
+ try:
+ existing = self.api.lookup_release(doaj=re.ext_ids.doaj)
+ except fatcat_openapi_client.rest.ApiException as err:
+ if err.status != 404:
+ raise err
+
+ # then try other ext_id lookups
+ if not existing:
+ for extid_type in ('doi', 'pmid', 'pmcid'):
+ extid_val = getattr(re.ext_ids, extid_type)
+ if not extid_val:
+ continue
+ #print(f" lookup release type: {extid_type} val: {extid_val}")
+ try:
+ existing = self.api.lookup_release(**{extid_type: extid_val})
+ except fatcat_openapi_client.rest.ApiException as err:
+ if err.status != 404:
+ raise err
+ if existing:
+ if existing.ext_ids.doaj:
+ warn_str = f"unexpected DOAJ ext_id match after lookup failed doaj={re.ext_ids.doaj} ident={existing.ident}"
+ warnings.warn(warn_str)
+ self.counts["skip-doaj-id-mismatch"] += 1
+ return False
+ break
+
+ # TODO: in the future could do fuzzy match here, eg using elasticsearch
+
+ # create entity
+ if not existing:
+ return True
+
+ # other logic could go here about skipping updates
+ if not self.do_updates or existing.ext_ids.doaj:
+ self.counts['exists'] += 1
+ return False
+
+ # fields to copy over for update
+ existing.ext_ids.doaj = existing.ext_ids.doaj or re.ext_ids.doaj
+ existing.release_type = existing.release_type or re.release_type
+ existing.release_stage = existing.release_stage or re.release_stage
+ existing.container_id = existing.container_id or re.container_id
+ existing.abstracts = existing.abstracts or re.abstracts
+ existing.extra['doaj'] = re.extra['doaj']
+ existing.volume = existing.volume or re.volume
+ existing.issue = existing.issue or re.issue
+ existing.pages = existing.pages or re.pages
+ existing.language = existing.language or re.language
+
+ try:
+ self.api.update_release(self.get_editgroup_id(), existing.ident, existing)
+ self.counts['update'] += 1
+ except fatcat_openapi_client.rest.ApiException as err:
+ # there is a code path where we try to update the same release
+ # twice in a row; if that happens, just skip
+ # NOTE: API behavior might change in the future?
+ if "release_edit_editgroup_id_ident_id_key" in err.body:
+ self.counts['skip-update-conflict'] += 1
+ return False
+ else:
+ raise err
+
+ return False
+
+ def insert_batch(self, batch):
+ self.api.create_release_auto_batch(fatcat_openapi_client.ReleaseAutoBatch(
+ editgroup=fatcat_openapi_client.Editgroup(
+ description=self.editgroup_description,
+ extra=self.editgroup_extra),
+ entity_list=batch))
+
+ def doaj_abstracts(self, bibjson: dict) -> List[fatcat_openapi_client.ReleaseAbstract]:
+ text = clean_str(bibjson.get('abstract'))
+ if not text or len(text) < 10:
+ return []
+ if len(text) > MAX_ABSTRACT_LENGTH:
+ text = text[:MAX_ABSTRACT_LENGTH] + " [...]"
+
+ lang = detect_text_lang(text)
+
+ abstract = fatcat_openapi_client.ReleaseAbstract(
+ mimetype="text/plain",
+ content=text,
+ lang=lang,
+ )
+
+ return [abstract,]
+
+ def doaj_contribs(self, authors: List[dict]) -> List[fatcat_openapi_client.ReleaseContrib]:
+ """
+ bibjson.author {
+ affiliation (string, optional),
+ name (string),
+ orcid_id (string, optional)
+ }
+ """
+ contribs = []
+ index = 0
+ for author in authors:
+ if not author.get('name'):
+ continue
+ creator_id = None
+ orcid = clean_orcid(author.get('orcid_id'))
+ if orcid:
+ creator_id = self.lookup_orcid(orcid)
+ contribs.append(fatcat_openapi_client.ReleaseContrib(
+ raw_name=author.get('name'),
+ role='author',
+ index=index,
+ creator_id=creator_id,
+ raw_affiliation=clean_str(author.get('affiliation')),
+ ))
+ index += 1
+ return contribs
+
+ def doaj_ext_ids(self, identifiers: List[dict], doaj_article_id: str) -> fatcat_openapi_client.ReleaseExtIds:
+ """
+ bibjson.identifier {
+ id (string),
+ type (string)
+ }
+ """
+
+ assert doaj_article_id.isalnum() and len(doaj_article_id) == 32
+
+ doi: Optional[str] = None
+ pmid: Optional[str] = None
+ pmcid: Optional[str] = None
+ for id_obj in identifiers:
+ if not id_obj.get('id'):
+ continue
+ if id_obj['type'].lower() == 'doi':
+ doi = clean_doi(id_obj['id'])
+ elif id_obj['type'].lower() == 'pmid':
+ pmid = clean_pmid(id_obj['id'])
+ elif id_obj['type'].lower() == 'pmcid':
+ pmcid = clean_pmcid(id_obj['id'])
+
+ return fatcat_openapi_client.ReleaseExtIds(
+ doaj=doaj_article_id,
+ doi=doi,
+ pmid=pmid,
+ pmcid=pmcid,
+ )
+
+ def doaj_license_slug(self, license_list: List[dict]) -> Optional[str]:
+ """
+ bibjson.journal.license {
+ open_access (boolean, optional),
+ title (string, optional),
+ type (string, optional),
+ url (string, optional),
+ version (string, optional)
+ }
+ """
+ if not license_list:
+ return None
+ for license in license_list:
+ if not license.get('open_access'):
+ continue
+ slug = license.get('type')
+            if slug and slug.startswith('CC '):
+ slug = slug.replace('CC ', 'cc-').lower()
+ return slug
+ return None
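For orientation, a minimal sketch (not part of this commit) of driving the importer over a bulk dump without the `fatcat_import.py` wrapper, assuming `api` is an already-authenticated fatcat API client and the dataset paths from the README above:

    import gzip

    from fatcat_tools.importers import DoajArticleImporter, JsonLinePusher

    with open("/srv/fatcat/datasets/ISSN-to-ISSN-L.txt", "r") as issn_file:
        # do_updates defaults to False for this importer (see __init__ above)
        importer = DoajArticleImporter(api, issn_file)

    with gzip.open("/srv/fatcat/datasets/doaj_article_data_2020-11-13_all.json.gz", "rt") as f:
        counts = JsonLinePusher(importer, f).run()
    print(counts)  # insert/exists/skip-* tallies, as exercised in tests/import_doaj.py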
diff --git a/python/fatcat_tools/normal.py b/python/fatcat_tools/normal.py
index 10a90dba..a3d6bccc 100644
--- a/python/fatcat_tools/normal.py
+++ b/python/fatcat_tools/normal.py
@@ -5,6 +5,13 @@ free-form input, titles, etc.
"""
import re
+import base64
+from typing import Optional
+import unicodedata
+
+import ftfy
+import langdetect
+import pycountry
DOI_REGEX = re.compile(r"^10.\d{3,6}/\S+$")
@@ -55,7 +62,8 @@ def clean_doi(raw):
# will likely want to expand DOI_REGEX to exclude non-ASCII characters, but
# for now block specific characters so we can get PubMed importer running
# again.
- if 'ä' in raw or '\u200e' in raw:
+ # known characters to skip: ä á \u200e \u2043 \u2012
+ if not raw.isascii():
return None
return raw
@@ -72,6 +80,10 @@ def test_clean_doi():
assert clean_doi("10.4149/gpb¬_2017042") == None # "logical negation" character
assert clean_doi("10.6002/ect.2020.häyry") == None # this example via pubmed (pmid:32519616)
assert clean_doi("10.30466/vrf.2019.98547.2350\u200e") == None
+ assert clean_doi("10.12016/j.issn.2096⁃1456.2017.06.014") == None
+ assert clean_doi("10.4025/diálogos.v17i2.36030") == None
+ assert clean_doi("10.19027/jai.10.106‒115") == None
+ assert clean_doi("10.15673/атбп2312-3125.17/2014.26332") == None
ARXIV_ID_REGEX = re.compile(r"^(\d{4}.\d{4,5}|[a-z\-]+(\.[A-Z]{2})?/\d{7})(v\d+)?$")
@@ -233,3 +245,321 @@ def test_clean_orcid():
assert clean_orcid("0123-4567-3456-6789 ") == "0123-4567-3456-6789"
assert clean_orcid("01234567-3456-6780") == None
assert clean_orcid("0x23-4567-3456-6780") == None
+
+
+def clean_str(thing: Optional[str], force_xml: bool = False) -> Optional[str]:
+ """
+ This function is appropriate to be called on any random, non-markup string,
+ such as author names, titles, etc.
+
+ It will try to clean up common unicode mangles, HTML characters, etc.
+
+ This will detect XML/HTML and "do the right thing" (aka, not remove
+ entities like '&amp' if there are tags in the string), unless you pass the
+ 'force_xml' parameter, which might be appropriate for, eg, names and
+ titles, which generally should be projected down to plain text.
+
+ Also strips extra whitespace.
+ """
+ if not thing:
+ return None
+ fix_entities = 'auto'
+ if force_xml:
+ fix_entities = True
+ fixed = ftfy.fix_text(thing, fix_entities=fix_entities).strip()
+ if not fixed or len(fixed) <= 1:
+ # wasn't zero-length before, but is now; return None
+ return None
+ return fixed
+
+def test_clean_str():
+
+ assert clean_str(None) == None
+ assert clean_str('') == None
+ assert clean_str('1') == None
+ assert clean_str('123') == '123'
+ assert clean_str('a&amp;b') == 'a&b'
+ assert clean_str('<b>a&amp;b</b>') == '<b>a&amp;b</b>'
+ assert clean_str('<b>a&amp;b</b>', force_xml=True) == '<b>a&b</b>'
+
+def b32_hex(s):
+ s = s.strip().split()[0].lower()
+ if s.startswith("sha1:"):
+ s = s[5:]
+ if len(s) != 32:
+ return s
+ return base64.b16encode(base64.b32decode(s.upper())).lower().decode('utf-8')
+
+def is_cjk(s):
+ if not s:
+ return False
+ for c in s:
+ if c.isalpha():
+ lang_prefix = unicodedata.name(c).split()[0]
+ return lang_prefix in ('CJK', 'HIRAGANA', 'KATAKANA', 'HANGUL')
+ return False
+
+def test_is_cjk():
+ assert is_cjk(None) is False
+ assert is_cjk('') is False
+ assert is_cjk('blah') is False
+ assert is_cjk('岡, 鹿, 梨, 阜, 埼') is True
+ assert is_cjk('[岡, 鹿, 梨, 阜, 埼]') is True
+ assert is_cjk('菊') is True
+ assert is_cjk('岡, 鹿, 梨, 阜, 埼 with eng after') is True
+ assert is_cjk('水道') is True
+    assert is_cjk('オウ, イク') is True # katakana
+ assert is_cjk('ひヒ') is True
+ assert is_cjk('き゚ゅ') is True
+ assert is_cjk('ㄴ, ㄹ, ㅁ, ㅂ, ㅅ') is True
+
+MONTH_MAP = {
+ "jan": 1, "january": 1,
+    "feb": 2, "february": 2, "febuary": 2,  # tolerate a common misspelling
+ "mar": 3, "march": 3,
+ "apr": 4, "april": 4,
+    "may": 5,
+ "jun": 6, "june": 6,
+ "jul": 7, "july": 7,
+ "aug": 8, "august": 8,
+ "sep": 9, "september": 9,
+ "oct": 10, "october": 10,
+    "nov": 11, "november": 11,
+ "dec": 12, "december": 12,
+}
+
+def parse_month(raw: Optional[str]) -> Optional[int]:
+ """
+ Parses a string into a month number (1 to 12)
+ """
+ if not raw:
+ return None
+ raw = raw.strip().lower()
+ if raw.isdigit():
+ raw_int = int(raw)
+ if raw_int >= 1 and raw_int <= 12:
+ return raw_int
+ else:
+ return None
+ if raw in MONTH_MAP:
+ return MONTH_MAP[raw]
+ return None
+
+def test_parse_month() -> None:
+
+ assert parse_month(None) == None
+ assert parse_month("") == None
+ assert parse_month("0") == None
+ assert parse_month("10") == 10
+ assert parse_month("jan") == 1
+ assert parse_month("September") == 9
+
+def detect_text_lang(raw: str) -> Optional[str]:
+ """
+ Tries to determine language of, eg, an abstract.
+
+    Returns an ISO 639-1 2-char language code, or None.
+ """
+ if not raw:
+ return None
+ try:
+ lang = langdetect.detect(raw)
+ lang = lang.split('-')[0]
+ assert len(lang) == 2
+ return lang
+ except (langdetect.lang_detect_exception.LangDetectException, TypeError):
+ return None
+ return None
+
+def test_detect_text_lang() -> None:
+ assert detect_text_lang("") == None
+ EN_SAMPLE = "this is a string of English text for testing"
+ assert detect_text_lang(EN_SAMPLE) == "en"
+ JA_SAMPLE = "モーラの種類は、以下に示すように111程度存在する。ただし、研究者により数え方が少しずつ異なる。"
+ assert detect_text_lang(JA_SAMPLE) == "ja"
+ ZH_SAMPLE = "随着分布式清洁能源的普及,通信技术在协调各个分布式电源的控制中显得尤为重要。在电力信息传输的过程中,不同的网络状态下表现出不同的通信特性,严重的甚至会发生信息错乱丢包等行为,这对电网的实时控制产生严重影响。为研究信息系统对电力物理系统的实时影响,搭建了电力信息物理融合仿真平台,运用RT-LAB与OPNET两款实时仿真器,通过TCP/IP进行数据交互,对微电网电压、频率的集中式恢复与分布式恢复问题展开研究。仿真结果表明,该平台能有效地反映通信网络对电网控制的影响,提供了一种可靠的未来电力信息物理融合系统研究技术。随着分布式清洁能源的普及,通信技术在协调各个分布式电源的控制中显得尤为重要。在电力信息传输的过程中,不同的网络状态下表现出不同的通信特性,严重的甚至会发生信息错乱丢包等行为,这对电网的实时控制产生严重影响。为研究信息系统对电力物理系统的实时影响,搭建了电力信息物理融合仿真平台,运用RT-LAB与OPNET两款实时仿真器,通过TCP/IP进行数据交互,对微电网电压、频率的集中式恢复与分布式恢复问题展开研究。仿真结果表明,该平台能有效地反映通信网络对电网控制的影响,提供了一种可靠的未来电力信息物理融合系统研究技术。"
+ assert detect_text_lang(ZH_SAMPLE) == "zh"
+
+def parse_lang_name(raw: Optional[str]) -> Optional[str]:
+ """
+    Parses a language name and returns a 2-char ISO 639-1 language code.
+ """
+ if not raw:
+ return None
+ try:
+ lang = pycountry.languages.lookup(raw)
+ if lang.alpha_3 in ("mul", "mis"):
+ return None
+ return lang.alpha_2.lower()
+ except LookupError:
+        #print(f"unknown language: '{raw}'", file=sys.stderr)
+ return None
+ except AttributeError:
+        #print(f"partial language metadata: '{lang}'", file=sys.stderr)
+ return None
+ return None
+
+def test_parse_lang_name() -> None:
+
+ assert parse_lang_name(None) == None
+ assert parse_lang_name("") == None
+ assert parse_lang_name("asdf ") == None
+ assert parse_lang_name("english") == "en"
+ assert parse_lang_name("ENGLISH") == "en"
+ assert parse_lang_name("asdf blah") is None
+ assert parse_lang_name("en") == "en"
+ assert parse_lang_name("EN") == "en"
+ assert parse_lang_name("ENG") == "en"
+ assert parse_lang_name("English") == "en"
+ assert parse_lang_name("Portuguese") == "pt"
+
+
+def parse_country_name(s: Optional[str]) -> Optional[str]:
+ """
+    Parses a country name into an ISO 3166-1 alpha-2 (2-char) country code.
+
+ This version copied from the chocula repository.
+ """
+    if not s or s == "Unknown":
+ return None
+
+ s = s.strip()
+ if s.lower() in ("usa", "new york (state)", "washington (state)"):
+ return "us"
+ if s.lower() in ("russia (federation)", "russia"):
+ return "ru"
+ if s == "Québec (Province)":
+ s = "Canada"
+ if s == "China (Republic : 1949- )":
+ return "tw"
+ if s == "Brunei":
+ return "bn"
+ if s.startswith("Congo "):
+ s = "Congo"
+ if s.lower() == "iran":
+ return "ir"
+ if s.lower() == "bermuda islands":
+ return "bm"
+ if s.lower() == "burma":
+ s = "myanmar"
+ if s.lower() in ("korea (south)", "south korea"):
+ return "kr"
+ if s.lower() in ("england", "scotland", "wales"):
+ return "uk"
+ s = s.replace(" (Republic)", "").replace(" (Federation)", "")
+
+ try:
+ country = pycountry.countries.lookup(s)
+ except LookupError:
+ country = None
+
+ if country:
+ return country.alpha_2.lower()
+ try:
+ sub = pycountry.subdivisions.lookup(s)
+ except LookupError:
+ sub = None
+
+ s = s.replace(" (State)", "").replace(" (Province)", "")
+ if sub:
+ return sub.country_code.lower()
+
+ else:
+ # print(f"unknown country: {s}", file=sys.stderr)
+ return None
+
+
+def test_parse_country_name():
+ assert parse_country_name("") is None
+ assert parse_country_name("asdf blah") is None
+ assert parse_country_name("us") == "us"
+ assert parse_country_name("USA") == "us"
+ assert parse_country_name("United States of America") == "us"
+ assert parse_country_name("united States") == "us"
+ assert parse_country_name("Massachusetts") == "us"
+ assert parse_country_name("Russia") == "ru"
+ assert parse_country_name("Japan") == "jp"
+
+# These are very close, but maybe not exactly 1-to-1 with 639-2? Some mix of
+# 2/T and 2/B?
+# PubMed/MEDLINE and JSTOR use these MARC codes
+# https://www.loc.gov/marc/languages/language_name.html
+LANG_MAP_MARC = {
+ 'afr': 'af',
+ 'alb': 'sq',
+ 'amh': 'am',
+ 'ara': 'ar',
+ 'arm': 'hy',
+ 'aze': 'az',
+ 'ben': 'bn',
+ 'bos': 'bs',
+ 'bul': 'bg',
+ 'cat': 'ca',
+ 'chi': 'zh',
+ 'cze': 'cs',
+ 'dan': 'da',
+ 'dut': 'nl',
+ 'eng': 'en',
+ 'epo': 'eo',
+ 'est': 'et',
+ 'fin': 'fi',
+ 'fre': 'fr',
+ 'geo': 'ka',
+ 'ger': 'de',
+ 'gla': 'gd',
+ 'gre': 'el',
+ 'heb': 'he',
+ 'hin': 'hi',
+ 'hrv': 'hr',
+ 'hun': 'hu',
+ 'ice': 'is',
+ 'ind': 'id',
+ 'ita': 'it',
+ 'jpn': 'ja',
+ 'kin': 'rw',
+ 'kor': 'ko',
+ 'lat': 'la',
+ 'lav': 'lv',
+ 'lit': 'lt',
+ 'mac': 'mk',
+ 'mal': 'ml',
+ 'mao': 'mi',
+ 'may': 'ms',
+ 'nor': 'no',
+    'per': 'fa',
+ 'pol': 'pl',
+ 'por': 'pt',
+ 'pus': 'ps',
+ 'rum': 'ro',
+ 'rus': 'ru',
+ 'san': 'sa',
+ 'slo': 'sk',
+ 'slv': 'sl',
+ 'spa': 'es',
+ 'srp': 'sr',
+ 'swe': 'sv',
+ 'tha': 'th',
+ 'tur': 'tr',
+ 'ukr': 'uk',
+ 'urd': 'ur',
+ 'vie': 'vi',
+ 'wel': 'cy',
+
+# additions
+ 'gle': 'ga', # "Irish" (Gaelic)
+ 'jav': 'jv', # Javanese
+ 'welsh': 'cy', # Welsh
+ 'oci': 'oc', # Occitan
+
+# Don't have ISO 639-1 codes
+ 'grc': 'el', # Ancient Greek; map to modern greek
+ 'map': None, # Austronesian (collection)
+ 'syr': None, # Syriac, Modern
+ 'gem': None, # Old Saxon
+ 'non': None, # Old Norse
+ 'emg': None, # Eastern Meohang
+ 'neg': None, # Negidal
+ 'mul': None, # Multiple languages
+ 'und': None, # Undetermined
+}
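Taken together, a small illustrative sketch (not from the commit) of how the new helpers behave on field values of the shape found in DOAJ bibjson records:

    from fatcat_tools.normal import (
        clean_str, parse_month, parse_lang_name, parse_country_name,
    )

    assert clean_str("Materials &amp; Design") == "Materials & Design"
    assert parse_month("10") == 10            # bibjson 'month' arrives as a string
    assert parse_lang_name("EN") == "en"      # bibjson.journal.language entries
    assert parse_country_name("GB") == "gb"   # bibjson.journal.country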
diff --git a/python/fatcat_tools/transforms/elasticsearch.py b/python/fatcat_tools/transforms/elasticsearch.py
index dfb5f799..96a5b96b 100644
--- a/python/fatcat_tools/transforms/elasticsearch.py
+++ b/python/fatcat_tools/transforms/elasticsearch.py
@@ -213,6 +213,9 @@ def release_to_elasticsearch(entity, force_bool=True):
t['best_pdf_url'] = best_pdf_url or good_pdf_url or any_pdf_url
t['ia_pdf_url'] = ia_pdf_url
+ if release.ext_ids.doaj:
+ is_oa = True
+
if release.license_slug:
# TODO: more/better checks here, particularly strict *not* OA licenses
if release.license_slug.startswith("CC-"):
diff --git a/python/fatcat_web/templates/release_view.html b/python/fatcat_web/templates/release_view.html
index 7836ef77..cc9cf5fe 100644
--- a/python/fatcat_web/templates/release_view.html
+++ b/python/fatcat_web/templates/release_view.html
@@ -308,7 +308,7 @@ accessible version.
</div>
{% endif %}
-{% if release.number or release.ext_ids.doi or release.ext_ids.pmid or release.ext_ids.pmcid or release.ext_ids.wikidata_qid or release.ext_ids.isbn13 or release.ext_ids.arxiv or release.ext_ids.jstor or release.ext_ids.ark %}
+{% if release.number or release.ext_ids.doi or release.ext_ids.pmid or release.ext_ids.pmcid or release.ext_ids.wikidata_qid or release.ext_ids.isbn13 or release.ext_ids.arxiv or release.ext_ids.jstor or release.ext_ids.ark or release.ext_ids.doaj or release.ext_ids.dblp %}
<div class="ui segment attached" style="word-wrap: break-word;">
{% if release.number %}
<b>Number</b> &nbsp;<code>{{ release.number }}</code><br>
@@ -337,6 +337,12 @@ accessible version.
{% if release.ext_ids.ark != None %}
<b>ARK</b> &nbsp;<a href="https://n2t.net/{{ release.ext_ids.ark }}"><code>{{ release.ext_ids.ark }}</code></a><br>
{% endif %}
+{% if release.ext_ids.doaj != None %}
+ <b>DOAJ</b> &nbsp;<a href="https://doaj.org/article/{{ release.ext_ids.doaj }}" title="{{ release.ext_ids.doaj }}"><code>{{ release.ext_ids.doaj[:20] }}...</code></a><br>
+{% endif %}
+{% if release.ext_ids.dblp != None %}
+ <b>dblp</b> &nbsp;<a href="https://dblp.org/rec/{{ release.ext_ids.dblp }}.html"><code>{{ release.ext_ids.dblp }}</code></a><br>
+{% endif %}
</div>
{% endif %}
diff --git a/python/tests/files/example_doaj_articles.json b/python/tests/files/example_doaj_articles.json
new file mode 100644
index 00000000..018a4800
--- /dev/null
+++ b/python/tests/files/example_doaj_articles.json
@@ -0,0 +1,5 @@
+{"last_updated":"2020-02-04T14:11:44Z","bibjson":{"identifier":[{"id":"0264-1275","type":"pissn"},{"id":"10.1016/j.matdes.2016.06.110","type":"DOI"}],"journal":{"volume":"108","number":"","country":"GB","license":[{"open_access":true,"title":"CC BY-NC-ND","type":"CC BY-NC-ND","url":"https://www.elsevier.com/journals/materials-and-design/0264-1275/open-access-journal"}],"issns":["0264-1275","1873-4197"],"publisher":"Elsevier","language":["EN"],"title":"Materials & Design"},"month":"10","end_page":"617","year":"2016","start_page":"608","subject":[{"code":"TA401-492","scheme":"LCC","term":"Materials of engineering and construction. Mechanics of materials"}],"author":[{"affiliation":"State Key Laboratory for Mechanical Behavior of Materials, School of Materials Science and Engineering, Xi'an Jiaotong University, Xi'an 710049, China","name":"Xinfeng Li"},{"affiliation":"Department of Geosciences, Center for Materials by Design, State University of New York, Stony Brook, NY 11794-2100, USA","name":"Jin Zhang"},{"affiliation":"School of Chemical Engineering & Technology, China University of Mining and Technology, Xuzhou 221116, China","name":"Yanfei Wang"},{"affiliation":"State Key Laboratory for Mechanical Behavior of Materials, School of Materials Science and Engineering, Xi'an Jiaotong University, Xi'an 710049, China","name":"Sicong Shen"},{"affiliation":"State Key Laboratory for Mechanical Behavior of Materials, School of Materials Science and Engineering, Xi'an Jiaotong University, Xi'an 710049, China; Corresponding author.","name":"Xiaolong Song"}],"link":[{"type":"fulltext","url":"http://www.sciencedirect.com/science/article/pii/S0264127516308723"}],"abstract":"The tensile properties and fracture behavior of PH 13-8 Mo steel after subjected to pre-charged hydrogen were investigated by slow strain rate tensile tests. The results suggest that hydrogen slightly increases yield strength, while decreases tensile strength. The susceptibility to hydrogen embrittlement of specimens aged at 650 °C firstly reduces and then increases as the aging time increases, reaching the lowest value at aging time 4 h. This is dominantly attributed to the highest content of austenite. Moreover, hydrogen-induced crack nucleation sites initiate from lath, packet and prior austenite grain boundaries. Crack propagation passes through lath boundaries and walks along packet, prior austenite grain boundaries. Scanning electron microscopy result indicates that hydrogen-charged specimens show quasi-cleavage fracture and intergranular fracture in annular brittle zone while dimple fracture is observed in hydrogen-free specimens. Keywords: Hydrogen embrittlement, PH 13-8 Mo steel, Aging time, Fracture behavior","title":"Effect of hydrogen on tensile properties and fracture behavior of PH 13-8 Mo steel"},"created_date":"2019-06-05T05:25:15Z","id":"e58f08a11ecb495ead55a44ad4f89808"}
+{"last_updated":"2020-02-04T08:06:42Z","bibjson":{"identifier":[{"id":"2072-6694","type":"eissn"},{"id":"10.3390/cancers9080107","type":"doi"}],"journal":{"volume":"9","number":"8","country":"CH","license":[{"open_access":true,"title":"CC BY","type":"CC BY","url":"http://www.mdpi.com/journal/cancers/about"}],"issns":["2072-6694"],"publisher":"MDPI AG","language":["EN"],"title":"Cancers"},"month":"8","keywords":["ALK rearrangement, lung cancer, biology, immunohistochemistry, FISH, molecular biology."],"year":"2017","start_page":"107","subject":[{"code":"RC254-282","scheme":"LCC","term":"Neoplasms. Tumors. Oncology. Including cancer and carcinogens"}],"author":[{"affiliation":"Laboratory of Clinical and Experimental Pathology, Pasteur Hospital, 30 avenue de la voie romaine, 06001 Nice cedex 01, France","name":"Paul Hofman"}],"link":[{"content_type":"pdf","type":"fulltext","url":"https://www.mdpi.com/2072-6694/9/8/107"}],"abstract":"Patients with advanced-stage non-small cell lung carcinoma (NSCLC) harboring an ALK rearrangement, detected from a tissue sample, can benefit from targeted ALK inhibitor treatment. Several increasingly effective ALK inhibitors are now available for treatment of patients. However, despite an initial favorable response to treatment, in most cases relapse or progression occurs due to resistance mechanisms mainly caused by mutations in the tyrosine kinase domain of ALK. The detection of an ALK rearrangement is pivotal and can be done using different methods, which have variable sensitivity and specificity depending, in particular, on the quality and quantity of the patient’s sample. This review will first highlight briefly some information regarding the pathobiology of an ALK rearrangement and the epidemiology of patients harboring this genomic alteration. The different methods used to detect an ALK rearrangement as well as their advantages and disadvantages will then be examined and algorithms proposed for detection in daily routine practice.","title":"ALK in Non-Small Cell Lung Cancer (NSCLC) Pathobiology, Epidemiology, Detection from Tumor Tissue and Algorithm Diagnosis in a Daily Practice"},"admin":{"seal":true},"created_date":"2018-10-26T07:49:34Z","id":"937c7aa790e048d4ae5f53a2ad71f0dc"}
+{"last_updated":"2020-02-04T13:43:13Z","bibjson":{"identifier":[{"id":"1178-2013","type":"pissn"}],"end_page":"818","keywords":["bioconjugation","biosurfactant","cancer therapy","folic acid receptor","graphene quantum dots","theranostic tool"],"year":"2019","subject":[{"code":"R5-920","scheme":"LCC","term":"Medicine (General)"}],"author":[{"name":"Bansal S"},{"name":"Singh J"},{"name":"Kumari U"},{"name":"Kaur IP"},{"name":"Barnwal RP"},{"name":"Kumar R"},{"name":"Singh S"},{"name":"Singh G"},{"name":"Chatterjee M"}],"link":[{"content_type":"html","type":"fulltext","url":"https://www.dovepress.com/development-of-biosurfactant-based-graphene-quantum-dot-conjugate-as-a-peer-reviewed-article-IJN"}],"abstract":"Smriti Bansal,1 Joga Singh,2 Uma Kumari,3 Indu Pal Kaur,2 Ravi Pratap Barnwal,4 Ravinder Kumar,3 Suman Singh,5 Gurpal Singh,2 Mary Chatterjee1 1Biotechnology Engineering, University Institute of Engineering &amp; Technology, Panjab University, Chandigarh, India; 2Department of Pharmaceutical Sciences, University Institute of Pharmaceutical Sciences, Panjab University, Chandigarh, India; 3Department of Zoology, Panjab University, Chandigarh, India; 4Department of Biophysics, Panjab University, Chandigarh, India; 5Department of Agronomics, Central Scientific Instruments Organisation, Chandigarh, India Background: Biosurfactants are amphipathic molecules of microbial origin that reduce surface and interfacial tension at gas&ndash;liquid&ndash;solid interfaces. Earlier, the biosurfactant was isolated and characterized in our laboratory from Candida parapsilosis. The property of the biosurfactant is further explored in this study by using quantum dots (QDs) as nanocarrier.Materials and methods: Graphene quantum dots (GQDs) were synthesized by bottom-up approach through pyrolysis of citric acid. GQDs were conjugated with both biosurfactant and folic acid (FA) using carbodiimide chemistry. The prepared GQD bioconjugate was studied for diagnostic and therapeutic effects against cancer cells.Results and discussion: Photoluminescence quantum yield (QY) of plain GQDs was measured as 12.8%. QY for biosurfactant conjugated GQDs and FA-biosurfactant conjugated GQDs was measured as 10.4% and 9.02%, respectively, and it was sufficient for targeting cancer cells. MTT assay showed that more than 90% of cells remained viable at concentration of 1 mg/mL, hence GQDs seemed to be non-toxic to cells. Biosurfactant conjugated GQDs caused 50% reduction in cellular viability within 24 hours. FA conjugation further increased the specificity of bioconjugated GQDs toward tumor cells, which is clearly evident from the drug internalization studies using confocal laser scanning microscopy. A higher amount of drug uptake was observed when bioconjugated GQDs were decorated with FA.Conclusion: The ability of GQD bioconjugate could be used as a theranostic tool for cancer. It is foreseen that in near future cancer can be detected and/or treated at an early stage by utilizing biosurfactant conjugated GQDs. Therefore, the proposed study would provide a stepping stone to improve the life of cancer patients. 
Keywords: bioconjugation, nanomedicine, nanocarrier, cancer therapy, folic acid receptor, graphene quantum dots","title":"Development of biosurfactant-based graphene quantum dot conjugate as a novel and fluorescent theranostic tool for cancer","journal":{"volume":"Volume 14","country":"GB","license":[{"open_access":true,"title":"CC BY-NC","type":"CC BY-NC","url":"https://www.dovepress.com/author_guidelines.php?content_id=695"}],"issns":["1176-9114","1178-2013"],"publisher":"Dove Medical Press","language":["EN"],"title":"International Journal of Nanomedicine"},"month":"1","start_page":"809"},"created_date":"2019-01-29T18:43:40Z","id":"e0173c80437f4fb88ec4e02e453e13b0"}
+{"last_updated":"2020-02-04T09:46:14Z","bibjson":{"identifier":[{"id":"1424-8220","type":"eissn"},{"id":"10.3390/s18124467","type":"doi"}],"journal":{"volume":"18","number":"12","country":"CH","license":[{"open_access":true,"title":"CC BY","type":"CC BY","url":"http://www.mdpi.com/journal/sensors/about"}],"issns":["1424-8220"],"publisher":"MDPI AG","language":["EN"],"title":"Sensors"},"month":"12","keywords":["multilayer sea ice temperature","low temperature","design","performance analysis"],"year":"2018","start_page":"4467","subject":[{"code":"TP1-1185","scheme":"LCC","term":"Chemical technology"}],"author":[{"affiliation":"College of Electrical and Power Engineering, Taiyuan University of Technology, Taiyuan 030024, China","name":"Guangyu Zuo"},{"affiliation":"College of Electrical and Power Engineering, Taiyuan University of Technology, Taiyuan 030024, China","name":"Yinke Dou"},{"affiliation":"College of Water Resources Science and Engineering, Taiyuan University of Technology, Taiyuan 030024, China","name":"Xiaomin Chang"},{"affiliation":"College of Electrical and Power Engineering, Taiyuan University of Technology, Taiyuan 030024, China","name":"Yan Chen"},{"affiliation":"College of Electrical and Power Engineering, Taiyuan University of Technology, Taiyuan 030024, China","name":"Chunyan Ma"}],"link":[{"content_type":"pdf","type":"fulltext","url":"https://www.mdpi.com/1424-8220/18/12/4467"}],"abstract":"Temperature profiles of sea ice have been recorded more than a few decades. However, few high-precision temperature sensors can complete the observation of temperature profile of sea ice, especially in extreme environments. At present, the most widely used sea ice observation instruments can reach an accuracy of sea ice temperature measurement of 0.1 &#176;C. In this study, a multilayer sea ice temperature sensor is developed with temperature measurement accuracy from &#8722;0.0047 &#176;C to 0.0059 &#176;C. The sensor system composition, structure of the thermistor string, and work mode are analyzed. The performance of the sensor system is evaluated from &#8722;50 &#176;C to 30 &#176;C. The temperature dependence of the constant current source, the amplification circuit, and the analog-to-digital converter (ADC) circuit are comprehensive tested and quantified. A temperature correction algorithm is designed to correct any deviation in the sensor system. A sea-ice thickness discrimination algorithm is proposed in charge of determining the thickness of sea ice automatically. The sensor system was field tested in Wuliangsuhai, Yellow River on 31 January 2018 and the second reservoir of Fen River, Yellow River on 30 January 2018. The integral practicality of this sensor system is identified and examined. The multilayer sea ice temperature sensor will provide good temperature results of sea ice and maintain stable performance in the low ambient temperature.","title":"Design and Performance Analysis of a Multilayer Sea Ice Temperature Sensor Used in Polar Region"},"admin":{"seal":true},"created_date":"2018-12-18T08:13:29Z","id":"152f83d12b9f477696e681684ba696e7"}
+{"last_updated":"2020-06-02T23:02:32Z","bibjson":{"identifier":[{"id":"10.123/abc","type":"doi"},{"id":"2076-3417","type":"eissn"}],"journal":{"volume":"10","number":"3872","country":"CH","license":[{"open_access":true,"title":"CC BY","type":"CC BY","url":"http://www.mdpi.com/about/openaccess"}],"issns":["2076-3417"],"publisher":"MDPI AG","language":["EN"],"title":"Applied Sciences"},"month":"06","keywords":["Smart parking systems","survey","vehicle routing problem","vehicle detection techniques","routing algorithms"],"year":"2020","start_page":"3872","subject":[{"code":"T","scheme":"LCC","term":"Technology"},{"code":"TA1-2040","scheme":"LCC","term":"Engineering (General). Civil engineering (General)"},{"code":"QH301-705.5","scheme":"LCC","term":"Biology (General)"},{"code":"QC1-999","scheme":"LCC","term":"Physics"},{"code":"QD1-999","scheme":"LCC","term":"Chemistry"}],"author":[{"affiliation":"Institute of Computer Science. Faculty of Exact, Physical and Natural Sciences. National University of San Juan, 5400 San Juan, Argentina","name":"Mathias Gabriel Diaz Ogás"},{"affiliation":"Institute of Informatics and Applications. University of Girona, 17003 Girona, Spain","name":"Ramon Fabregat"},{"affiliation":"Institute of Computer Science. Faculty of Exact, Physical and Natural Sciences. National University of San Juan, 5400 San Juan, Argentina","name":"Silvana Aciar"}],"link":[{"content_type":"text/html","type":"fulltext","url":"https://www.mdpi.com/2076-3417/10/11/3872"}],"abstract":"The large number of vehicles constantly seeking access to congested areas in cities means that finding a public parking place is often difficult and causes problems for drivers and citizens alike. In this context, strategies that guide vehicles from one point to another, looking for the most optimal path, are needed. Most contributions in the literature are routing strategies that take into account different criteria to select the optimal route required to find a parking space. This paper aims to identify the types of smart parking systems (SPS) that are available today, as well as investigate the kinds of vehicle detection techniques (VDT) they have and the algorithms or other methods they employ, in order to analyze where the development of these systems is at today. To do this, a survey of 274 publications from January 2012 to December 2019 was conducted. The survey considered four principal features: SPS types reported in the literature, the kinds of VDT used in these SPS, the algorithms or methods they implement, and the stage of development at which they are. Based on a search and extraction of results methodology, this work was able to effectively obtain the current state of the research area. In addition, the exhaustive study of the studies analyzed allowed for a discussion to be established concerning the main difficulties, as well as the gaps and open problems detected for the SPS. The results shown in this study may provide a base for future research on the subject.","title":"Survey of Smart Parking Systems"},"admin":{"seal":true},"id":"9cf511bab39445ba9745feb43d7493dd","created_date":"2020-06-03T00:02:28Z"}
diff --git a/python/tests/import_doaj.py b/python/tests/import_doaj.py
new file mode 100644
index 00000000..d69aebd7
--- /dev/null
+++ b/python/tests/import_doaj.py
@@ -0,0 +1,142 @@
+
+import json
+import datetime
+
+import pytest
+import fatcat_openapi_client
+
+from fatcat_tools.importers import DoajArticleImporter, JsonLinePusher
+from fatcat_tools.transforms import entity_to_dict
+from fixtures import *
+
+
+@pytest.fixture(scope="function")
+def doaj_importer(api):
+ with open("tests/files/ISSN-to-ISSN-L.snip.txt", "r") as issn_file:
+ yield DoajArticleImporter(api, issn_file, bezerk_mode=True)
+
+def test_doaj_importer(doaj_importer):
+ last_index = doaj_importer.api.get_changelog(limit=1)[0].index
+ with open("tests/files/example_doaj_articles.json", "r") as f:
+ doaj_importer.bezerk_mode = True
+ counts = JsonLinePusher(doaj_importer, f).run()
+ assert counts["insert"] == 5
+ assert counts["exists"] == 0
+ assert counts["skip"] == 0
+ success_changelog = doaj_importer.api.get_changelog(limit=1)[0]
+ assert last_index + 1 == success_changelog.index
+
+ # fetch most recent editgroup
+ change = doaj_importer.api.get_changelog_entry(index=last_index + 1)
+ eg = change.editgroup
+ assert eg.description
+ assert "doaj" in eg.description.lower()
+ assert eg.extra["git_rev"]
+ assert "fatcat_tools.DoajArticleImporter" in eg.extra["agent"]
+
+ last_index = doaj_importer.api.get_changelog(limit=1)[0].index
+ with open("tests/files/example_doaj_articles.json", "r") as f:
+ doaj_importer.bezerk_mode = False
+ doaj_importer.reset()
+ counts = JsonLinePusher(doaj_importer, f).run()
+ assert counts["insert"] == 0
+ assert counts["exists"] == 5
+ assert counts["skip"] == 0
+ assert last_index == doaj_importer.api.get_changelog(limit=1)[0].index
+
+ # cleanup file entities (so other import tests work)
+ success_editgroup = doaj_importer.api.get_editgroup(success_changelog.editgroup_id)
+ eg = quick_eg(doaj_importer.api)
+ for release_edit in success_editgroup.edits.releases:
+ doaj_importer.api.delete_release(eg.editgroup_id, release_edit.ident)
+ doaj_importer.api.accept_editgroup(eg.editgroup_id)
+
+def test_doaj_importer_existing_doi(doaj_importer):
+ """
+ One of the DOAJ test entities has a dummy DOI (10.123/abc); this test
+    ensures that it isn't clobbered, and then that it gets updated.
+ """
+ with open("tests/files/example_doaj_articles.json", "r") as f:
+ doaj_importer.reset()
+ doaj_importer.bezerk_mode = False
+ doaj_importer.do_updates = False
+ counts = JsonLinePusher(doaj_importer, f).run()
+ print(counts)
+ assert counts["insert"] == 4
+ assert counts["exists"] == 1
+ assert counts["skip"] == 0
+ success_changelog = doaj_importer.api.get_changelog(limit=1)[0]
+ success_editgroup = doaj_importer.api.get_editgroup(success_changelog.editgroup_id)
+
+ with open("tests/files/example_doaj_articles.json", "r") as f:
+ doaj_importer.reset()
+ doaj_importer.bezerk_mode = False
+ doaj_importer.do_updates = True
+ counts = JsonLinePusher(doaj_importer, f).run()
+ print(counts)
+ assert counts["insert"] == 0
+ assert counts["exists"] == 4
+ assert counts["update"] == 1
+ update_changelog = doaj_importer.api.get_changelog(limit=1)[0]
+ update_editgroup = doaj_importer.api.get_editgroup(update_changelog.editgroup_id)
+
+ with open("tests/files/example_doaj_articles.json", "r") as f:
+ doaj_importer.reset()
+ doaj_importer.bezerk_mode = False
+ doaj_importer.do_updates = True
+ counts = JsonLinePusher(doaj_importer, f).run()
+ print(counts)
+ assert counts["insert"] == 0
+ assert counts["exists"] == 5
+ assert counts["update"] == 0
+
+ # cleanup file entities (so other import tests work)
+ eg = quick_eg(doaj_importer.api)
+ for release_edit in success_editgroup.edits.releases:
+ doaj_importer.api.delete_release(eg.editgroup_id, release_edit.ident)
+ for release_edit in update_editgroup.edits.releases:
+ print(release_edit)
+ doaj_importer.api.update_release(
+ eg.editgroup_id,
+ release_edit.ident,
+ ReleaseEntity(
+ revision=release_edit.prev_revision,
+ ext_ids=ReleaseExtIds(),
+ ),
+ )
+ doaj_importer.api.accept_editgroup(eg.editgroup_id)
+
+def test_doaj_dict_parse(doaj_importer):
+ with open("tests/files/example_doaj_articles.json", "r") as f:
+ raw = json.loads(f.readline())
+ r = doaj_importer.parse_record(raw)
+
+ assert r.title == "Effect of hydrogen on tensile properties and fracture behavior of PH 13-8 Mo steel"
+ assert r.publisher == "Elsevier"
+ assert r.release_type == "article-journal"
+ assert r.release_stage == "published"
+ assert r.license_slug == "cc-by-nc-nd"
+ assert r.original_title == None
+ assert r.ext_ids.doi == "10.1016/j.matdes.2016.06.110"
+ assert r.ext_ids.doaj == "e58f08a11ecb495ead55a44ad4f89808"
+ assert r.subtitle == None
+ assert r.release_date == None
+ assert r.release_year == 2016
+ assert r.volume == "108"
+ assert r.number == None
+ assert r.pages == "608-617"
+ assert r.version == None
+ assert r.language == "en"
+    # container not matched by ISSN in the test environment, so container_name
+    # is kept in extra (it would be dropped on a match)
+ assert r.extra['container_name'] == "Materials & Design"
+ assert len(r.abstracts) == 1
+ assert len(r.abstracts[0].content) == 1033
+ assert len(r.contribs) == 5
+ assert r.contribs[0].raw_name == "Xinfeng Li"
+ assert r.contribs[0].given_name == None
+ assert r.contribs[0].surname == None
+ assert not r.refs
+
+ #print(r.extra)
+ assert r.extra['release_month'] == 10
+ assert r.extra['country'] == 'gb'