aboutsummaryrefslogtreecommitdiffstats
path: root/chocula/directories
diff options
context:
space:
mode:
authorBryan Newbold <bnewbold@archive.org>2020-05-06 18:26:53 -0700
committerBryan Newbold <bnewbold@archive.org>2020-05-07 00:59:37 -0700
commit4d701f4f2ea99ac95bd4235adef1998f3abdc9f9 (patch)
tree6408d86364109765d0deb3692321ed7f3128ea05 /chocula/directories
parentd559304babb24e4961ba13c554817730b46cfadc (diff)
downloadchocula-4d701f4f2ea99ac95bd4235adef1998f3abdc9f9.tar.gz
chocula-4d701f4f2ea99ac95bd4235adef1998f3abdc9f9.zip
start a Makefile
Move all "index" functions into classes, each in a separate file. Add lots of type annotations. Use dataclass objects to hold database rows. This aspect will need further refactoring to remove "extra" usage, probably by adding database rows to align with DatabaseInfo more closely.
Diffstat (limited to 'chocula/directories')
-rw-r--r--chocula/directories/crossref.py36
-rw-r--r--chocula/directories/directory.template32
-rw-r--r--chocula/directories/doaj.py120
-rw-r--r--chocula/directories/entrez.py39
-rw-r--r--chocula/directories/ezb.py47
-rw-r--r--chocula/directories/gold_oa.py44
-rw-r--r--chocula/directories/norwegian.py81
-rw-r--r--chocula/directories/openapc.py44
-rw-r--r--chocula/directories/road.py52
-rw-r--r--chocula/directories/sherpa_romeo.py58
-rw-r--r--chocula/directories/szczepanski.py48
-rw-r--r--chocula/directories/wikidata.py43
12 files changed, 644 insertions, 0 deletions
diff --git a/chocula/directories/crossref.py b/chocula/directories/crossref.py
new file mode 100644
index 0000000..ba47566
--- /dev/null
+++ b/chocula/directories/crossref.py
@@ -0,0 +1,36 @@
+
+from typing import Iterable, Optional
+import csv
+
+from chocula.util import clean_str
+from chocula.common import DirectoryLoader
+from chocula.database import DirectoryInfo
+
+
class CrossrefLoader(DirectoryLoader):
    """
    Loads the Crossref journal title list CSV into DirectoryInfo records.

    CSV Columns:

    #"JournalTitle","JournalID","Publisher","pissn","eissn","additionalIssns","doi","(year1)[volume1]issue1,issue2,issue3(year2)[volume2]issue4,issues5"

    """

    source_slug = "crossref"

    def open_file(self) -> Iterable:
        return csv.DictReader(open(self.config.CROSSREF_FILE))

    def parse_record(self, record) -> Optional[DirectoryInfo]:
        info = DirectoryInfo(
            directory_slug=self.source_slug,
            issne=record['eissn'],
            issnp=record['pissn'],
            # BUG FIX: .get('doi') alone returns None for a missing/short
            # column and crashes on .strip(); coerce to empty string first
            custom_id=(record.get('doi') or '').strip() or None,
            name=clean_str(record.get('JournalTitle')),
            publisher=clean_str(record.get('Publisher')),
        )

        if record['additionalIssns']:
            # NOTE(review): additionalIssns is a raw CSV string, so [0] takes
            # the first *character*, not the first ISSN — presumably this was
            # meant to split the field; TODO confirm upstream format
            info.raw_issn = record['additionalIssns'][0]

        return info
diff --git a/chocula/directories/directory.template b/chocula/directories/directory.template
new file mode 100644
index 0000000..49f6a79
--- /dev/null
+++ b/chocula/directories/directory.template
@@ -0,0 +1,32 @@
+
+from typing import Iterable, Optional
+import csv
+
+from chocula.util import clean_str
+from chocula.common import DirectoryLoader
+from chocula.database import DirectoryInfo
+
+
class TemplateLoader(DirectoryLoader):
    """
    Template/skeleton for new directory loaders; the field mappings below are
    placeholder examples (copied from the Entrez loader) to be replaced.

    CSV Columns:

    """

    source_slug = "template"

    def open_file(self) -> Iterable:
        return csv.DictReader(open(self.config.TEMPLATE_FILE))

    def parse_record(self, record) -> Optional[DirectoryInfo]:
        info = DirectoryInfo(
            directory_slug=self.source_slug,
            issne=record.get('ISSN (Online)'),
            issnp=record.get('ISSN (Print)'),
            # BUG FIX: .get('NlmId') alone returns None for a missing column
            # and crashes on .strip(); coerce to empty string first
            custom_id=(record.get('NlmId') or '').strip() or None,
            name=clean_str(record.get('JournalTitle')),
            abbrev=clean_str(record['IsoAbbr']),
        )

        return info
+
diff --git a/chocula/directories/doaj.py b/chocula/directories/doaj.py
new file mode 100644
index 0000000..5d1aa21
--- /dev/null
+++ b/chocula/directories/doaj.py
@@ -0,0 +1,120 @@
+
+from typing import Iterable, Optional, Dict, Any
+import csv
+
+from chocula.util import clean_str, parse_mimetypes, parse_country, parse_lang, PLATFORM_MAP
+from chocula.common import DirectoryLoader
+from chocula.database import DirectoryInfo, HomepageUrl
+
+
class DoajLoader(DirectoryLoader):
    """
    Loads the DOAJ journal metadata CSV export into DirectoryInfo records.

    CSV Columns:

    - Journal title
    - Journal URL
    - Alternative title
    - Journal ISSN (print version)
    - Journal EISSN (online version)
    - Publisher
    - Society or institution
    - "Platform
    - host or aggregator"
    - Country of publisher
    - Journal article processing charges (APCs)
    - APC information URL
    - APC amount
    - Currency
    - Journal article submission fee
    - Submission fee URL
    - Submission fee amount
    - Submission fee currency
    - Number of articles publish in the last calendar year
    - Number of articles information URL
    - Journal waiver policy (for developing country authors etc)
    - Waiver policy information URL
    - Digital archiving policy or program(s)
    - Archiving: national library
    - Archiving: other
    - Archiving infomation URL
    - Journal full-text crawl permission
    - Permanent article identifiers
    - Journal provides download statistics
    - Download statistics information URL
    - First calendar year journal provided online Open Access content
    - Full text formats
    - Keywords
    - Full text language
    - URL for the Editorial Board page
    - Review process
    - Review process information URL
    - URL for journal's aims & scope
    - URL for journal's instructions for authors
    - Journal plagiarism screening policy
    - Plagiarism information URL
    - Average number of weeks between submission and publication
    - URL for journal's Open Access statement
    - Machine-readable CC licensing information embedded or displayed in articles
    - URL to an example page with embedded licensing information
    - Journal license
    - License attributes
    - URL for license terms
    - Does this journal allow unrestricted reuse in compliance with BOAI?
    - Deposit policy directory
    - Author holds copyright without restrictions
    - Copyright information URL
    - Author holds publishing rights without restrictions
    - Publishing rights information URL
    - DOAJ Seal
    - Tick: Accepted after March 2014
    - Added on Date
    - Subjects
    """

    source_slug = "doaj"

    def open_file(self) -> Iterable:
        return csv.DictReader(open(self.config.DOAJ_FILE))

    def parse_record(self, row) -> Optional[DirectoryInfo]:
        # TODO: Subjects, Permanent article identifiers, work_level stuff

        info = DirectoryInfo(
            directory_slug=self.source_slug,
            issnp=row['Journal ISSN (print version)'],
            issne=row['Journal EISSN (online version)'],
            name=clean_str(row['Journal title']),
            publisher=clean_str(row['Publisher']),
            platform=PLATFORM_MAP.get(row['Platform, host or aggregator']),
            country=parse_country(row['Country of publisher']),
        )

        lang = parse_lang(row['Full text language'])
        if lang:
            info.langs.append(lang)

        extra: Dict[str, Any] = dict(doaj=dict())
        extra['mimetypes'] = parse_mimetypes(row['Full text formats'])
        extra['doaj']['as_of'] = self.config.DOAJ_DATE
        if row['DOAJ Seal']:
            # deliberately raises KeyError on unexpected seal values
            extra['doaj']['seal'] = {"no": False, "yes": True}[row['DOAJ Seal'].lower()]

        if row['Digital archiving policy or program(s)']:
            extra['archive'] = [
                a.strip()
                for a in row['Digital archiving policy or program(s)'].split(',')
                if a.strip()
            ]
        elif row['Archiving: national library']:
            extra['archive'] = ['national-library']

        crawl_permission = row['Journal full-text crawl permission']
        if crawl_permission:
            extra['crawl-permission'] = dict(Yes=True, No=False)[crawl_permission]
        default_license = row['Journal license']
        if default_license and default_license.startswith('CC'):
            extra['default_license'] = default_license.replace('CC ', 'CC-').strip()

        # BUG FIX: 'extra' was previously built and then silently discarded;
        # attach it to the record so the DOAJ metadata is actually persisted
        info.extra = extra

        url = row['Journal URL']
        if url:
            homepage = HomepageUrl.from_url(url)
            if homepage:
                info.homepage_urls.append(homepage)
        return info
+
diff --git a/chocula/directories/entrez.py b/chocula/directories/entrez.py
new file mode 100644
index 0000000..821fb1d
--- /dev/null
+++ b/chocula/directories/entrez.py
@@ -0,0 +1,39 @@
+
+from typing import Iterable, Optional
+import csv
+
+from chocula.util import clean_str
+from chocula.common import DirectoryLoader
+from chocula.database import DirectoryInfo
+
+
class EntrezLoader(DirectoryLoader):
    """
    Loads the NCBI/Entrez journal list CSV into DirectoryInfo records.

    CSV Columns:

    - JrId
    - JournalTitle
    - MedAbbr
    - "ISSN (Print)"
    - "ISSN (Online)"
    - IsoAbbr
    - NlmId
    """

    source_slug = "entrez"

    def open_file(self) -> Iterable:
        return csv.DictReader(open(self.config.ENTREZ_FILE))

    def parse_record(self, record) -> Optional[DirectoryInfo]:
        # require at least one ISSN for the record to be of any use
        if not (record.get('ISSN (Online)') or record.get('ISSN (Print)')):
            return None
        return DirectoryInfo(
            directory_slug=self.source_slug,
            issne=record.get('ISSN (Online)'),
            issnp=record.get('ISSN (Print)'),
            # BUG FIX: .get('NlmId') alone returns None for a missing column
            # and crashes on .strip(); coerce to empty string first
            custom_id=(record.get('NlmId') or '').strip() or None,
            name=clean_str(record.get('JournalTitle')),
            abbrev=clean_str(record['IsoAbbr']),
        )
+
diff --git a/chocula/directories/ezb.py b/chocula/directories/ezb.py
new file mode 100644
index 0000000..c2fcb83
--- /dev/null
+++ b/chocula/directories/ezb.py
@@ -0,0 +1,47 @@
+
+from typing import Iterable, Optional
+import json
+
+from chocula.util import clean_str
+from chocula.common import DirectoryLoader
+from chocula.database import DirectoryInfo, HomepageUrl
+
+
class EzbLoader(DirectoryLoader):
    """
    Importer for EZB (Elektronische Zeitschriftenbibliothek) records.

    Input is newline-delimited JSON, one object per journal (not CSV).
    """

    source_slug = "ezb"

    def open_file(self) -> Iterable:
        return open(self.config.EZB_FILE, 'r')

    def parse_record(self, row) -> Optional[DirectoryInfo]:

        # skip blank lines in the JSON-lines input
        if not row:
            return None
        record = json.loads(row)

        info = DirectoryInfo(
            directory_slug=self.source_slug,
            issne=record.get('issne'),
            issnp=record.get('issnp'),
            custom_id=record['ezb_id'],
            name=clean_str(record['title']),
            publisher=clean_str(record.get('publisher')),
        )

        # carry over optional descriptive fields only when present and truthy
        passthrough_keys = (
            'ezb_color', 'subjects', 'keywords', 'zdb_id',
            'first_volume', 'first_issue', 'first_year',
            'appearance', 'costs',
        )
        info.extra = {k: record[k] for k in passthrough_keys if record.get(k)}

        homepage = HomepageUrl.from_url(record.get('url'))
        if homepage:
            info.homepage_urls.append(homepage)

        return info
diff --git a/chocula/directories/gold_oa.py b/chocula/directories/gold_oa.py
new file mode 100644
index 0000000..08747bf
--- /dev/null
+++ b/chocula/directories/gold_oa.py
@@ -0,0 +1,44 @@
+
+from typing import Iterable, Optional
+import csv
+
+from chocula.util import clean_str
+from chocula.common import DirectoryLoader
+from chocula.database import DirectoryInfo
+
+
class GoldOALoader(DirectoryLoader):
    """
    Importer for the ISSN Gold-OA index membership dataset.

    CSV Columns:

    # "ISSN","ISSN_L","ISSN_IN_DOAJ","ISSN_IN_ROAD","ISSN_IN_PMC","ISSN_IN_OAPC","ISSN_IN_WOS","ISSN_IN_SCOPUS","JOURNAL_IN_DOAJ","JOURNAL_IN_ROAD","JOURNAL_IN_PMC","JOURNAL_IN_OAPC","JOURNAL_IN_WOS","JOURNAL_IN_SCOPUS","TITLE","TITLE_SOURCE"
    """

    source_slug = "gold_oa"

    def open_file(self) -> Iterable:
        return csv.DictReader(open(self.config.GOLD_OA_FILE, encoding="ISO-8859-1"))

    def parse_record(self, row) -> Optional[DirectoryInfo]:

        # both a linking ISSN and a title are required to be useful
        if not (row.get('ISSN_L') and row.get('TITLE')):
            return None

        # TODO: also add for other non-direct indices
        #for ind in ('WOS', 'SCOPUS'):
        #    issnl, status = self.add_issn(
        #        ind.lower(),
        #        raw_issn=row['ISSN_L'],
        #        name=row['TITLE'],
        #    )

        # "JOURNAL_IN_<X>" columns hold "0"/"1" flags
        indices = ('DOAJ', 'ROAD', 'PMC', 'OAPC', 'WOS', 'SCOPUS')
        extra = {
            'in_' + ind.lower(): bool(int(row['JOURNAL_IN_' + ind]))
            for ind in indices
        }

        return DirectoryInfo(
            directory_slug=self.source_slug,
            raw_issn=row['ISSN_L'],
            name=clean_str(row['TITLE']),
            extra=extra,
        )
diff --git a/chocula/directories/norwegian.py b/chocula/directories/norwegian.py
new file mode 100644
index 0000000..446baed
--- /dev/null
+++ b/chocula/directories/norwegian.py
@@ -0,0 +1,81 @@
+
+from typing import Iterable, Optional
+import csv
+
+from chocula.util import clean_str, parse_lang, parse_country
+from chocula.common import DirectoryLoader
+from chocula.database import DirectoryInfo, HomepageUrl
+
+
class NorwegianLoader(DirectoryLoader):
    """
    Loads the Norwegian Register for Scientific Journals CSV (semicolon
    delimited, ISO-8859-1 encoded) into DirectoryInfo records.

    CSV Columns (2020 file):

    NSD tidsskrift_id
    Original title
    International title
    Print ISSN
    Online ISSN
    Open Access
    NPI Academic Discipline
    NPI Scientific Field
    Level 2020
    Level 2019
    Level 2018
    Level 2017
    Level 2016
    Level 2015
    Level 2014
    Level 2013
    Level 2012
    Level 2011
    Level 2010
    Level 2009
    Level 2008
    Level 2007
    Level 2006
    Level 2005
    Level 2004
    itar_id
    NSD forlag_id
    Publishing Company
    Publisher
    Country of publication
    Language
    Conference Proceedings
    Established
    Ceased
    URL

    """

    source_slug = "norwegian"

    def open_file(self) -> Iterable:
        return csv.DictReader(open(self.config.NORWEGIAN_FILE, encoding="ISO-8859-1"), delimiter=";")

    def parse_record(self, row) -> Optional[DirectoryInfo]:
        info = DirectoryInfo(
            directory_slug=self.source_slug,
            issnp=row['Print ISSN'],
            issne=row['Online ISSN'],
            country=parse_country(row['Country of publication']),
            name=clean_str(row.get('International title')),
            langs=[l for l in [parse_lang(row['Language'])] if l],
            # BUG FIX: these two values were previously dead local tuple
            # assignments ("identifier=...," / "publisher=...,") and never
            # reached the record
            custom_id=row['NSD tidsskrift_id'],
            publisher=clean_str(row['Publisher']),
        )

        info.extra['norwegian'] = dict(as_of=self.config.NORWEGIAN_DATE)
        if row['Level 2019']:
            info.extra['norwegian']['level'] = int(row['Level 2019'])

        # only keep the original title when it differs from the international one
        if row['Original title'] != row['International title']:
            info.original_name = clean_str(row['Original title'])

        url = HomepageUrl.from_url(row['URL'])
        if url:
            info.homepage_urls.append(url)

        return info
diff --git a/chocula/directories/openapc.py b/chocula/directories/openapc.py
new file mode 100644
index 0000000..b5cc691
--- /dev/null
+++ b/chocula/directories/openapc.py
@@ -0,0 +1,44 @@
+
+from typing import Iterable, Optional
+import csv
+
+from chocula.util import clean_str
+from chocula.common import DirectoryLoader
+from chocula.database import DirectoryInfo, HomepageUrl
+
+
class OpenAPCLoader(DirectoryLoader):
    """
    Importer for the OpenAPC aggregated article-processing-charge dataset.

    CSV Columns:

    # "institution","period","euro","doi","is_hybrid","publisher","journal_full_title","issn","issn_print","issn_electronic","issn_l","license_ref","indexed_in_crossref","pmid","pmcid","ut","url","doaj"
    """

    source_slug = "openapc"

    def open_file(self) -> Iterable:
        return csv.DictReader(open(self.config.OPENAPC_FILE))

    def parse_record(self, row) -> Optional[DirectoryInfo]:

        if not row.get('issn'):
            return None

        info = DirectoryInfo(
            directory_slug=self.source_slug,
            issne=row['issn_electronic'],
            issnp=row['issn_print'],
            raw_issn=row['issn_l'] or row['issn'],
            name=clean_str(row['journal_full_title']),
            publisher=clean_str(row['publisher']),
        )

        # BUG FIX: the is_hybrid column contains "TRUE"/"FALSE" strings, and
        # bool() of any non-empty string is True; compare explicitly instead
        info.extra['is_hybrid'] = row['is_hybrid'] == "TRUE"

        homepage = HomepageUrl.from_url(row['url'])
        if homepage:
            info.homepage_urls.append(homepage)

        return info
+
+
diff --git a/chocula/directories/road.py b/chocula/directories/road.py
new file mode 100644
index 0000000..66bd7d0
--- /dev/null
+++ b/chocula/directories/road.py
@@ -0,0 +1,52 @@
+
+from typing import Iterable, Optional
+import csv
+
+from chocula.util import clean_str
+from chocula.common import DirectoryLoader
+from chocula.database import DirectoryInfo, HomepageUrl
+
+
class RoadLoader(DirectoryLoader):
    """
    Importer for ROAD (Directory of Open Access Scholarly Resources) TSV dump.

    Columns (the file itself has no header row):

    - ISSN
    - ISSN-L
    - Short Title
    - Title
    - Publisher
    - URL1
    - URL2
    - Region
    - Lang1
    - Lang2
    """

    source_slug = "road"

    def open_file(self) -> Iterable:
        field_names = (
            "ISSN", "ISSN-L", "Short Title", "Title", "Publisher",
            "URL1", "URL2", "Region", "Lang1", "Lang2",
        )
        return csv.DictReader(
            open(self.config.ROAD_FILE),
            delimiter='\t',
            fieldnames=field_names,
        )

    def parse_record(self, row) -> Optional[DirectoryInfo]:
        langs = [lang for lang in (row['Lang1'], row['Lang2']) if lang]
        info = DirectoryInfo(
            directory_slug=self.source_slug,
            raw_issn=row['ISSN-L'],
            name=clean_str(row['Short Title']),
            publisher=clean_str(row['Publisher']),
            langs=langs,
        )

        # TODO: region mapping: "Europe and North America"
        # TODO: lang mapping: already alpha-3

        # homepages
        for raw_url in (row['URL1'], row['URL2']):
            if not raw_url:
                continue
            homepage = HomepageUrl.from_url(raw_url)
            if homepage:
                info.homepage_urls.append(homepage)

        return info
+
diff --git a/chocula/directories/sherpa_romeo.py b/chocula/directories/sherpa_romeo.py
new file mode 100644
index 0000000..618e389
--- /dev/null
+++ b/chocula/directories/sherpa_romeo.py
@@ -0,0 +1,58 @@
+
+import sys
+from typing import Iterable, Optional, Dict, Any
+import csv
+
+import ftfy
+
+from chocula.util import clean_str, parse_country
+from chocula.common import DirectoryLoader
+from chocula.database import DirectoryInfo
+
+
class SherpaRomeoLoader(DirectoryLoader):
    """
    Loads SHERPA/RoMEO journal and publisher-policy CSVs; policies are loaded
    first and joined onto journal rows by "RoMEO Record ID" in parse_record().

    Policy CSV columns:

    #RoMEO Record ID,Publisher,Policy Heading,Country,RoMEO colour,Published Permission,Published Restrictions,Published Max embargo,Accepted Prmission,Accepted Restrictions,Accepted Max embargo,Submitted Permission,Submitted Restrictions,Submitted Max embargo,Open Access Publishing,Record Status,Updated

    Journal CSV columns:

    #Journal Title,ISSN,ESSN,URL,RoMEO Record ID,Updated

    """

    source_slug = "sherpa_romeo"
    # class-level default; open_file() rebinds a fresh per-instance dict
    sherpa_policies: Dict[str, Any] = dict()

    def open_file(self) -> Iterable:
        # BUG FIX: previously policies were stored into the mutable *class*
        # attribute, shared and accumulated across all loader instances;
        # rebind a fresh per-instance dict on every load
        self.sherpa_policies = dict()

        # first load policies
        print("##### Loading SHERPA/ROMEO policies...", file=sys.stderr)
        fixed_policy_file = ftfy.fix_file(open(self.config.SHERPA_ROMEO_POLICY_FILE, 'rb'))
        policy_reader = csv.DictReader(fixed_policy_file)
        for row in policy_reader:
            self.sherpa_policies[row['RoMEO Record ID']] = row

        # then open regular file
        raw_file = open(self.config.SHERPA_ROMEO_JOURNAL_FILE, 'rb').read().decode(errors='replace')
        fixed_file = ftfy.fix_text(raw_file)
        return csv.DictReader(fixed_file.split('\n'))

    def parse_record(self, row) -> Optional[DirectoryInfo]:
        # super mangled :(
        # NOTE(review): raises KeyError if a journal row references an unknown
        # policy record id — confirm that is the desired failure mode
        row.update(self.sherpa_policies[row['RoMEO Record ID']])

        info = DirectoryInfo(
            directory_slug=self.source_slug,
            issnp=row['ISSN'],
            issne=row['ESSN'],
            name=clean_str(row['Journal Title']),
            publisher=clean_str(row['Publisher']),
            country=parse_country(row['Country']),
            custom_id=row['RoMEO Record ID'],
        )

        if row['RoMEO colour']:
            info.extra['sherpa_romeo'] = dict(color=row['RoMEO colour'])

        return info
diff --git a/chocula/directories/szczepanski.py b/chocula/directories/szczepanski.py
new file mode 100644
index 0000000..b199c34
--- /dev/null
+++ b/chocula/directories/szczepanski.py
@@ -0,0 +1,48 @@
+
+from typing import Iterable, Optional
+import json
+
+from chocula.util import clean_str
+from chocula.common import DirectoryLoader
+from chocula.database import DirectoryInfo, HomepageUrl
+
+
class SzczepanskiLoader(DirectoryLoader):
    """
    Importer for Jan Szczepanski's list of Open Access journals.

    Input is newline-delimited JSON, one object per journal (not CSV).
    """

    source_slug = "szczepanski"

    def open_file(self) -> Iterable:
        return open(self.config.SZCZEPANSKI_FILE, 'r')

    def parse_record(self, row) -> Optional[DirectoryInfo]:

        # skip blank lines in the JSON-lines input
        if not row:
            return None

        record = json.loads(row)

        info = DirectoryInfo(
            directory_slug=self.source_slug,
            issne=record.get('issne'),
            issnp=record.get('issnp'),
            raw_issn=record.get('issn'),
            name=clean_str(record['title']),
            publisher=clean_str(record.get('ed')),
        )

        info.extra['szczepanski'] = dict(as_of=self.config.SZCZEPANSKI_DATE)
        if record.get('extra'):
            info.extra['szczepanski']['notes'] = record.get('extra')
        for key in ('other_titles', 'year_spans', 'ed'):
            if record.get(key):
                info.extra['szczepanski'][key] = record[key]

        homepage = HomepageUrl.from_url(record.get('url'))
        if homepage:
            info.homepage_urls.append(homepage)

        return info
diff --git a/chocula/directories/wikidata.py b/chocula/directories/wikidata.py
new file mode 100644
index 0000000..278192b
--- /dev/null
+++ b/chocula/directories/wikidata.py
@@ -0,0 +1,43 @@
+
+from typing import Iterable, Optional
+import csv
+
+from chocula.util import clean_str
+from chocula.common import DirectoryLoader
+from chocula.database import DirectoryInfo, HomepageUrl
+
+
class WikidataLoader(DirectoryLoader):
    """
    Loads a Wikidata SPARQL query dump (TSV) of journal entities.
    """

    source_slug = "wikidata"

    def open_file(self) -> Iterable:
        return csv.DictReader(open(self.config.WIKIDATA_SPARQL_FILE), delimiter='\t')

    def parse_record(self, row) -> Optional[DirectoryInfo]:

        if not (row.get('issn') and row.get('title')):
            return None
        # the QID is the last path segment of the entity URL
        wikidata_qid = row['item'].strip().split('/')[-1]

        # Filter out junk publisher names: empty strings, raw wikidata entity
        # ids (eg "Q12345"), and "t1..." placeholder values.
        # BUG FIX: check emptiness and length first so publisher[1] can't
        # raise IndexError on an empty or single-character value
        publisher = row['publisher_name']
        if (
            not publisher
            or publisher.startswith('t1')
            or (len(publisher) >= 2 and publisher[0] == 'Q' and publisher[1].isdigit())
        ):
            publisher = None

        info = DirectoryInfo(
            directory_slug=self.source_slug,
            raw_issn=row['issn'],
            custom_id=wikidata_qid,
            name=clean_str(row['title']),
            publisher=clean_str(publisher),
        )
        if row.get('start_year'):
            info.extra['start_year'] = row['start_year']

        url = HomepageUrl.from_url(row.get('websiteurl'))
        if url:
            info.homepage_urls.append(url)

        return info