// Filter to parse out a correct-looking DOI, URL, etc. from a field.
//
// $ echo "1,xxx 10.123/12312 xxx,3" | skate-to-doi -c doi -d , -f 2
// 1,10.123/12312,3
//
// We can use this to sanitize fields in the reference dataset.
package main

import (
	"errors"
	"flag"
	"fmt"
	"log"
	"os"
	"regexp"
	"runtime"
	"strings"

	"git.archive.org/martin/cgraph/skate"
	"git.archive.org/martin/cgraph/skate/parallel"
	"mvdan.cc/xurls/v2"
)

var (
	numWorkers      = flag.Int("w", runtime.NumCPU(), "number of workers")
	batchSize       = flag.Int("b", 100000, "batch size")
	delimiter       = flag.String("d", "\t", "delimiter")
	index           = flag.Int("f", 1, "field to clean up, 1-indexed")
	bestEffort      = flag.Bool("B", false, "only log errors, but do not stop")
	skipNonMatches  = flag.Bool("S", false, "do not emit a line for non-matches")
	what            = flag.String("c", "doi", "what to clean: doi, url")
	extendedCleanup = flag.Bool("X", false, "extended (and slower) cleanup for urls")
	allow           = flag.String("allow", "http,https", "comma separated list of schemas to allow for urls")
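	// PatDOI matches a DOI: "10." plus a registrant code of up to eight
	// digits, a slash, and a non-space suffix ending in a word character.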
PatDOI = regexp.MustCompile(`10[.][0-9]{1,8}/[^ ]*[\w]`)
rxRelaxed = xurls.Relaxed()
allowedSchemas []string // parsed from allow flag
)

func main() {
flag.Parse()
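	// Split the allow flag into the list of acceptable URL schemas.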
for _, v := range strings.Split(*allow, ",") {
allowedSchemas = append(allowedSchemas, strings.TrimSpace(v))
}
var f func([]byte) ([]byte, error)
switch *what {
case "doi":
f = doiFilter
case "url":
f = urlFilter
default:
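		// Unknown values silently fall back to DOI cleanup.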
f = doiFilter
}
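	// Stream lines from stdin to stdout, applying the filter in parallel batches.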
pp := parallel.NewProcessor(os.Stdin, os.Stdout, f)
pp.NumWorkers = *numWorkers
pp.BatchSize = *batchSize
if err := pp.Run(); err != nil {
log.Fatal(err)
}
}

// urlFilter finds the first URL in the configured field and keeps only that.
func urlFilter(p []byte) ([]byte, error) {
parts := strings.Split(string(p), *delimiter)
if len(parts) < *index {
msg := fmt.Sprintf("warn: line has too few fields (%d): %s", len(parts), string(p))
		if *bestEffort {
			log.Println(msg)
			return nil, nil
		}
		return nil, errors.New(msg)
}
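	// The relaxed pattern also matches URLs lacking a schema, e.g. "example.com/a".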
url := rxRelaxed.FindString(parts[*index-1])
if *extendedCleanup {
url = skate.SanitizeURL(url)
}
if url == "" && *skipNonMatches {
return nil, nil
}
if len(allowedSchemas) > 0 && !skate.HasAnyPrefix(url, allowedSchemas) {
return nil, nil
}
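	// The trailing newline of the original line belongs to the last column and
	// is not part of the match, so restore it when we replaced the last field.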
if len(parts) == 1 || *index == len(parts) {
url = url + "\n"
}
parts[*index-1] = url
return []byte(strings.Join(parts, *delimiter)), nil
}

// doiFilter finds a DOI in the configured field and normalizes it to lowercase.
func doiFilter(p []byte) ([]byte, error) {
parts := strings.Split(string(p), *delimiter)
if len(parts) < *index {
msg := fmt.Sprintf("warn: line has too few fields (%d): %s", len(parts), string(p))
		if *bestEffort {
			log.Println(msg)
			return nil, nil
		}
		return nil, errors.New(msg)
}
result := PatDOI.FindString(parts[*index-1])
if result == "" && *skipNonMatches {
return nil, nil
}
	result = strings.ToLower(result)
	// Mirror urlFilter: restore the trailing newline if we replaced the last field.
	if len(parts) == 1 || *index == len(parts) {
		result = result + "\n"
	}
	parts[*index-1] = result
	return []byte(strings.Join(parts, *delimiter)), nil
}