// skate-cleanup parses a correct-looking DOI, URL, etc. out of a field.
//
// $ echo "1,xxx 10.123/12312 xxx,3" | skate-cleanup -c doi -d , -f 2
// 1,10.123/12312,3
//
// We can use this to sanitize fields in the reference dataset.
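//
// Supported values for -c are doi, url and ref; any other value falls
// back to the DOI filter.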

package main

import (
	"flag"
	"log"
	"os"
	"runtime"
	"strings"

	"git.archive.org/martin/cgraph/skate"
	"git.archive.org/martin/cgraph/skate/parallel"
)

var (
	numWorkers      = flag.Int("w", runtime.NumCPU(), "number of workers")
	batchSize       = flag.Int("b", 100000, "batch size")
	delimiter       = flag.String("d", "\t", "delimiter")
	index           = flag.Int("f", 1, "field to clean up, 1-indexed")
	bestEffort      = flag.Bool("B", false, "only log errors, but do not stop")
	skipNonMatches  = flag.Bool("S", false, "do not emit a line for non-matches")
	what            = flag.String("c", "doi", "what to clean: doi, url, ref")
	extendedCleanup = flag.Bool("X", false, "extended (and slower) cleanup for urls")
	allow           = flag.String("allow", "http,https", "comma separated list of schemas to allow for urls")

	allowedSchemas []string // parsed from allow flag
)

func main() {
	flag.Parse()
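	// Split the allow flag into the list of allowed URL schemas.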
	for _, v := range strings.Split(*allow, ",") {
		allowedSchemas = append(allowedSchemas, strings.TrimSpace(v))
	}
	var f func([]byte) ([]byte, error)
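	// Pick the filter implementation based on the requested cleanup type.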
	switch *what {
	case "ref":
		filter := skate.FilterRawRef{}
		f = filter.Run
	case "url":
		filter := skate.FilterURL{
			Delimiter:      *delimiter,
			Index:          *index,
			BestEffort:     *bestEffort,
			Aggressive:     *extendedCleanup,
			SkipNonMatches: *skipNonMatches,
			AllowedSchemas: allowedSchemas,
		}
		f = filter.Run
	default:
		filter := skate.FilterDOI{
			Delimiter:      *delimiter,
			Index:          *index,
			BestEffort:     *bestEffort,
			Aggressive:     *extendedCleanup,
			SkipNonMatches: *skipNonMatches,
		}
		f = filter.Run
	}
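	// Stream stdin through the filter in parallel batches, writing cleaned
	// lines to stdout.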
	pp := parallel.NewProcessor(os.Stdin, os.Stdout, f)
	pp.NumWorkers = *numWorkers
	pp.BatchSize = *batchSize
	if err := pp.Run(); err != nil {
		log.Fatal(err)
	}
}