package zipkey

import (
	"runtime"
	"sync"
)
const (
	defaultBatchSize = 2000
	maxGroupSize     = 500 // short-circuiting threshold
)
// Batcher runs reducers in parallel on batches of groups.
type Batcher struct {
	Size       int           // number of groups per batch
	NumWorkers int           // number of worker goroutines
	gf         groupFunc     // reducer applied to each group
	batch      []*Group      // current, not yet submitted batch
	queue      chan []*Group // batches ready for processing
	wg         sync.WaitGroup
	err        error // error reported by a worker, if any
	closing    bool  // https://stackoverflow.com/q/16105325/89391
}
// NewBatcher sets up a new Batcher with the default batch size.
func NewBatcher(gf groupFunc) *Batcher {
	return NewBatcherSize(gf, defaultBatchSize)
}
// NewBatcherSize initializes a batcher with a given batch size and starts the
// worker goroutines.
func NewBatcherSize(gf groupFunc, size int) *Batcher {
	batcher := Batcher{
		gf:         gf,
		Size:       size,
		NumWorkers: runtime.NumCPU(),
		queue:      make(chan []*Group),
	}
	for i := 0; i < batcher.NumWorkers; i++ {
		batcher.wg.Add(1)
		go batcher.worker()
	}
	return &batcher
}
// Close tears down the batcher. If this is not called, you get goroutine
// leaks and will miss the data from the last uncommitted batch.
func (b *Batcher) Close() error {
	b.closing = true
	g := make([]*Group, len(b.batch))
	copy(g, b.batch)
	b.queue <- g
	b.batch = nil
	close(b.queue)
	b.wg.Wait()
	return b.err
}
// GroupFunc is a drop-in replacement for a groupFunc. Use this method where
// you used grouper before. Not thread safe. Panics if called after Close.
func (b *Batcher) GroupFunc(g *Group) error {
	if b.closing {
		panic("cannot call GroupFunc after Close")
	}
	b.batch = append(b.batch, g)
	// A few groups are oversized, e.g. have thousands of members. We short
	// circuit on those and submit the batch early.
	oversized := len(g.G0) > maxGroupSize || len(g.G1) > maxGroupSize
	if len(b.batch) == b.Size || oversized {
		g := make([]*Group, len(b.batch))
		copy(g, b.batch)
		b.queue <- g
		b.batch = nil
	}
	return nil
}
// worker will wind down after any error has been encountered. Multiple
// workers may set the error, but we currently only care whether the error is
// nil or not.
func (b *Batcher) worker() {
	defer b.wg.Done()
OUTER:
	for batch := range b.queue {
		for _, g := range batch {
			if err := b.gf(g); err != nil {
				b.err = err
				break OUTER
			}
		}
	}
}
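To round this off, here is a minimal usage sketch, not part of the package above: it assumes that *Group values come from elsewhere (e.g. an iterator over two keyed streams) and that groupFunc has the func(*Group) error signature implied by the listing; the countMembers helper, its groups parameter, and the atomic counter are made up for illustration only.

package zipkey

import "sync/atomic"

// countMembers is a hypothetical example: it feeds groups into a Batcher and
// counts members across both halves of each group.
func countMembers(groups []*Group) (int64, error) {
	var total int64
	// The reducer runs concurrently in NumWorkers goroutines, so shared
	// state needs synchronization; here, an atomic counter.
	batcher := NewBatcher(func(g *Group) error {
		atomic.AddInt64(&total, int64(len(g.G0)+len(g.G1)))
		return nil
	})
	for _, g := range groups {
		if err := batcher.GroupFunc(g); err != nil {
			return 0, err
		}
	}
	// Close flushes the last, partially filled batch, waits for the workers
	// and surfaces any error a reducer returned.
	if err := batcher.Close(); err != nil {
		return 0, err
	}
	return total, nil
}

Note that only the reducer runs in parallel: GroupFunc itself is not thread safe, so a single producer goroutine feeds the batcher while the workers chew through the queued batches.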