-
Notifications
You must be signed in to change notification settings - Fork 30
/
Copy pathinflight.go
196 lines (169 loc) · 5.29 KB
/
inflight.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
package sturdyc
import (
"context"
"errors"
"fmt"
"sync"
)
// inFlightCall tracks one de-duplicated fetch. The embedded WaitGroup lets
// concurrent callers block until the owning goroutine finishes the fetch,
// after which val and err hold the shared result for every waiter.
type inFlightCall[T any] struct {
	sync.WaitGroup
	val T     // result of the fetch, shared by all waiters
	err error // error (possibly a sentinel) from the fetch, shared by all waiters
}
// newFlight creates an in-flight call for key, registers it in the client's
// in-flight map, and returns it. The caller must hold c.inFlightMutex.
func (c *Client[T]) newFlight(key string) *inFlightCall[T] {
	flight := &inFlightCall[T]{}
	flight.Add(1)
	c.inFlightMap[key] = flight
	return flight
}
// makeCall invokes fn, publishes its result on call for every waiting
// goroutine, and removes the in-flight entry for key. Panics in fn are
// recovered and surfaced as the call's error.
func makeCall[T any](ctx context.Context, c *Client[T], key string, fn FetchFn[T], call *inFlightCall[T]) {
	defer func() {
		if r := recover(); r != nil {
			call.err = fmt.Errorf("sturdyc: panic recovered: %v", r)
		}
		// Release the waiters first, then drop the in-flight entry so that
		// subsequent callers start a fresh fetch.
		call.Done()
		c.inFlightMutex.Lock()
		delete(c.inFlightMap, key)
		c.inFlightMutex.Unlock()
	}()

	response, err := fn(ctx)
	// The response is always published, even on error, so that callers can
	// decide what to do with a partial result.
	call.val = response

	switch {
	case c.storeMissingRecords && errors.Is(err, ErrNotFound):
		// Remember that this key has no record so we can answer fast next time.
		c.StoreMissingRecord(key)
		call.err = ErrMissingRecord
		return
	case errors.Is(err, errOnlyDistributedRecords):
		// Soft failure: the value came from the distributed storage only.
		call.err = onlyCachedRecords(err)
	case err != nil:
		// Hard failure: propagate and skip caching.
		call.err = err
		return
	}

	c.Set(key, response)
}
// callAndCache fetches the value for key, de-duplicating concurrent requests:
// if a fetch for key is already in flight, the caller waits for it and shares
// its result instead of issuing a second fetch.
func callAndCache[T any](ctx context.Context, c *Client[T], key string, fn FetchFn[T]) (T, error) {
	c.inFlightMutex.Lock()
	if existing, ok := c.inFlightMap[key]; ok {
		// Someone else is already fetching this key; piggyback on their call.
		c.inFlightMutex.Unlock()
		existing.Wait()
		return existing.val, existing.err
	}
	flight := c.newFlight(key)
	c.inFlightMutex.Unlock()

	makeCall(ctx, c, key, fn, flight)
	return flight.val, flight.err
}
// newBatchFlight creates one shared in-flight call covering every given id
// and registers it under each id's cache key. The caller must hold
// c.inFlightBatchMutex.
func (c *Client[T]) newBatchFlight(ids []string, keyFn KeyFn) *inFlightCall[map[string]T] {
	flight := &inFlightCall[map[string]T]{val: make(map[string]T, len(ids))}
	flight.Add(1)
	for _, id := range ids {
		c.inFlightBatchMap[keyFn(id)] = flight
	}
	return flight
}
// endBatchFlight releases everyone waiting on call and unregisters each id's
// in-flight entry so that later requests trigger a fresh batch fetch.
func (c *Client[T]) endBatchFlight(ids []string, keyFn KeyFn, call *inFlightCall[map[string]T]) {
	call.Done()

	c.inFlightBatchMutex.Lock()
	defer c.inFlightBatchMutex.Unlock()
	for _, id := range ids {
		delete(c.inFlightBatchMap, keyFn(id))
	}
}
// makeBatchCallOpts carries the arguments for a single de-duplicated batch
// fetch performed by makeBatchCall.
type makeBatchCallOpts[T any] struct {
	ids   []string                     // IDs to request from the data source
	fn    BatchFetchFn[T]              // user-supplied batch fetch function
	keyFn KeyFn                        // maps an ID to its cache key
	call  *inFlightCall[map[string]T] // shared result every waiter reads from
}
// makeBatchCall fetches opts.ids through opts.fn, publishes every returned
// record on opts.call for the waiting goroutines, writes successful records
// to the cache, and optionally marks absent IDs as missing records.
func makeBatchCall[T any](ctx context.Context, c *Client[T], opts makeBatchCallOpts[T]) {
	response, err := opts.fn(ctx, opts.ids)
	for id, record := range response {
		// We never want to discard values from the fetch functions, even if they
		// return an error. Instead, we'll pass them to the user along with any
		// errors and let them decide what to do.
		opts.call.val[id] = record
		// However, we'll only write them to the cache if the fetch function
		// returned a nil error, or the sentinel that means the records were
		// served from the distributed storage.
		if err == nil || errors.Is(err, errOnlyDistributedRecords) {
			c.Set(opts.keyFn(id), record)
		}
	}
	// Any other non-nil error is a hard failure: propagate it as-is.
	if err != nil && !errors.Is(err, errOnlyDistributedRecords) {
		opts.call.err = err
		return
	}
	// Distributed-storage-only results become the ErrOnlyCachedRecords soft error.
	if errors.Is(err, errOnlyDistributedRecords) {
		opts.call.err = onlyCachedRecords(err)
	}
	// Check if we should store any of these IDs as a missing record. However, we
	// don't want to do this if we only received records from the distributed
	// storage. That means that the underlying data source errored for the IDs
	// that we didn't have in our distributed storage, and we don't know whether
	// these records are missing or not.
	if c.storeMissingRecords && len(response) < len(opts.ids) && !errors.Is(err, errOnlyDistributedRecords) {
		for _, id := range opts.ids {
			if _, ok := response[id]; !ok {
				c.StoreMissingRecord(opts.keyFn(id))
			}
		}
	}
}
// callBatchOpts carries the caller-facing arguments for callAndCacheBatch.
type callBatchOpts[T any] struct {
	ids   []string        // IDs the caller wants
	keyFn KeyFn           // maps an ID to its cache key
	fn    BatchFetchFn[T] // user-supplied batch fetch function
}
// callAndCacheBatch fetches opts.ids while de-duplicating concurrent batch
// requests: IDs with an in-flight batch join that batch, and only the
// remaining unique IDs trigger a new fetch. It returns the merged records for
// the requested IDs together with any error from the batches it waited on.
func callAndCacheBatch[T any](ctx context.Context, c *Client[T], opts callBatchOpts[T]) (map[string]T, error) {
	c.inFlightBatchMutex.Lock()
	callIDs := make(map[*inFlightCall[map[string]T]][]string)
	uniqueIDs := make([]string, 0, len(opts.ids))
	for _, id := range opts.ids {
		if call, ok := c.inFlightBatchMap[opts.keyFn(id)]; ok {
			// An in-flight batch already covers this ID; join it.
			callIDs[call] = append(callIDs[call], id)
			continue
		}
		uniqueIDs = append(uniqueIDs, id)
	}

	if len(uniqueIDs) > 0 {
		call := c.newBatchFlight(uniqueIDs, opts.keyFn)
		callIDs[call] = append(callIDs[call], uniqueIDs...)
		go func() {
			defer func() {
				if err := recover(); err != nil {
					call.err = fmt.Errorf("sturdyc: panic recovered: %v", err)
				}
				c.endBatchFlight(uniqueIDs, opts.keyFn, call)
			}()
			batchCallOpts := makeBatchCallOpts[T]{ids: uniqueIDs, fn: opts.fn, keyFn: opts.keyFn, call: call}
			makeBatchCall(ctx, c, batchCallOpts)
		}()
	}
	c.inFlightBatchMutex.Unlock()

	var err error
	response := make(map[string]T, len(opts.ids))
	for call, ids := range callIDs {
		call.Wait()
		// We need to iterate through the values that WE want from this call. The batch
		// could contain hundreds of IDs, but we might only want a few of them.
		for _, id := range ids {
			if v, ok := call.val[id]; ok {
				response[id] = v
			}
		}
		// A hard error — anything other than "we could only serve cached
		// records" — short-circuits the function immediately.
		//
		// NOTE(fix): the previous condition tested the accumulated err
		// (`err == nil || ...`) instead of call.err, so a successful batch
		// visited after a failed one returned a nil error and masked the
		// failure, with the outcome depending on map iteration order.
		if call.err != nil && !errors.Is(call.err, ErrOnlyCachedRecords) {
			return response, call.err
		}
		// Cached-only results are soft errors: remember them and keep
		// accumulating partial responses. A later successful batch must not
		// clear an already-recorded soft error, hence no assignment when
		// call.err is nil.
		if errors.Is(call.err, ErrOnlyCachedRecords) {
			err = call.err
		}
	}
	return response, err
}