// Package trie implements a trie that maps a generic label to stored data.
// Whereas many trie implementations are optimized for string-based keys,
// this implementation accepts any label that can be tokenized into a
// sequence of ordered keys.
package trie
import (
"cmp"
"fmt"
"iter"
"slices"
"sort"
"strings"
"sync"
)
// Tokenizer is an object that tokenizes a label L into a sequence of individual keys.
// For example, a string tokenizer would split a string into individual runes.
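//
// As a rough sketch (illustrative only, not part of this package), a
// rune-based tokenizer for strings could be written as:
//
//	var runeTokenizer Tokenizer[string, rune] = TokenizeFunc[string, rune](
//		func(s string) (iter.Seq[rune], error) {
//			return func(yield func(rune) bool) {
//				for _, r := range s {
//					if !yield(r) {
//						return
//					}
//				}
//			}, nil
//		})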
type Tokenizer[L any, K cmp.Ordered] interface {
Tokenize(L) (iter.Seq[K], error)
}
// TokenizeFunc is a function type that implements the Tokenizer interface.
type TokenizeFunc[L any, K cmp.Ordered] func(L) (iter.Seq[K], error)
func (f TokenizeFunc[L, K]) Tokenize(in L) (iter.Seq[K], error) {
return f(in)
}
// Trie is a trie that accepts arbitrary Key types as its input.
//
// L represents the "label", the input that is used to Get/Set/Delete
// a value from the trie.
//
// K represents the "key", the individual components that are associated
// with the nodes in the trie.
//
// V represents the "value", the data that is stored in the trie.
// Data is stored at the node reached by the final key of a label.
type Trie[L any, K cmp.Ordered, V any] struct {
mu sync.RWMutex
root *node[K, V]
tokenizer Tokenizer[L, K]
}
// Node represents an individual node in the trie.
type Node[K cmp.Ordered, V any] interface {
// Key returns the key associated with this node
Key() K
// Value returns the value associated with this node
Value() V
// First returns the first child node. If there are no children,
// it returns nil.
First() Node[K, V]
// Children returns a sequence of child nodes
Children() iter.Seq[Node[K, V]]
// NumChildren returns the number of children
NumChildren() int
// AddChild adds a child node to this node
AddChild(Node[K, V])
// Parent returns the parent node. If this is the root node, it returns nil
Parent() Node[K, V]
// Ancestors returns a sequence of ancestor nodes, starting from the
// immediate parent and walking up to the root node.
Ancestors() iter.Seq[Node[K, V]]
}
// New creates a new Trie object.
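//
// A rough usage sketch, assuming a rune-based string tokenizer such as the
// one sketched in the Tokenizer documentation (tok below is hypothetical):
//
//	tr := New[string, rune, int](tok)
//	if err := tr.Put("foo", 1); err != nil {
//		// handle the tokenizer error
//	}
//	v, ok := tr.Get("foo") // v == 1, ok == true
//	tr.Delete("foo")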
func New[L any, K cmp.Ordered, V any](tokenizer Tokenizer[L, K]) *Trie[L, K, V] {
return &Trie[L, K, V]{
root: newNode[K, V](),
tokenizer: tokenizer,
}
}
// Get returns the value associated with `key`. The second return value
// indicates if the value was found.
func (t *Trie[L, K, V]) Get(key L) (V, bool) {
var zero V
	seq, err := t.tokenizer.Tokenize(key)
	if err != nil {
		return zero, false
	}
	tokens := slices.Collect(seq)
	t.mu.RLock()
	defer t.mu.RUnlock()
node, ok := getNode(t.root, tokens)
if !ok {
return zero, false
}
return node.Value(), true
}
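// GetNode returns the node associated with `key`. The second return value
// indicates if a node was found.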
func (t *Trie[L, K, V]) GetNode(key L) (Node[K, V], bool) {
	seq, err := t.tokenizer.Tokenize(key)
	if err != nil {
		return nil, false
	}
	tokens := slices.Collect(seq)
	t.mu.RLock()
	defer t.mu.RUnlock()
return getNode(t.root, tokens)
}
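// getNode walks down from root following tokens one level at a time, and
// returns the node reached by the final token, if every token matched.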
func getNode[K cmp.Ordered, V any](root Node[K, V], tokens []K) (Node[K, V], bool) {
if len(tokens) > 0 {
for child := range root.Children() {
if child.Key() == tokens[0] {
// found the current token in the children.
if len(tokens) == 1 {
// this is the node we're looking for
return child, true
}
// we need to traverse down the trie
return getNode[K, V](child, tokens[1:])
}
}
}
	// if we got here, none of the children matched the next token
return nil, false
}
// Delete removes data associated with `key`. It returns true if the value
// was found and deleted, false otherwise
func (t *Trie[L, K, V]) Delete(key L) bool {
	seq, err := t.tokenizer.Tokenize(key)
	if err != nil {
		return false
	}
	tokens := slices.Collect(seq)
t.mu.Lock()
defer t.mu.Unlock()
return delete[K, V](t.root, tokens)
}
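// delete removes the node reached by following tokens from root, along with
// any subtree beneath it. Intermediate nodes that are left without children
// are pruned on the way back up. It reports whether a node was removed.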
func delete[K cmp.Ordered, V any](root *node[K, V], tokens []K) bool {
if len(tokens) <= 0 {
return false
}
for i, child := range root.children {
if child.Key() == tokens[0] {
if len(tokens) == 1 {
// this is the node we're looking for
root.children = slices.Delete(root.children, i, i+1)
return true
}
// we need to traverse down the trie
if delete[K, V](child, tokens[1:]) {
if len(child.children) == 0 {
root.children = slices.Delete(root.children, i, i+1)
}
return true
}
return false
}
}
return false
}
// Put sets `key` to point to data `value`.
func (t *Trie[L, K, V]) Put(key L, value V) error {
	seq, err := t.tokenizer.Tokenize(key)
	if err != nil {
		return fmt.Errorf(`failed to tokenize key: %w`, err)
	}
	tokens := slices.Collect(seq)
	t.mu.Lock()
	defer t.mu.Unlock()
	put[K, V](t.root, tokens, value)
return nil
}
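// put stores value at the node reached by following tokens from root,
// creating any nodes along the path that do not exist yet.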
func put[K cmp.Ordered, V any](root Node[K, V], tokens []K, value V) {
	if len(tokens) == 0 {
		return
	}
	for child := range root.Children() {
		if child.Key() != tokens[0] {
			continue
		}
		// found the first token among the children.
		if len(tokens) == 1 {
			// this is the node we're looking for: overwrite its value.
			//nolint:forcetypeassert
			child.(*node[K, V]).value = value
			return
		}
		// we need to traverse down the trie
		put[K, V](child, tokens[1:], value)
		return
	}
	// if we got here, none of the children matched the first token.
	// Build a chain of new nodes for the remaining tokens and attach it.
	var newRoot *node[K, V]
	var cur *node[K, V]
	for _, token := range tokens {
		nn := newNode[K, V]()
		nn.key = token
		if cur == nil {
			newRoot = nn
		} else {
			nn.parent = cur
			cur.children = append(cur.children, nn)
		}
		cur = nn
	}
	// cur holds the last node in the chain; the value lives there.
	cur.value = value
	root.AddChild(newRoot)
}
type node[K cmp.Ordered, V any] struct {
mu sync.RWMutex
key K
value V
children []*node[K, V]
parent *node[K, V]
}
func newNode[K cmp.Ordered, V any]() *node[K, V] {
return &node[K, V]{}
}
func (n *node[K, V]) Key() K {
return n.key
}
func (n *node[K, V]) Value() V {
return n.value
}
func (n *node[K, V]) Parent() Node[K, V] {
return n.parent
}
func (n *node[K, V]) Ancestors() iter.Seq[Node[K, V]] {
return func(yield func(Node[K, V]) bool) {
cur := n.parent
for cur != nil {
if !yield(cur) {
break
}
cur = cur.parent
}
}
}
func (n *node[K, V]) First() Node[K, V] {
n.mu.RLock()
defer n.mu.RUnlock()
if len(n.children) == 0 {
return nil
}
return n.children[0]
}
func (n *node[K, V]) NumChildren() int {
n.mu.RLock()
defer n.mu.RUnlock()
return len(n.children)
}
// emptyIter is a sequence that yields nothing. It is returned when a node
// has no children.
func emptyIter[K cmp.Ordered, V any](yield func(Node[K, V]) bool) {}
func (n *node[K, V]) Children() iter.Seq[Node[K, V]] {
n.mu.RLock()
if len(n.children) == 0 {
n.mu.RUnlock()
		return emptyIter[K, V]
}
children := make([]*node[K, V], len(n.children))
copy(children, n.children)
n.mu.RUnlock()
return func(yield func(Node[K, V]) bool) {
for _, child := range children {
if !yield(child) {
break
}
}
}
}
func (n *node[K, V]) AddChild(child Node[K, V]) {
n.mu.Lock()
	// This is kind of gross, but *node[K, V] is the only implementation
	// backing the Node[K, V] interface, because we don't want users to
	// instantiate their own nodes... so this type assertion is safe.
//nolint:forcetypeassert
raw := child.(*node[K, V])
raw.parent = n
n.children = append(n.children, raw)
sort.Slice(n.children, func(i, j int) bool {
return n.children[i].Key() < n.children[j].Key()
})
n.mu.Unlock()
}
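// VisitMetadata carries additional information about the node currently
// being visited, such as its depth within the trie.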
type VisitMetadata struct {
Depth int
}
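// Visitor is the interface used by Walk to process each node in the trie.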
type Visitor[K cmp.Ordered, V any] interface {
Visit(Node[K, V], VisitMetadata) bool
}
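// VisitFunc is a function that implements the Visitor interface.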
type VisitFunc[K cmp.Ordered, V any] func(Node[K, V], VisitMetadata) bool
func (f VisitFunc[K, V]) Visit(n Node[K, V], m VisitMetadata) bool {
return f(n, m)
}
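// Walk traverses the trie depth-first, calling v.Visit for each node it
// reaches, together with metadata such as the node's depth (the root's
// immediate children are at depth 1). If the visitor returns false, the
// remaining siblings at that level are skipped.
//
// A rough sketch of printing every node with a VisitFunc (tr is a
// hypothetical *Trie[string, rune, int] value):
//
//	Walk(tr, VisitFunc[rune, int](func(n Node[rune, int], m VisitMetadata) bool {
//		fmt.Println(m.Depth, n.Key(), n.Value())
//		return true
//	}))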
func Walk[L any, K cmp.Ordered, V any](trie *Trie[L, K, V], v Visitor[K, V]) {
var meta VisitMetadata
meta.Depth = 1
walk(trie.root, v, meta)
}
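// walk recursively visits the children of node in order, increasing the
// depth by one for each level of recursion.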
func walk[K cmp.Ordered, V any](node Node[K, V], v Visitor[K, V], meta VisitMetadata) {
for child := range node.Children() {
if !v.Visit(child, meta) {
break
}
walk(child, v, VisitMetadata{Depth: meta.Depth + 1})
}
}
type dumper[K cmp.Ordered, V any] struct{}
func (dumper[K, V]) Visit(n Node[K, V], meta VisitMetadata) bool {
var sb strings.Builder
for i := 0; i < meta.Depth; i++ {
sb.WriteString(" ")
}
fmt.Fprintf(&sb, "%q: %v", fmt.Sprintf("%v", n.Key()), n.Value())
fmt.Println(sb.String())
return true
}
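// Dumper returns a Visitor that prints each visited node to standard
// output, indented according to its depth.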
func Dumper[K cmp.Ordered, V any]() Visitor[K, V] {
return dumper[K, V]{}
}