-
Notifications
You must be signed in to change notification settings - Fork 1
/
Copy pathmain.go
308 lines (264 loc) · 8.28 KB
/
main.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
package main
import (
"bufio"
"errors"
"fmt"
"log/slog"
"os"
"path/filepath"
"regexp"
"strconv"
"time"
"github.com/hostnetbr/gatekeeper-log-exporter/exporter"
"github.com/hostnetbr/gatekeeper-log-exporter/exporter/influx"
"github.com/fsnotify/fsnotify"
)
const (
	// confFile is the YAML configuration file read at startup.
	confFile = "/etc/gkle.yaml"
	// lastLogFile persists the path of the most recently parsed log file
	// between runs, so already-exported files are not re-processed.
	lastLogFile = "/var/lib/gkle/last"
	// timeLayout is the timestamp format used in gatekeeper log lines
	// (Go reference time layout).
	timeLayout = "2006-01-02 15:04:05"
)
// logLineRegex captures the lcore id, timestamp, and the 14 measurement
// fields of a gatekeeper "Basic measurements" log line. It can be
// overridden via the LogLineRegex config option (see run).
var logLineRegex = regexp.MustCompile(`^GK\/(\d+)\s+(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2})\s+NOTICE\s+Basic\s+measurements\s+\[tot_pkts_num\s+=\s+(\d+),\s+tot_pkts_size\s+=\s+(\d+),\s+pkts_num_granted\s+=\s+(\d+),\s+pkts_size_granted\s+=\s+(\d+),\s+pkts_num_request\s+=\s+(\d+),\s+pkts_size_request\s+=\s+(\d+),\s+pkts_num_declined\s+=\s+(\d+),\s+pkts_size_declined\s+=\s+(\d+),\s+tot_pkts_num_dropped\s+=\s+(\d+),\s+tot_pkts_size_dropped\s+=\s+(\d+),\s+tot_pkts_num_distributed\s+=\s+(\d+),\s+tot_pkts_size_distributed\s+=\s+(\d+),\s+flow_table_occupancy\s+=\s+(\d+)\/(\d+)=\d+\.\d+%]`)
// logFileRegex matches rotated gatekeeper log file names
// (gatekeeper_YYYY_MM_DD_HH_MM.log).
var logFileRegex = regexp.MustCompile(`gatekeeper_\d{4}_\d{2}_\d{2}_\d{2}_\d{2}.log`)
// main delegates all work to run so that deferred cleanup inside run
// executes before the process exits (os.Exit skips deferred calls).
func main() {
	code := run()
	os.Exit(code)
}
// run loads the configuration, watches the gatekeeper log directory for
// newly created (rotated) log files, and exports the measurements of every
// completed file to InfluxDB. It returns the process exit code.
func run() int {
	cfg, err := parseConfig()
	if err != nil {
		slog.Error(fmt.Sprintf("error reading config file: %v", err))
		return 1
	}
	// A user-supplied regex must not crash the process: compile it with
	// regexp.Compile (MustCompile would panic on an invalid pattern).
	if cfg.LogLineRegex != "" {
		re, err := regexp.Compile(cfg.LogLineRegex)
		if err != nil {
			slog.Error(fmt.Sprintf("invalid log line regex in config: %v", err))
			return 1
		}
		logLineRegex = re
	}
	logger := slog.New(slog.NewTextHandler(os.Stdout, &slog.HandlerOptions{Level: slog.LevelDebug}))
	slog.SetDefault(logger)
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		slog.Error(fmt.Sprintf("error creating fsnotify watcher: %v", err))
		return 1
	}
	defer watcher.Close()
	// done is closed when the watch goroutine exits so the final receive
	// below unblocks (previously the program deadlocked forever after the
	// goroutine died on an error).
	done := make(chan bool)
	go func() {
		defer close(done)
		for {
			select {
			case event, ok := <-watcher.Events:
				if !ok {
					return
				}
				// A newly created file means gatekeeper rotated its log:
				// every older, complete file can now be parsed.
				if event.Op&fsnotify.Create == fsnotify.Create {
					filesToParse, err := getFilesToParse(cfg.GkLogDir)
					if err != nil {
						slog.Error(fmt.Sprintf("error getting log files to parse: %v", err))
						return
					}
					for _, file := range filesToParse {
						ex := influx.NewExporter(*cfg.InfluxDB)
						// The parse error was previously discarded; log it
						// but keep going so one bad file doesn't stall the
						// exporter.
						if err := parseLogFile(file, ex); err != nil {
							slog.Error(fmt.Sprintf("error parsing log file %s: %v", file, err))
						}
						ex.Close()
						if err := saveLastLog(file); err != nil {
							slog.Error(fmt.Sprintf("error saving last read log: %v", err))
							return
						}
					}
				}
			case err, ok := <-watcher.Errors:
				if !ok {
					return
				}
				slog.Error(fmt.Sprintf("error while watching log dir: %v", err))
			}
		}
	}()
	err = watcher.Add(cfg.GkLogDir)
	if err != nil {
		slog.Error(fmt.Sprintf("error adding watcher to log dir: %s. %v", cfg.GkLogDir, err))
		return 1
	}
	// Block until the watch goroutine stops; reaching this point means it
	// exited on an error, so report failure.
	<-done
	return 1
}
// getFilesToParse lists the gatekeeper log files under path that still need
// parsing: everything after the file recorded in lastLogFile, excluding the
// newest file (gatekeeper is still writing it). If lastLogFile does not
// exist, all complete files are returned.
func getFilesToParse(path string) ([]string, error) {
	entries, err := os.ReadDir(path)
	if err != nil {
		return nil, fmt.Errorf("error reading gatekeeper log dir %s: %w", path, err)
	}
	parseAll := false
	lastParsedLog, err := os.ReadFile(lastLogFile)
	if err != nil {
		if os.IsNotExist(err) {
			slog.Debug("parsing all log files")
			parseAll = true
		} else {
			return nil, fmt.Errorf("error reading last log file: %w", err)
		}
	}
	var files []string
	// -1 distinguishes "last parsed log not present in the directory" from
	// "last parsed log is the first file"; the original code conflated both
	// as 0 and re-parsed the first file.
	lastParsedLogPosition := -1
	for _, entry := range entries {
		if !entry.IsDir() && logFileRegex.MatchString(entry.Name()) {
			fileName := filepath.Join(path, entry.Name())
			files = append(files, fileName)
			if fileName == string(lastParsedLog) {
				lastParsedLogPosition = len(files) - 1
			}
		}
	}
	// Guard: with no files, files[:len(files)-1] below would panic.
	if len(files) == 0 {
		return nil, nil
	}
	// We don't parse the last file of the directory because it's still being
	// written by gatekeeper.
	if parseAll || lastParsedLogPosition == -1 {
		return files[:len(files)-1], nil
	}
	// Guard: if the last parsed log is already the newest (or last complete)
	// file, there is nothing new to parse.
	if lastParsedLogPosition+1 >= len(files) {
		return nil, nil
	}
	return files[lastParsedLogPosition+1 : len(files)-1], nil
}
// parseLogFile opens the given gatekeeper log file and streams its lines
// through match, exporting each aggregated minute via ex.Export.
func parseLogFile(filename string, ex exporter.Interface) error {
	slog.Debug(fmt.Sprintf("parsing log file %s", filename))
	f, err := os.Open(filename)
	if err != nil {
		return fmt.Errorf("error opening log file: %w", err)
	}
	defer f.Close()
	// bufio.Scanner splits on lines by default, so no explicit split
	// function is needed.
	if err := match(bufio.NewScanner(f), ex.Export); err != nil {
		return fmt.Errorf("error reading log file: %w", err)
	}
	return nil
}
// errNoMatch signals that a scanned line is not a measurements line and
// should be skipped.
var errNoMatch = errors.New("line does not match")

// match scans log lines, collecting one entry per lcore. Seeing a repeated
// lcore means a new logging interval started, so the accumulated group is
// aggregated and flushed through f; any remaining entries are flushed at
// EOF. Lines that don't match the measurements regex are ignored.
func match(sc *bufio.Scanner, f func(time.Time, *exporter.Measurements) error) error {
	entries := make(map[int]exporter.Entry)
	for sc.Scan() {
		entry, err := parseEntry(sc.Text())
		if err != nil {
			// Line doesn't match up with the regex; ignoring.
			if errors.Is(err, errNoMatch) {
				continue
			}
			return fmt.Errorf("error parsing entry: %w", err)
		}
		if _, repeat := entries[entry.Lcore]; repeat {
			// Repeated lcore; process the previous interval and start a new one.
			ts, aggr := aggregate(entries)
			if err := f(ts, &aggr); err != nil {
				return fmt.Errorf("error exporting data: %w", err)
			}
			entries = make(map[int]exporter.Entry)
		}
		entries[entry.Lcore] = entry
	}
	// A scanner I/O error was previously swallowed (nil was returned);
	// surface it instead.
	if err := sc.Err(); err != nil {
		return fmt.Errorf("error scanning log: %w", err)
	}
	// EOF: flush the remaining entries. The length guard avoids exporting a
	// zero-valued measurement with a zero timestamp for empty or entirely
	// non-matching files.
	if len(entries) > 0 {
		ts, aggr := aggregate(entries)
		if err := f(ts, &aggr); err != nil {
			return fmt.Errorf("error exporting data: %w", err)
		}
	}
	return nil
}
// parseEntry converts one gatekeeper log line into an exporter.Entry.
// Lines not matching logLineRegex yield errNoMatch so callers can skip them.
func parseEntry(line string) (exporter.Entry, error) {
	m := logLineRegex.FindStringSubmatch(line)
	if m == nil {
		return exporter.Entry{}, errNoMatch
	}
	ts, err := time.Parse(timeLayout, m[2])
	if err != nil {
		return exporter.Entry{}, fmt.Errorf("error parsing log time: %w", err)
	}
	core, err := strconv.Atoi(m[1])
	if err != nil {
		return exporter.Entry{}, fmt.Errorf("error parsing lcore: %w", err)
	}
	// Capture groups 3..16 are guaranteed by the regex to be digit runs, so
	// mustParseUint is safe short of a uint64 overflow.
	return exporter.Entry{
		Time:  ts,
		Lcore: core,
		Measurements: exporter.Measurements{
			TotPktsNum:               mustParseUint(m[3]),
			TotPktsSize:              mustParseUint(m[4]),
			PktsNumGranted:           mustParseUint(m[5]),
			PktsSizeGranted:          mustParseUint(m[6]),
			PktsNumRequest:           mustParseUint(m[7]),
			PktsSizeRequest:          mustParseUint(m[8]),
			PktsNumDeclined:          mustParseUint(m[9]),
			PktsSizeDeclined:         mustParseUint(m[10]),
			TotPktsNumDropped:        mustParseUint(m[11]),
			TotPktsSizeDropped:       mustParseUint(m[12]),
			TotPktsNumDistributed:    mustParseUint(m[13]),
			TotPktsSizeDistributed:   mustParseUint(m[14]),
			FlowTableOcupancyCurrent: mustParseUint(m[15]),
			FlowTableOcupancyMax:     mustParseUint(m[16]),
		},
	}, nil
}
// mustParseUint parses s as a base-10 uint64 and panics on failure. It is
// only used on regex captures of \d+, so a failure would mean overflow.
func mustParseUint(s string) uint64 {
	v, err := strconv.ParseUint(s, 10, 64)
	if err != nil {
		panic(err)
	}
	return v
}
// aggregate sums the measurements of all per-lcore entries into a single
// Measurements value and returns it together with the timestamp of one of
// the entries (map iteration order is random, so which entry supplies the
// timestamp is arbitrary — presumably all entries in a group share the same
// logging interval).
func aggregate(entries map[int]exporter.Entry) (time.Time, exporter.Measurements) {
	var ts time.Time
	var total exporter.Measurements
	for _, e := range entries {
		ts = e.Time
		m := e.Measurements
		total.TotPktsNum += m.TotPktsNum
		total.TotPktsSize += m.TotPktsSize
		total.PktsNumGranted += m.PktsNumGranted
		total.PktsSizeGranted += m.PktsSizeGranted
		total.PktsNumRequest += m.PktsNumRequest
		total.PktsSizeRequest += m.PktsSizeRequest
		total.PktsNumDeclined += m.PktsNumDeclined
		total.PktsSizeDeclined += m.PktsSizeDeclined
		total.TotPktsNumDropped += m.TotPktsNumDropped
		total.TotPktsSizeDropped += m.TotPktsSizeDropped
		total.TotPktsNumDistributed += m.TotPktsNumDistributed
		total.TotPktsSizeDistributed += m.TotPktsSizeDistributed
		total.FlowTableOcupancyCurrent += m.FlowTableOcupancyCurrent
		total.FlowTableOcupancyMax += m.FlowTableOcupancyMax
	}
	return ts, total
}
// saveLastLog durably records the path of the most recently parsed log file
// in lastLogFile using the write-temp/fsync/rename/fsync-dir pattern so the
// update is atomic even across a crash.
func saveLastLog(logFile string) error {
	dirPath := filepath.Dir(lastLogFile)
	// Fix: the permission was the decimal literal 755 (octal 0o1363), not
	// the intended 0o755. MkdirAll also succeeds when the directory already
	// exists and creates missing parents, so the IsExist check is gone.
	if err := os.MkdirAll(dirPath, 0o755); err != nil {
		return fmt.Errorf("create directory failed: %w", err)
	}
	dir, err := os.Open(dirPath)
	if err != nil {
		return fmt.Errorf("open directory failed: %w", err)
	}
	defer dir.Close()
	tmp := fmt.Sprintf("%s.tmp", lastLogFile)
	if err := safeWrite(tmp, logFile); err != nil {
		return fmt.Errorf("safe write failed: %w", err)
	}
	if err := os.Rename(tmp, lastLogFile); err != nil {
		return fmt.Errorf("rename failed: %w", err)
	}
	// Sync the directory so the rename itself is durable on disk.
	if err := dir.Sync(); err != nil {
		return fmt.Errorf("sync directory failed: %w", err)
	}
	return nil
}
// safeWrite creates (or truncates) path, writes data into it, and fsyncs
// the file so the contents are durable before the function returns.
func safeWrite(path string, data string) error {
	f, err := os.Create(path)
	if err != nil {
		return fmt.Errorf("create failed: %w", err)
	}
	defer f.Close()
	_, err = f.WriteString(data)
	if err != nil {
		return fmt.Errorf("write failed: %w", err)
	}
	if err = f.Sync(); err != nil {
		return fmt.Errorf("sync file failed: %w", err)
	}
	return nil
}