@@ -344,6 +344,7 @@ const (
 	ColumnPeriodSum   = "sum(" + profile.ColumnPeriod + ")"
 	ColumnValueCount  = "count(" + profile.ColumnValue + ")"
 	ColumnValueSum    = "sum(" + profile.ColumnValue + ")"
+	ColumnValueFirst  = "first(" + profile.ColumnValue + ")"
 )
 
 func (q *Querier) queryRangeDelta(ctx context.Context, filterExpr logicalplan.Expr, step time.Duration, sampleTypeUnit string) ([]*pb.MetricsSeries, error) {
@@ -531,17 +532,27 @@ func (q *Querier) queryRangeNonDelta(ctx context.Context, filterExpr logicalplan
 		Filter(filterExpr).
 		Aggregate(
 			[]logicalplan.Expr{
-				logicalplan.Sum(logicalplan.Col(profile.ColumnValue)),
+				logicalplan.Sum(logicalplan.Col(profile.ColumnValue)).Alias(ColumnValueFirst),
 			},
 			[]logicalplan.Expr{
 				logicalplan.DynCol(profile.ColumnLabels),
 				logicalplan.Col(profile.ColumnTimestamp),
 			},
 		).
+		Aggregate(
+			[]logicalplan.Expr{
+				logicalplan.Take(logicalplan.Col(profile.ColumnValue), 1).Alias(ColumnValueFirst),
+			},
+			[]logicalplan.Expr{
+				logicalplan.DynCol(profile.ColumnLabels),
+				logicalplan.Duration(1000 * time.Millisecond),
+			},
+		).
 		Execute(ctx, func(ctx context.Context, r arrow.Record) error {
 			r.Retain()
 			records = append(records, r)
 			rows += int(r.NumRows())
+			fmt.Printf("%v\n", r)
 			return nil
 		})
 	if err != nil {
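A minimal in-memory sketch of the grouping the second Aggregate step is going for: per label set and per 1-second window, keep only the first value. The sample struct, groupKey, and firstPerWindow helper below are illustrative stand-ins, not Parca or FrostDB types, and "first" here simply means slice order.

package main

import (
	"fmt"
	"time"
)

type sample struct {
	labels string // pre-rendered label set, e.g. `{job="api"}`
	ts     int64  // milliseconds since epoch
	value  int64
}

type groupKey struct {
	labels string
	bucket int64
}

// firstPerWindow keeps the first value seen per (label set, time window) group.
func firstPerWindow(samples []sample, window time.Duration) map[groupKey]int64 {
	out := map[groupKey]int64{}
	for _, s := range samples {
		k := groupKey{labels: s.labels, bucket: s.ts / window.Milliseconds()}
		if _, ok := out[k]; !ok {
			out[k] = s.value // take(value, 1): keep the first value per group
		}
	}
	return out
}

func main() {
	samples := []sample{
		{`{job="api"}`, 1_000, 10},
		{`{job="api"}`, 1_250, 20}, // same 1s window as above, dropped
		{`{job="api"}`, 2_100, 30},
	}
	fmt.Println(firstPerWindow(samples, 1000*time.Millisecond))
}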
@@ -561,7 +572,7 @@ func (q *Querier) queryRangeNonDelta(ctx context.Context, filterExpr logicalplan
 	// Add necessary columns and their found value is false by default.
 	columnIndices := map[string]columnIndex{
 		profile.ColumnTimestamp: {},
-		ColumnValueSum:          {},
+		ColumnValueFirst:        {},
 	}
 	labelColumnIndices := []int{}
 	labelSet := labels.Labels{}
@@ -623,7 +634,11 @@ func (q *Querier) queryRangeNonDelta(ctx context.Context, filterExpr logicalplan
 		}
 
 		ts := ar.Column(columnIndices[profile.ColumnTimestamp].index).(*array.Int64).Value(i)
-		value := ar.Column(columnIndices[ColumnValueSum].index).(*array.Int64).Value(i)
+
+		// value := ar.Column(columnIndices[ColumnValueFirst].index).(*array.Int64).Value(i)
+		valueList := ar.Column(columnIndices[ColumnValueFirst].index).(*array.List)
+		start, _ := valueList.ValueOffsets(i)
+		value := valueList.ListValues().(*array.Int64).Value(int(start))
 
 		// Each step bucket will only return one of the timestamps and its value.
 		// For this reason we'll take each timestamp and divide it by the step seconds.
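A self-contained sketch of the ValueOffsets/ListValues pattern used above for reading the first element of each row out of an Arrow list column. The Arrow Go module version in the import path is an assumption; adjust it to whatever the repository pins.

package main

import (
	"fmt"

	"github.com/apache/arrow/go/v14/arrow"
	"github.com/apache/arrow/go/v14/arrow/array"
	"github.com/apache/arrow/go/v14/arrow/memory"
)

func main() {
	// Build a list<int64> column with two rows: [42, 7] and [13].
	lb := array.NewListBuilder(memory.DefaultAllocator, arrow.PrimitiveTypes.Int64)
	defer lb.Release()
	vb := lb.ValueBuilder().(*array.Int64Builder)

	lb.Append(true)
	vb.Append(42)
	vb.Append(7)
	lb.Append(true)
	vb.Append(13)

	list := lb.NewListArray()
	defer list.Release()

	// The elements of every row live in one flat child array; a row's start
	// offset points at its first element in that child array.
	for i := 0; i < list.Len(); i++ {
		start, end := list.ValueOffsets(i)
		first := list.ListValues().(*array.Int64).Value(int(start))
		fmt.Printf("row %d: %d elements, first=%d\n", i, end-start, first)
	}
}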
@@ -634,6 +649,7 @@ func (q *Querier) queryRangeNonDelta(ctx context.Context, filterExpr logicalplan
 		// This needs to be moved to FrostDB to not even query all of this data in the first place.
 		// With a scrape interval of 10s and a query range of 1d we'd query 8640 samples and at most return 960.
 		// Even worse for a week, we'd query 60480 samples and only return 1000.
+
 		tsBucket := ts / 1000 / int64(step.Seconds())
 		if _, found := resSeriesBuckets[index][tsBucket]; found {
 			// We already have a MetricsSample for this timestamp bucket, ignore it.
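A short sketch of the bucketing arithmetic above: timestamps are in milliseconds, so dividing by 1000 and then by the step in seconds assigns each sample to one step-sized bucket, and only the first sample per bucket is kept. The 90-second step is an assumption chosen because it reproduces the one-day figures from the comment (8640 samples scraped at 10s, at most 960 returned).

package main

import (
	"fmt"
	"time"
)

func main() {
	step := 90 * time.Second
	seen := map[int64]bool{}
	kept := 0

	// One day of samples at a 10s scrape interval: 8640 timestamps in milliseconds.
	for ts := int64(0); ts < 24*60*60*1000; ts += 10_000 {
		tsBucket := ts / 1000 / int64(step.Seconds())
		if seen[tsBucket] {
			continue // already have a sample for this step bucket, ignore it
		}
		seen[tsBucket] = true
		kept++
	}
	fmt.Printf("kept %d of %d samples\n", kept, 8640) // kept 960 of 8640
}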