From 30d22abc8c0b2911d196146a6e4d77d3eadabf0f Mon Sep 17 00:00:00 2001
From: Matthieu MOREL
Date: Thu, 9 Jan 2025 20:16:37 +0100
Subject: [PATCH] chore: enable early-return from revive

Enable revive's early-return rule with the preserveScope option, add the
same option to superfluous-else, and refactor the flagged if/else blocks
so that error and edge cases return, break, or continue early instead of
nesting the happy path inside an else branch.

Signed-off-by: Matthieu MOREL
---
 .golangci.yml                                 |  5 ++
 confmap/provider/aesprovider/provider_test.go |  5 +-
 exporter/prometheusexporter/accumulator.go    |  7 ++-
 internal/aws/metrics/metric_calculator.go     |  5 +-
 internal/sqlquery/scraper.go                  |  5 +-
 pkg/stanza/entry/entry.go                     |  6 +--
 .../internal/stores/podstore.go               | 17 +++----
 receiver/awss3receiver/receiver.go            |  6 +--
 .../internal/kubelet/metadata.go              | 12 ++---
 .../internal/transaction.go                   |  7 +--
 receiver/saphanareceiver/queries.go           |  9 ++--
 receiver/solacereceiver/receiver.go           | 49 +++++++++----------
 testbed/testbed/child_process_collector.go    | 10 ++--
 13 files changed, 71 insertions(+), 72 deletions(-)

diff --git a/.golangci.yml b/.golangci.yml
index 27206273d148b..53d3ba663adee 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -89,6 +89,9 @@ linters-settings:
       - name: context-keys-type
       # Importing with `.` makes the programs much harder to understand
       - name: dot-imports
+      - name: early-return
+        arguments:
+          - "preserveScope"
       # Empty blocks make code less readable and could be a symptom of a bug or unfinished refactoring.
       - name: empty-block
       # for better readability, variables of type `error` must be named with the prefix `err`.
@@ -111,6 +114,8 @@ linters-settings:
      - name: redefines-builtin-id
       # redundant else-blocks that can be eliminated from the code.
       - name: superfluous-else
+        arguments:
+          - "preserveScope"
       # prevent confusing name for variables when using `time` package
       - name: time-naming
       # warns when an exported function or method returns a value of an un-exported type.
diff --git a/confmap/provider/aesprovider/provider_test.go b/confmap/provider/aesprovider/provider_test.go
index cc8e62489246b..d81aee471564f 100644
--- a/confmap/provider/aesprovider/provider_test.go
+++ b/confmap/provider/aesprovider/provider_test.go
@@ -104,13 +104,12 @@ func TestAESCredentialProvider(t *testing.T) {
 
 			p := NewFactory().Create(confmap.ProviderSettings{})
 			retrieved, err := p.Retrieve(context.Background(), tt.configValue, nil)
-			if tt.expectedError == "" {
-				require.NoError(t, err)
-			} else {
+			if tt.expectedError != "" {
 				require.Error(t, err)
 				require.Equal(t, tt.expectedError, err.Error())
 				return
 			}
+			require.NoError(t, err)
 			require.NotNil(t, retrieved)
 			stringValue, err := retrieved.AsString()
 			require.NoError(t, err)
diff --git a/exporter/prometheusexporter/accumulator.go b/exporter/prometheusexporter/accumulator.go
index 3a6559118fe54..449d7de4d6cfd 100644
--- a/exporter/prometheusexporter/accumulator.go
+++ b/exporter/prometheusexporter/accumulator.go
@@ -261,15 +261,14 @@ func (a *lastValueAccumulator) accumulateHistogram(metric pmetric.Metric, il pco
 				zap.String("pp_timestamp", pp.Timestamp().String()),
 				zap.String("ip_timestamp", ip.Timestamp().String()),
 			).Warn("Misaligned starting timestamps")
-			if ip.StartTimestamp().AsTime().After(pp.Timestamp().AsTime()) {
-				a.logger.Debug("treating it like reset")
-				ip.CopyTo(m.Histogram().DataPoints().AppendEmpty())
-			} else {
+			if !ip.StartTimestamp().AsTime().After(pp.Timestamp().AsTime()) {
 				a.logger.With(
 					zap.String("metric_name", metric.Name()),
 				).Warn("Dropped misaligned histogram datapoint")
 				continue
 			}
+			a.logger.Debug("treating it like reset")
+			ip.CopyTo(m.Histogram().DataPoints().AppendEmpty())
 		} else {
 			a.logger.Debug("Accumulate another histogram datapoint")
 			accumulateHistogramValues(pp, ip, m.Histogram().DataPoints().AppendEmpty())
diff --git a/internal/aws/metrics/metric_calculator.go b/internal/aws/metrics/metric_calculator.go
index 16435ef7f7540..979bb91a6c50e 100644
--- a/internal/aws/metrics/metric_calculator.go
+++ b/internal/aws/metrics/metric_calculator.go
@@ -26,11 +26,10 @@ func NewFloat64DeltaCalculator() MetricCalculator {
 
 func calculateDelta(prev *MetricValue, val any, _ time.Time) (any, bool) {
 	var deltaValue float64
-	if prev != nil {
-		deltaValue = val.(float64) - prev.RawValue.(float64)
-	} else {
+	if prev == nil {
 		return deltaValue, false
 	}
+	deltaValue = val.(float64) - prev.RawValue.(float64)
 	return deltaValue, true
 }
 
diff --git a/internal/sqlquery/scraper.go b/internal/sqlquery/scraper.go
index 680a6164fa0af..0de61c0559e9e 100644
--- a/internal/sqlquery/scraper.go
+++ b/internal/sqlquery/scraper.go
@@ -74,11 +74,10 @@ func (s *Scraper) ScrapeMetrics(ctx context.Context) (pmetric.Metrics, error) {
 	out := pmetric.NewMetrics()
 	rows, err := s.Client.QueryRows(ctx)
 	if err != nil {
-		if errors.Is(err, ErrNullValueWarning) {
-			s.Logger.Warn("problems encountered getting metric rows", zap.Error(err))
-		} else {
+		if !errors.Is(err, ErrNullValueWarning) {
 			return out, fmt.Errorf("Scraper: %w", err)
 		}
+		s.Logger.Warn("problems encountered getting metric rows", zap.Error(err))
 	}
 	ts := pcommon.NewTimestampFromTime(time.Now())
 	rms := out.ResourceMetrics()
diff --git a/pkg/stanza/entry/entry.go b/pkg/stanza/entry/entry.go
index 882f22695f415..caf8876644e2e 100644
--- a/pkg/stanza/entry/entry.go
+++ b/pkg/stanza/entry/entry.go
@@ -136,11 +136,11 @@ func (entry *Entry) readToStringMap(field FieldInterface, dest *map[string]strin
 	case map[string]any:
 		newDest := make(map[string]string)
 		for k, v := range m {
-			if vStr, ok := v.(string); ok {
-				newDest[k] = vStr
-			} else {
+			vStr, ok := v.(string)
+			if !ok {
 				return fmt.Errorf("can not cast map members '%s' of type '%s' to string", k, v)
 			}
+			newDest[k] = vStr
 		}
 		*dest = newDest
 	case map[any]any:
diff --git a/receiver/awscontainerinsightreceiver/internal/stores/podstore.go b/receiver/awscontainerinsightreceiver/internal/stores/podstore.go
index 16350c21fe040..71396090b20ee 100644
--- a/receiver/awscontainerinsightreceiver/internal/stores/podstore.go
+++ b/receiver/awscontainerinsightreceiver/internal/stores/podstore.go
@@ -206,18 +206,17 @@ func (p *PodStore) Decorate(ctx context.Context, metric CIMetric, kubernetesBlob
 		}
 
 		// If the entry is not a placeholder, decorate the pod
-		if entry.pod.Name != "" {
-			p.decorateCPU(metric, &entry.pod)
-			p.decorateMem(metric, &entry.pod)
-			p.addStatus(metric, &entry.pod)
-			addContainerCount(metric, &entry.pod)
-			addContainerID(&entry.pod, metric, kubernetesBlob, p.logger)
-			p.addPodOwnersAndPodName(metric, &entry.pod, kubernetesBlob)
-			addLabels(&entry.pod, kubernetesBlob)
-		} else {
+		if entry.pod.Name == "" {
 			p.logger.Warn("no pod information is found in podstore for pod " + podKey)
 			return false
 		}
+		p.decorateCPU(metric, &entry.pod)
+		p.decorateMem(metric, &entry.pod)
+		p.addStatus(metric, &entry.pod)
+		addContainerCount(metric, &entry.pod)
+		addContainerID(&entry.pod, metric, kubernetesBlob, p.logger)
+		p.addPodOwnersAndPodName(metric, &entry.pod, kubernetesBlob)
+		addLabels(&entry.pod, kubernetesBlob)
 	}
 	return true
 }
diff --git a/receiver/awss3receiver/receiver.go b/receiver/awss3receiver/receiver.go
index 89e67c4f525c4..b4df749767b7e 100644
--- a/receiver/awss3receiver/receiver.go
+++ b/receiver/awss3receiver/receiver.go
@@ -251,11 +251,11 @@ func newEncodingExtensions(encodingsConfig []Encoding, host component.Host) (enc
 	encodings := make(encodingExtensions, 0)
 	extensions := host.GetExtensions()
 	for _, configItem := range encodingsConfig {
-		if e, ok := extensions[configItem.Extension]; ok {
-			encodings = append(encodings, encodingExtension{extension: e, suffix: configItem.Suffix})
-		} else {
+		e, ok := extensions[configItem.Extension]
+		if !ok {
 			return nil, fmt.Errorf("extension %q not found", configItem.Extension)
 		}
+		encodings = append(encodings, encodingExtension{extension: e, suffix: configItem.Suffix})
 	}
 	return encodings, nil
 }
diff --git a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
index a2ffdb452dc46..5824b46a2b48f 100644
--- a/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
+++ b/receiver/kubeletstatsreceiver/internal/kubelet/metadata.go
@@ -34,14 +34,14 @@ var supportedLabels = map[MetadataLabel]bool{
 func ValidateMetadataLabelsConfig(labels []MetadataLabel) error {
 	labelsFound := map[MetadataLabel]bool{}
 	for _, label := range labels {
-		if _, supported := supportedLabels[label]; supported {
-			if _, duplicate := labelsFound[label]; duplicate {
-				return fmt.Errorf("duplicate metadata label: %q", label)
-			}
-			labelsFound[label] = true
-		} else {
+		_, supported := supportedLabels[label]
+		if !supported {
 			return fmt.Errorf("label %q is not supported", label)
 		}
+		if _, duplicate := labelsFound[label]; duplicate {
+			return fmt.Errorf("duplicate metadata label: %q", label)
+		}
+		labelsFound[label] = true
 	}
 	return nil
 }
diff --git a/receiver/prometheusreceiver/internal/transaction.go b/receiver/prometheusreceiver/internal/transaction.go
index 00728449629d7..1740588a27cc9 100644
--- a/receiver/prometheusreceiver/internal/transaction.go
+++ b/receiver/prometheusreceiver/internal/transaction.go
@@ -198,12 +198,13 @@ func (t *transaction) getOrCreateMetricFamily(key resourceKey, scope scopeID, mn
 		if _, ok := t.mc.GetMetadata(mn); !ok {
 			fn = normalizeMetricName(mn)
 		}
-		if mf, ok := t.families[key][scope][fn]; ok && mf.includesMetric(mn) {
-			curMf = mf
-		} else {
+		mf, ok := t.families[key][scope][fn]
+		if !ok || !mf.includesMetric(mn) {
 			curMf = newMetricFamily(mn, t.mc, t.logger)
 			t.families[key][scope][curMf.name] = curMf
 			return curMf, false
+		} else {
+			curMf = mf
 		}
 	}
 	return curMf, true
diff --git a/receiver/saphanareceiver/queries.go b/receiver/saphanareceiver/queries.go
index ec565dcea8f77..c87a718920a60 100644
--- a/receiver/saphanareceiver/queries.go
+++ b/receiver/saphanareceiver/queries.go
@@ -37,13 +37,12 @@ func (q *queryStat) collectStat(s *sapHanaScraper, m *monitoringQuery, now pcomm
 			return fmt.Errorf("unable to parse metric for key %s: %w", q.key, err)
 		}
 
-		if q.addMetricFunction != nil {
-			if err = q.addMetricFunction(mb, now, val, row); err != nil {
-				return fmt.Errorf("failed to record metric for key %s: %w", q.key, err)
-			}
-		} else {
+		if q.addMetricFunction == nil {
 			return errors.New("incorrectly configured query, addMetricFunction must be provided")
 		}
+		if err = q.addMetricFunction(mb, now, val, row); err != nil {
+			return fmt.Errorf("failed to record metric for key %s: %w", q.key, err)
+		}
 	}
 	return nil
 }
diff --git a/receiver/solacereceiver/receiver.go b/receiver/solacereceiver/receiver.go
index 893c677bd69e9..8a702d9c5ce71 100644
--- a/receiver/solacereceiver/receiver.go
+++ b/receiver/solacereceiver/receiver.go
@@ -265,35 +265,34 @@ flowControlLoop:
 		}
 
 		forwardErr := s.nextConsumer.ConsumeTraces(ctx, traces)
-		if forwardErr != nil {
-			if !consumererror.IsPermanent(forwardErr) {
-				s.settings.Logger.Info("Encountered temporary error while forwarding traces to next receiver, will allow redelivery", zap.Error(forwardErr))
-				// handle flow control metrics
-				if flowControlCount == 0 {
-					s.telemetryBuilder.SolacereceiverReceiverFlowControlStatus.Record(ctx, int64(flowControlStateControlled), metric.WithAttributeSet(s.metricAttrs))
-				}
-				flowControlCount++
-				s.telemetryBuilder.SolacereceiverReceiverFlowControlRecentRetries.Record(ctx, flowControlCount, metric.WithAttributeSet(s.metricAttrs))
-				// Backpressure scenario. For now, we are only delayed retry, eventually we may need to handle this
-				delayTimer := time.NewTimer(s.config.Flow.DelayedRetry.Delay)
-				select {
-				case <-delayTimer.C:
-					continue flowControlLoop
-				case <-ctx.Done():
-					s.settings.Logger.Info("Context was cancelled while attempting redelivery, exiting")
-					disposition = nil // do not make any network requests, we are shutting down
-					return errors.New("delayed retry interrupted by shutdown request")
-				}
-			} else { // error is permanent, we want to accept the message and increment the number of dropped messages
-				s.settings.Logger.Warn("Encountered permanent error while forwarding traces to next receiver, will swallow trace", zap.Error(forwardErr))
-				s.telemetryBuilder.SolacereceiverDroppedSpanMessages.Add(ctx, 1, metric.WithAttributeSet(s.metricAttrs))
-				break flowControlLoop
-			}
-		} else {
+		if forwardErr == nil {
 			// no forward error
 			s.telemetryBuilder.SolacereceiverReportedSpans.Add(ctx, int64(spanCount), metric.WithAttributeSet(s.metricAttrs))
 			break flowControlLoop
 		}
+		if !consumererror.IsPermanent(forwardErr) {
+			s.settings.Logger.Info("Encountered temporary error while forwarding traces to next receiver, will allow redelivery", zap.Error(forwardErr))
+			// handle flow control metrics
+			if flowControlCount == 0 {
+				s.telemetryBuilder.SolacereceiverReceiverFlowControlStatus.Record(ctx, int64(flowControlStateControlled), metric.WithAttributeSet(s.metricAttrs))
+			}
+			flowControlCount++
+			s.telemetryBuilder.SolacereceiverReceiverFlowControlRecentRetries.Record(ctx, flowControlCount, metric.WithAttributeSet(s.metricAttrs))
+			// Backpressure scenario. For now, we are only delayed retry, eventually we may need to handle this
+			delayTimer := time.NewTimer(s.config.Flow.DelayedRetry.Delay)
+			select {
+			case <-delayTimer.C:
+				continue flowControlLoop
+			case <-ctx.Done():
+				s.settings.Logger.Info("Context was cancelled while attempting redelivery, exiting")
+				disposition = nil // do not make any network requests, we are shutting down
+				return errors.New("delayed retry interrupted by shutdown request")
+			}
+		} else { // error is permanent, we want to accept the message and increment the number of dropped messages
+			s.settings.Logger.Warn("Encountered permanent error while forwarding traces to next receiver, will swallow trace", zap.Error(forwardErr))
+			s.telemetryBuilder.SolacereceiverDroppedSpanMessages.Add(ctx, 1, metric.WithAttributeSet(s.metricAttrs))
+			break flowControlLoop
+		}
 	}
 	// Make sure to clear the stats no matter what, unless we were interrupted in which case we should preserve the last state
 	if flowControlCount != 0 {
diff --git a/testbed/testbed/child_process_collector.go b/testbed/testbed/child_process_collector.go
index 6661f35595dd3..8645eeb637970 100644
--- a/testbed/testbed/child_process_collector.go
+++ b/testbed/testbed/child_process_collector.go
@@ -334,12 +334,12 @@ func (cp *childProcessCollector) WatchResourceConsumption() error {
 	for start := time.Now(); time.Since(start) < time.Minute; {
 		cp.fetchRAMUsage()
 		cp.fetchCPUUsage()
-		if err := cp.checkAllowedResourceUsage(); err != nil {
-			log.Printf("Allowed usage of resources is too high before test starts wait for one second : %v", err)
-			time.Sleep(time.Second)
-		} else {
+		err := cp.checkAllowedResourceUsage()
+		if err == nil {
 			break
-		}
+		}
+		log.Printf("Allowed usage of resources is too high before test starts wait for one second : %v", err)
+		time.Sleep(time.Second)
 	}
 
 	remainingFailures := cp.resourceSpec.MaxConsecutiveFailures