diff --git a/.changeset/new-fans-teach.md b/.changeset/new-fans-teach.md new file mode 100644 index 00000000..785d08a8 --- /dev/null +++ b/.changeset/new-fans-teach.md @@ -0,0 +1,5 @@ +--- +'@powersync/service-core': minor +--- + +Optimized metrics initialization and configuration for extensibility. diff --git a/.changeset/silent-tips-divide.md b/.changeset/silent-tips-divide.md new file mode 100644 index 00000000..ca238b46 --- /dev/null +++ b/.changeset/silent-tips-divide.md @@ -0,0 +1,5 @@ +--- +'@powersync/service-core': minor +--- + +Added ability to extend configuration collection process. diff --git a/.changeset/wicked-geese-teach.md b/.changeset/wicked-geese-teach.md new file mode 100644 index 00000000..1284ebd9 --- /dev/null +++ b/.changeset/wicked-geese-teach.md @@ -0,0 +1,5 @@ +--- +'@powersync/lib-services-framework': minor +--- + +Added ability for generic implementation registration and fetching on the Container. diff --git a/libs/lib-services/src/container.ts b/libs/lib-services/src/container.ts index 0448a9f0..4e015284 100644 --- a/libs/lib-services/src/container.ts +++ b/libs/lib-services/src/container.ts @@ -23,14 +23,34 @@ export type ContainerImplementationDefaultGenerators = { [type in ContainerImplementation]: () => ContainerImplementationTypes[type]; }; +/** + * Helper for identifying constructors + */ +export interface Abstract { + prototype: T; +} +/** + * A basic class constructor + */ +export type Newable = new (...args: never[]) => T; + +/** + * Identifier used to get and register implementations + */ +export type ServiceIdentifier = string | symbol | Newable | Abstract | ContainerImplementation; + const DEFAULT_GENERATORS: ContainerImplementationDefaultGenerators = { [ContainerImplementation.REPORTER]: () => NoOpReporter, [ContainerImplementation.PROBES]: () => createFSProbe(), [ContainerImplementation.TERMINATION_HANDLER]: () => createTerminationHandler() }; +/** + * A container which provides means for registering and getting various + 
* function implementations. + */ export class Container { - protected implementations: Partial; + protected implementations: Map, any>; /** * Manager for system health probes @@ -54,13 +74,39 @@ export class Container { } constructor() { - this.implementations = {}; + this.implementations = new Map(); + } + + /** + * Gets an implementation given an identifier. + * An exception is thrown if the implementation has not been registered. + * Core [ContainerImplementation] identifiers are mapped to their respective implementation types. + * This also allows for getting generic implementations (unknown to the core framework) which have been registered. + */ + getImplementation(identifier: Newable | Abstract): T; + getImplementation(identifier: T): ContainerImplementationTypes[T]; + getImplementation(identifier: ServiceIdentifier): T; + getImplementation(identifier: ServiceIdentifier): T { + const implementation = this.implementations.get(identifier); + if (!implementation) { + throw new Error(`Implementation for ${String(identifier)} has not been registered.`); + } + return implementation; } - getImplementation(type: Type) { - const implementation = this.implementations[type]; + /** + * Gets an implementation given an identifier. + * Null is returned if the implementation has not been registered yet. + * Core [ContainerImplementation] identifiers are mapped to their respective implementation types. + * This also allows for getting generic implementations (unknown to the core framework) which have been registered. 
+ */ + getOptional(identifier: Newable | Abstract): T | null; + getOptional(identifier: T): ContainerImplementationTypes[T] | null; + getOptional(identifier: ServiceIdentifier): T | null; + getOptional(identifier: ServiceIdentifier): T | null { + const implementation = this.implementations.get(identifier); if (!implementation) { - throw new Error(`Implementation for ${type} has not been registered.`); + return null; } return implementation; } @@ -71,15 +117,15 @@ export class Container { registerDefaults(options?: RegisterDefaultsOptions) { _.difference(Object.values(ContainerImplementation), options?.skip ?? []).forEach((type) => { const generator = DEFAULT_GENERATORS[type]; - this.implementations[type] = generator() as any; // :( + this.register(type, generator()); }); } /** - * Allows for overriding a default implementation + * Allows for registering core and generic implementations of services/helpers. */ - register(type: Type, implementation: ContainerImplementationTypes[Type]) { - this.implementations[type] = implementation; + register(identifier: ServiceIdentifier, implementation: T) { + this.implementations.set(identifier, implementation); } } diff --git a/packages/service-core/package.json b/packages/service-core/package.json index 2bc167e1..316c44be 100644 --- a/packages/service-core/package.json +++ b/packages/service-core/package.json @@ -18,9 +18,7 @@ "dependencies": { "@js-sdsl/ordered-set": "^4.4.2", "@opentelemetry/api": "~1.8.0", - "@opentelemetry/exporter-metrics-otlp-http": "^0.51.1", "@opentelemetry/exporter-prometheus": "^0.51.1", - "@opentelemetry/resources": "^1.24.1", "@opentelemetry/sdk-metrics": "1.24.1", "@powersync/lib-services-framework": "workspace:*", "@powersync/service-jpgwire": "workspace:*", diff --git a/packages/service-core/src/metrics/Metrics.ts b/packages/service-core/src/metrics/Metrics.ts index 2ec75910..e5734611 100644 --- a/packages/service-core/src/metrics/Metrics.ts +++ b/packages/service-core/src/metrics/Metrics.ts @@ 
-1,12 +1,9 @@ import { Attributes, Counter, ObservableGauge, UpDownCounter, ValueType } from '@opentelemetry/api'; import { PrometheusExporter } from '@opentelemetry/exporter-prometheus'; -import { MeterProvider, MetricReader, PeriodicExportingMetricReader } from '@opentelemetry/sdk-metrics'; -import { OTLPMetricExporter } from '@opentelemetry/exporter-metrics-otlp-http'; +import { MeterProvider } from '@opentelemetry/sdk-metrics'; import * as jpgwire from '@powersync/service-jpgwire'; -import * as util from '../util/util-index.js'; import * as storage from '../storage/storage-index.js'; import { CorePowerSyncSystem } from '../system/CorePowerSyncSystem.js'; -import { Resource } from '@opentelemetry/resources'; import { logger } from '@powersync/lib-services-framework'; export interface MetricsOptions { @@ -16,8 +13,6 @@ export interface MetricsOptions { } export class Metrics { - private static instance: Metrics; - private prometheusExporter: PrometheusExporter; private meterProvider: MeterProvider; @@ -60,7 +55,7 @@ export class Metrics { // Record on API pod public concurrent_connections: UpDownCounter; - private constructor(meterProvider: MeterProvider, prometheusExporter: PrometheusExporter) { + constructor(meterProvider: MeterProvider, prometheusExporter: PrometheusExporter) { this.meterProvider = meterProvider; this.prometheusExporter = prometheusExporter; const meter = meterProvider.getMeter('powersync'); @@ -132,66 +127,6 @@ export class Metrics { this.concurrent_connections.add(0); } - public static getInstance(): Metrics { - if (!Metrics.instance) { - throw new Error('Metrics have not been initialised'); - } - - return Metrics.instance; - } - - public static async initialise(options: MetricsOptions): Promise { - if (Metrics.instance) { - return; - } - logger.info('Configuring telemetry.'); - - logger.info( - ` -Attention: -PowerSync collects completely anonymous telemetry regarding usage. 
-This information is used to shape our roadmap to better serve our customers. -You can learn more, including how to opt-out if you'd not like to participate in this anonymous program, by visiting the following URL: -https://docs.powersync.com/self-hosting/telemetry -Anonymous telemetry is currently: ${options.disable_telemetry_sharing ? 'disabled' : 'enabled'} - `.trim() - ); - - const configuredExporters: MetricReader[] = []; - - const port: number = util.env.METRICS_PORT ?? 0; - const prometheusExporter = new PrometheusExporter({ port: port, preventServerStart: true }); - configuredExporters.push(prometheusExporter); - - if (!options.disable_telemetry_sharing) { - logger.info('Sharing anonymous telemetry'); - const periodicExporter = new PeriodicExportingMetricReader({ - exporter: new OTLPMetricExporter({ - url: options.internal_metrics_endpoint - }), - exportIntervalMillis: 1000 * 60 * 5 // 5 minutes - }); - - configuredExporters.push(periodicExporter); - } - - const meterProvider = new MeterProvider({ - resource: new Resource({ - ['service']: 'PowerSync', - ['instance_id']: options.powersync_instance_id - }), - readers: configuredExporters - }); - - if (port > 0) { - await prometheusExporter.startServer(); - } - - Metrics.instance = new Metrics(meterProvider, prometheusExporter); - - logger.info('Telemetry configuration complete.'); - } - public async shutdown(): Promise { await this.meterProvider.shutdown(); } diff --git a/packages/service-core/src/replication/WalStream.ts b/packages/service-core/src/replication/WalStream.ts index 95eaecd4..79560cab 100644 --- a/packages/service-core/src/replication/WalStream.ts +++ b/packages/service-core/src/replication/WalStream.ts @@ -406,7 +406,7 @@ WHERE oid = $1::regclass`, await batch.save({ tag: 'insert', sourceTable: table, before: undefined, after: record }); } at += rows.length; - Metrics.getInstance().rows_replicated_total.add(rows.length); + 
container.getImplementation(Metrics).rows_replicated_total.add(rows.length); await touch(); } @@ -492,19 +492,21 @@ WHERE oid = $1::regclass`, return null; } + const metrics = container.getImplementation(Metrics); + if (msg.tag == 'insert') { - Metrics.getInstance().rows_replicated_total.add(1); + metrics.rows_replicated_total.add(1); const baseRecord = util.constructAfterRecord(msg); return await batch.save({ tag: 'insert', sourceTable: table, before: undefined, after: baseRecord }); } else if (msg.tag == 'update') { - Metrics.getInstance().rows_replicated_total.add(1); + metrics.rows_replicated_total.add(1); // "before" may be null if the replica id columns are unchanged // It's fine to treat that the same as an insert. const before = util.constructBeforeRecord(msg); const after = util.constructAfterRecord(msg); return await batch.save({ tag: 'update', sourceTable: table, before: before, after: after }); } else if (msg.tag == 'delete') { - Metrics.getInstance().rows_replicated_total.add(1); + metrics.rows_replicated_total.add(1); const before = util.constructBeforeRecord(msg)!; return await batch.save({ tag: 'delete', sourceTable: table, before: before, after: undefined }); @@ -555,6 +557,8 @@ WHERE oid = $1::regclass`, // Auto-activate as soon as initial replication is done await this.storage.autoActivate(); + const metrics = container.getImplementation(Metrics); + await this.storage.startBatch({}, async (batch) => { // Replication never starts in the middle of a transaction let inTx = false; @@ -577,7 +581,7 @@ WHERE oid = $1::regclass`, } else if (msg.tag == 'begin') { inTx = true; } else if (msg.tag == 'commit') { - Metrics.getInstance().transactions_replicated_total.add(1); + metrics.transactions_replicated_total.add(1); inTx = false; await batch.commit(msg.lsn!); await this.ack(msg.lsn!, replicationStream); @@ -602,7 +606,7 @@ WHERE oid = $1::regclass`, } } - Metrics.getInstance().chunks_replicated_total.add(1); + metrics.chunks_replicated_total.add(1); } 
}); } diff --git a/packages/service-core/src/routes/endpoints/socket-route.ts b/packages/service-core/src/routes/endpoints/socket-route.ts index 799f9440..06bbabb0 100644 --- a/packages/service-core/src/routes/endpoints/socket-route.ts +++ b/packages/service-core/src/routes/endpoints/socket-route.ts @@ -1,4 +1,4 @@ -import { errors, logger, schema } from '@powersync/lib-services-framework'; +import { container, errors, logger, schema } from '@powersync/lib-services-framework'; import { RequestParameters } from '@powersync/service-sync-rules'; import { serialize } from 'bson'; @@ -66,7 +66,9 @@ export const syncStreamReactive: SocketRouteGenerator = (router) => observer.triggerCancel(); }); - Metrics.getInstance().concurrent_connections.add(1); + const metrics = container.getImplementation(Metrics); + + metrics.concurrent_connections.add(1); const tracker = new RequestTracker(); try { for await (const data of streamResponse({ @@ -134,7 +136,7 @@ export const syncStreamReactive: SocketRouteGenerator = (router) => operations_synced: tracker.operationsSynced, data_synced_bytes: tracker.dataSyncedBytes }); - Metrics.getInstance().concurrent_connections.add(-1); + metrics.concurrent_connections.add(-1); } } }); diff --git a/packages/service-core/src/routes/endpoints/sync-stream.ts b/packages/service-core/src/routes/endpoints/sync-stream.ts index 404bdb26..fd0b9461 100644 --- a/packages/service-core/src/routes/endpoints/sync-stream.ts +++ b/packages/service-core/src/routes/endpoints/sync-stream.ts @@ -1,4 +1,4 @@ -import { errors, logger, router, schema } from '@powersync/lib-services-framework'; +import { container, errors, logger, router, schema } from '@powersync/lib-services-framework'; import { RequestParameters } from '@powersync/service-sync-rules'; import { Readable } from 'stream'; @@ -43,10 +43,11 @@ export const syncStreamed = routeDefinition({ description: 'No sync rules available' }); } + const metrics = container.getImplementation(Metrics); const controller 
= new AbortController(); const tracker = new RequestTracker(); try { - Metrics.getInstance().concurrent_connections.add(1); + metrics.concurrent_connections.add(1); const stream = Readable.from( sync.transformToBytesTracked( sync.ndjson( @@ -89,7 +90,7 @@ export const syncStreamed = routeDefinition({ data: stream, afterSend: async () => { controller.abort(); - Metrics.getInstance().concurrent_connections.add(-1); + metrics.concurrent_connections.add(-1); logger.info(`Sync stream complete`, { user_id: syncParams.user_id, operations_synced: tracker.operationsSynced, @@ -99,7 +100,7 @@ export const syncStreamed = routeDefinition({ }); } catch (ex) { controller.abort(); - Metrics.getInstance().concurrent_connections.add(-1); + metrics.concurrent_connections.add(-1); } } }); diff --git a/packages/service-core/src/sync/RequestTracker.ts b/packages/service-core/src/sync/RequestTracker.ts index 81d717d2..1b6943ba 100644 --- a/packages/service-core/src/sync/RequestTracker.ts +++ b/packages/service-core/src/sync/RequestTracker.ts @@ -1,3 +1,4 @@ +import { container } from '@powersync/lib-services-framework'; import { Metrics } from '../metrics/Metrics.js'; /** @@ -9,13 +10,12 @@ export class RequestTracker { addOperationsSynced(operations: number) { this.operationsSynced += operations; - - Metrics.getInstance().operations_synced_total.add(operations); + container.getImplementation(Metrics).operations_synced_total.add(operations); } addDataSynced(bytes: number) { this.dataSyncedBytes += bytes; - Metrics.getInstance().data_synced_bytes.add(bytes); + container.getImplementation(Metrics).data_synced_bytes.add(bytes); } } diff --git a/packages/service-core/src/sync/sync.ts b/packages/service-core/src/sync/sync.ts index 8659e358..ca394d87 100644 --- a/packages/service-core/src/sync/sync.ts +++ b/packages/service-core/src/sync/sync.ts @@ -8,7 +8,6 @@ import * as storage from '../storage/storage-index.js'; import * as util from '../util/util-index.js'; import { logger } from 
'@powersync/lib-services-framework'; -import { Metrics } from '../metrics/Metrics.js'; import { mergeAsyncIterables } from './merge.js'; import { TokenStreamOptions, tokenStream } from './util.js'; import { RequestTracker } from './RequestTracker.js'; diff --git a/packages/service-core/src/sync/util.ts b/packages/service-core/src/sync/util.ts index 36270648..2437d38f 100644 --- a/packages/service-core/src/sync/util.ts +++ b/packages/service-core/src/sync/util.ts @@ -1,7 +1,6 @@ import * as timers from 'timers/promises'; import * as util from '../util/util-index.js'; -import { Metrics } from '../metrics/Metrics.js'; import { RequestTracker } from './RequestTracker.js'; export type TokenStreamOptions = { diff --git a/packages/service-core/src/util/config/collectors/config-collector.ts b/packages/service-core/src/util/config/collectors/config-collector.ts index 86b25c65..ba74fa40 100644 --- a/packages/service-core/src/util/config/collectors/config-collector.ts +++ b/packages/service-core/src/util/config/collectors/config-collector.ts @@ -1,8 +1,6 @@ -import * as t from 'ts-codec'; import * as yaml from 'yaml'; import { configFile } from '@powersync/service-types'; -import { schema } from '@powersync/lib-services-framework'; import { RunnerConfig } from '../types.js'; @@ -23,13 +21,6 @@ export enum ConfigFileFormat { */ const YAML_ENV_PREFIX = 'PS_'; -// ts-codec itself doesn't give great validation errors, so we use json schema for that -const configSchemaValidator = schema - .parseJSONSchema( - t.generateJSONSchema(configFile.powerSyncConfig, { allowAdditional: true, parsers: [configFile.portParser] }) - ) - .validator(); - export abstract class ConfigCollector { abstract get name(): string; @@ -39,45 +30,6 @@ export abstract class ConfigCollector { */ abstract collectSerialized(runnerConfig: RunnerConfig): Promise; - /** - * Collects the PowerSyncConfig settings. - * Validates and decodes the config. 
- * @returns null if this collector cannot provide a config - */ - async collect(runner_config: RunnerConfig): Promise { - const serialized = await this.collectSerialized(runner_config); - if (!serialized) { - return null; - } - - /** - * After this point a serialized config has been found. Any failures to decode or validate - * will result in a hard stop. - */ - const decoded = this.decode(serialized); - this.validate(decoded); - return decoded; - } - - /** - * Validates input config - * ts-codec itself doesn't give great validation errors, so we use json schema for that - */ - validate(config: configFile.PowerSyncConfig) { - const valid = configSchemaValidator.validate(config); - if (!valid.valid) { - throw new Error(`Failed to validate PowerSync config: ${valid.errors.join(', ')}`); - } - } - - decode(encoded: configFile.SerializedPowerSyncConfig): configFile.PowerSyncConfig { - try { - return configFile.powerSyncConfig.decode(encoded); - } catch (ex) { - throw new Error(`Failed to decode PowerSync config: ${ex}`); - } - } - protected parseContent(content: string, contentType?: ConfigFileFormat) { switch (contentType) { case ConfigFileFormat.YAML: diff --git a/packages/service-core/src/util/config/compound-config-collector.ts b/packages/service-core/src/util/config/compound-config-collector.ts index 3ae85660..353b7296 100644 --- a/packages/service-core/src/util/config/compound-config-collector.ts +++ b/packages/service-core/src/util/config/compound-config-collector.ts @@ -1,3 +1,4 @@ +import * as t from 'ts-codec'; import { configFile, normalizeConnection } from '@powersync/service-types'; import { ConfigCollector } from './collectors/config-collector.js'; import { ResolvedConnection, ResolvedPowerSyncConfig, RunnerConfig, SyncRulesConfig } from './types.js'; @@ -9,7 +10,7 @@ import { Base64SyncRulesCollector } from './sync-rules/impl/base64-sync-rules-co import { InlineSyncRulesCollector } from './sync-rules/impl/inline-sync-rules-collector.js'; import { 
FileSystemSyncRulesCollector } from './sync-rules/impl/filesystem-sync-rules-collector.js'; import { FallbackConfigCollector } from './collectors/impl/fallback-config-collector.js'; -import { logger } from '@powersync/lib-services-framework'; +import { logger, schema } from '@powersync/lib-services-framework'; const POWERSYNC_DEV_KID = 'powersync-dev'; @@ -28,6 +29,12 @@ export type CompoundConfigCollectorOptions = { syncRulesCollectors: SyncRulesCollector[]; }; +export type ConfigCollectorGenerics = { + SERIALIZED: configFile.SerializedPowerSyncConfig; + DESERIALIZED: configFile.PowerSyncConfig; + RESOLVED: ResolvedPowerSyncConfig; +}; + const DEFAULT_COLLECTOR_OPTIONS: CompoundConfigCollectorOptions = { configCollectors: [new Base64ConfigCollector(), new FileSystemConfigCollector(), new FallbackConfigCollector()], syncRulesCollectors: [ @@ -37,15 +44,56 @@ const DEFAULT_COLLECTOR_OPTIONS: CompoundConfigCollectorOptions = { ] }; -export class CompoundConfigCollector { +export class CompoundConfigCollector { constructor(protected options: CompoundConfigCollectorOptions = DEFAULT_COLLECTOR_OPTIONS) {} + /** + * The default ts-codec for validations and decoding + */ + get codec(): t.AnyCodec { + return configFile.powerSyncConfig; + } + /** * Collects and resolves base config */ - async collectConfig(runner_config: RunnerConfig = {}): Promise { - const baseConfig = await this.collectBaseConfig(runner_config); + async collectConfig(runnerConfig: RunnerConfig = {}): Promise { + const baseConfig = await this.collectBaseConfig(runnerConfig); + const baseResolvedConfig = await this.resolveBaseConfig(baseConfig, runnerConfig); + return this.resolveConfig(baseConfig, baseResolvedConfig, runnerConfig); + } + + /** + * Collects the base PowerSyncConfig from various registered collectors. + * @throws if no collector could return a configuration. 
+ */ + protected async collectBaseConfig(runner_config: RunnerConfig): Promise { + for (const collector of this.options.configCollectors) { + try { + const baseConfig = await collector.collectSerialized(runner_config); + if (baseConfig) { + const decoded = this.decode(baseConfig); + this.validate(decoded); + return decoded; + } + logger.debug( + `Could not collect PowerSync config with ${collector.name} method. Moving on to next method if available.` + ); + } catch (ex) { + // An error in a collector is a hard stop + throw new Error(`Could not collect config using ${collector.name} method. Caught exception: ${ex}`); + } + } + throw new Error('PowerSyncConfig could not be collected using any of the registered config collectors.'); + } + /** + * Performs the resolving of the common (shared) base configuration + */ + protected async resolveBaseConfig( + baseConfig: Generics['DESERIALIZED'], + runnerConfig: RunnerConfig = {} + ): Promise { const connections = baseConfig.replication?.connections ?? []; if (connections.length > 1) { throw new Error('Only a single replication connection is supported currently'); @@ -93,7 +141,7 @@ export class CompoundConfigCollector { devKey = await auth.KeySpec.importKey(baseDevKey); } - const sync_rules = await this.collectSyncRules(baseConfig, runner_config); + const sync_rules = await this.collectSyncRules(baseConfig, runnerConfig); let jwt_audiences: string[] = baseConfig.client_auth?.audience ?? []; @@ -130,25 +178,17 @@ export class CompoundConfigCollector { } /** - * Collects the base PowerSyncConfig from various registered collectors. - * @throws if no collector could return a configuration. 
+ * Perform any additional resolving from {@link ResolvedPowerSyncConfig} + * to the extended {@link Generics['RESOLVED']} + * */ - protected async collectBaseConfig(runner_config: RunnerConfig): Promise { - for (const collector of this.options.configCollectors) { - try { - const baseConfig = await collector.collect(runner_config); - if (baseConfig) { - return baseConfig; - } - logger.debug( - `Could not collect PowerSync config with ${collector.name} method. Moving on to next method if available.` - ); - } catch (ex) { - // An error in a collector is a hard stop - throw new Error(`Could not collect config using ${collector.name} method. Caught exception: ${ex}`); - } - } - throw new Error('PowerSyncConfig could not be collected using any of the registered config collectors.'); + protected async resolveConfig( + baseConfig: Generics['DESERIALIZED'], + resolvedBaseConfig: ResolvedPowerSyncConfig, + runnerConfig: RunnerConfig = {} + ): Promise { + // The base version has ResolvedPowerSyncConfig == Generics['RESOLVED'] + return resolvedBaseConfig; } protected async collectSyncRules( @@ -173,4 +213,28 @@ export class CompoundConfigCollector { present: false }; } + + /** + * Validates input config + * ts-codec itself doesn't give great validation errors, so we use json schema for that + */ + protected validate(config: Generics['DESERIALIZED']) { + // ts-codec itself doesn't give great validation errors, so we use json schema for that + const validator = schema + .parseJSONSchema(t.generateJSONSchema(this.codec, { allowAdditional: true, parsers: [configFile.portParser] })) + .validator(); + + const valid = validator.validate(config); + if (!valid.valid) { + throw new Error(`Failed to validate PowerSync config: ${valid.errors.join(', ')}`); + } + } + + protected decode(encoded: Generics['SERIALIZED']): Generics['DESERIALIZED'] { + try { + return this.codec.decode(encoded); + } catch (ex) { + throw new Error(`Failed to decode PowerSync config: ${ex}`); + } + } } diff --git 
a/packages/service-core/test/src/util.ts b/packages/service-core/test/src/util.ts index ce2f255b..bf1d6085 100644 --- a/packages/service-core/test/src/util.ts +++ b/packages/service-core/test/src/util.ts @@ -7,14 +7,20 @@ import { PowerSyncMongo } from '../../src/storage/mongo/db.js'; import { escapeIdentifier } from '../../src/util/pgwire_utils.js'; import { env } from './env.js'; import { Metrics } from '@/metrics/Metrics.js'; +import { container } from '@powersync/lib-services-framework'; +import { MeterProvider } from '@opentelemetry/sdk-metrics'; +import { PrometheusExporter } from '@opentelemetry/exporter-prometheus'; // The metrics need to be initialised before they can be used -await Metrics.initialise({ - disable_telemetry_sharing: true, - powersync_instance_id: 'test', - internal_metrics_endpoint: 'unused.for.tests.com' -}); -Metrics.getInstance().resetCounters(); +const prometheus = new PrometheusExporter(); +const metrics = new Metrics( + new MeterProvider({ + readers: [prometheus] + }), + prometheus +); +container.register(Metrics, metrics); +metrics.resetCounters(); export const TEST_URI = env.PG_TEST_URL; diff --git a/packages/service-core/test/src/wal_stream.test.ts b/packages/service-core/test/src/wal_stream.test.ts index a6cb83fa..1f2a4204 100644 --- a/packages/service-core/test/src/wal_stream.test.ts +++ b/packages/service-core/test/src/wal_stream.test.ts @@ -5,6 +5,7 @@ import { MONGO_STORAGE_FACTORY } from './util.js'; import { putOp, removeOp, walStreamTest } from './wal_stream_utils.js'; import { pgwireRows } from '@powersync/service-jpgwire'; import { Metrics } from '@/metrics/Metrics.js'; +import { container } from '@powersync/lib-services-framework'; type StorageFactory = () => Promise; @@ -41,10 +42,9 @@ bucket_definitions: await context.replicateSnapshot(); - const startRowCount = - (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 
0; - const startTxCount = - (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; + const metrics = container.getImplementation(Metrics); + const startRowCount = (await metrics.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; + const startTxCount = (await metrics.getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; context.startStreaming(); @@ -59,9 +59,8 @@ bucket_definitions: expect(data).toMatchObject([ putOp('test_data', { id: test_id, description: 'test1', num: 1152921504606846976n }) ]); - const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; - const endTxCount = - (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; + const endRowCount = (await metrics.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; + const endTxCount = (await metrics.getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; expect(endRowCount - startRowCount).toEqual(1); expect(endTxCount - startTxCount).toEqual(1); }) @@ -83,10 +82,9 @@ bucket_definitions: await context.replicateSnapshot(); - const startRowCount = - (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; - const startTxCount = - (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; + const metrics = container.getImplementation(Metrics); + const startRowCount = (await metrics.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; + const startTxCount = (await metrics.getMetricValueForTests('powersync_transactions_replicated_total')) ?? 
0; context.startStreaming(); @@ -97,9 +95,8 @@ bucket_definitions: const data = await context.getBucketData('global[]'); expect(data).toMatchObject([putOp('test_DATA', { id: test_id, description: 'test1' })]); - const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; - const endTxCount = - (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; + const endRowCount = (await metrics.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; + const endTxCount = (await metrics.getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; expect(endRowCount - startRowCount).toEqual(1); expect(endTxCount - startTxCount).toEqual(1); }) @@ -293,10 +290,9 @@ bucket_definitions: await context.replicateSnapshot(); - const startRowCount = - (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; - const startTxCount = - (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; + const metrics = container.getImplementation(Metrics); + const startRowCount = (await metrics.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; + const startTxCount = (await metrics.getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; context.startStreaming(); @@ -307,9 +303,8 @@ bucket_definitions: const data = await context.getBucketData('global[]'); expect(data).toMatchObject([]); - const endRowCount = (await Metrics.getInstance().getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; - const endTxCount = - (await Metrics.getInstance().getMetricValueForTests('powersync_transactions_replicated_total')) ?? 0; + const endRowCount = (await metrics.getMetricValueForTests('powersync_rows_replicated_total')) ?? 0; + const endTxCount = (await metrics.getMetricValueForTests('powersync_transactions_replicated_total')) ?? 
0; // There was a transaction, but we should not replicate any actual data expect(endRowCount - startRowCount).toEqual(0); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 3134582d..5a309786 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -154,15 +154,9 @@ importers: '@opentelemetry/api': specifier: ~1.8.0 version: 1.8.0 - '@opentelemetry/exporter-metrics-otlp-http': - specifier: ^0.51.1 - version: 0.51.1(@opentelemetry/api@1.8.0) '@opentelemetry/exporter-prometheus': specifier: ^0.51.1 version: 0.51.1(@opentelemetry/api@1.8.0) - '@opentelemetry/resources': - specifier: ^1.24.1 - version: 1.24.1(@opentelemetry/api@1.8.0) '@opentelemetry/sdk-metrics': specifier: 1.24.1 version: 1.24.1(@opentelemetry/api@1.8.0) @@ -301,14 +295,20 @@ importers: specifier: 8.4.1 version: 8.4.1 '@opentelemetry/api': - specifier: ~1.6.0 - version: 1.6.0 + specifier: ~1.8.0 + version: 1.8.0 + '@opentelemetry/exporter-metrics-otlp-http': + specifier: ^0.51.1 + version: 0.51.1(@opentelemetry/api@1.8.0) '@opentelemetry/exporter-prometheus': - specifier: ^0.43.0 - version: 0.43.0(@opentelemetry/api@1.6.0) + specifier: ^0.51.1 + version: 0.51.1(@opentelemetry/api@1.8.0) + '@opentelemetry/resources': + specifier: ^1.24.1 + version: 1.25.0(@opentelemetry/api@1.8.0) '@opentelemetry/sdk-metrics': - specifier: ^1.17.0 - version: 1.24.1(@opentelemetry/api@1.6.0) + specifier: 1.24.1 + version: 1.24.1(@opentelemetry/api@1.8.0) '@powersync/lib-services-framework': specifier: workspace:* version: link:../libs/lib-services @@ -744,10 +744,6 @@ packages: resolution: {integrity: sha512-HxjD7xH9iAE4OyhNaaSec65i1H6QZYBWSwWkowFfsc5YAcDvJG30/J1sRKXEQqdmUcKTXEAnA66UciqZha/4+Q==} engines: {node: '>=14'} - '@opentelemetry/api@1.6.0': - resolution: {integrity: sha512-OWlrQAnWn9577PhVgqjUvMr1pg57Bc4jv0iL4w0PRuOSRvq67rvHW9Ie/dZVMvCzhSCB+UxhcY/PmCmFj33Q+g==} - engines: {node: '>=8.0.0'} - '@opentelemetry/api@1.8.0': resolution: {integrity: 
sha512-I/s6F7yKUDdtMsoBWXJe8Qz40Tui5vsuKCWJEWVL+5q9sSWRzzx6v2KeNsOBEwd94j0eWkpWCH4yB6rZg9Mf0w==} engines: {node: '>=8.0.0'} @@ -762,12 +758,6 @@ packages: peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@opentelemetry/core@1.17.0': - resolution: {integrity: sha512-tfnl3h+UefCgx1aeN2xtrmr6BmdWGKXypk0pflQR0urFS40aE88trnkOMc2HTJZbMrqEEl4HsaBeFhwLVXsrJg==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.7.0' - '@opentelemetry/core@1.24.1': resolution: {integrity: sha512-wMSGfsdmibI88K9wB498zXY04yThPexo8jvwNNlm542HZB7XrrMRBbAyKJqG8qDRJwIBdBrPMi4V9ZPW/sqrcg==} engines: {node: '>=14'} @@ -786,12 +776,6 @@ packages: peerDependencies: '@opentelemetry/api': ^1.3.0 - '@opentelemetry/exporter-prometheus@0.43.0': - resolution: {integrity: sha512-tJeZVmzzeG98BMPssrnUYZ7AdMtZEYqgOL44z/bF4YWqGePQoelmxuTn8Do0tIyBURqr0Whi/7P5/XxWMK1zTw==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': ^1.3.0 - '@opentelemetry/exporter-prometheus@0.51.1': resolution: {integrity: sha512-c8TrTlLm9JJRIHW6MtFv6ESoZRgXBXD/YrTRYylWiyYBOVbYHo1c5Qaw/j/thXDhkmYOYAn4LAhJZpLl5gBFEQ==} engines: {node: '>=14'} @@ -922,12 +906,6 @@ packages: resolution: {integrity: sha512-faYX1N0gpLhej/6nyp6bgRjzAKXn5GOEMYY7YhciSfCoITAktLUtQ36d24QEWNA1/WA1y6qQunCe0OhHRkVl9g==} engines: {node: '>=14'} - '@opentelemetry/resources@1.17.0': - resolution: {integrity: sha512-+u0ciVnj8lhuL/qGRBPeVYvk7fL+H/vOddfvmOeJaA1KC+5/3UED1c9KoZQlRsNT5Kw1FaK8LkY2NVLYfOVZQw==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.0.0 <1.7.0' - '@opentelemetry/resources@1.24.1': resolution: {integrity: sha512-cyv0MwAaPF7O86x5hk3NNgenMObeejZFLJJDVuSeSMIsknlsj3oOZzRv3qSzlwYomXsICfBeFFlxwHQte5mGXQ==} engines: {node: '>=14'} @@ -947,12 +925,6 @@ packages: '@opentelemetry/api': '>=1.4.0 <1.9.0' '@opentelemetry/api-logs': '>=0.39.1' - '@opentelemetry/sdk-metrics@1.17.0': - resolution: {integrity: 
sha512-HlWM27yGmYuwCoVRe3yg2PqKnIsq0kEF0HQgvkeDWz2NYkq9fFaSspR6kvjxUTbghAlZrabiqbgyKoYpYaXS3w==} - engines: {node: '>=14'} - peerDependencies: - '@opentelemetry/api': '>=1.3.0 <1.7.0' - '@opentelemetry/sdk-metrics@1.24.1': resolution: {integrity: sha512-FrAqCbbGao9iKI+Mgh+OsC9+U2YMoXnlDHe06yH7dvavCKzE3S892dGtX54+WhSFVxHR/TMRVJiK/CV93GR0TQ==} engines: {node: '>=14'} @@ -971,10 +943,6 @@ packages: peerDependencies: '@opentelemetry/api': '>=1.0.0 <1.10.0' - '@opentelemetry/semantic-conventions@1.17.0': - resolution: {integrity: sha512-+fguCd2d8d2qruk0H0DsCEy2CTK3t0Tugg7MhZ/UQMvmewbZLNnJ6heSYyzIZWG5IPfAXzoj4f4F/qpM7l4VBA==} - engines: {node: '>=14'} - '@opentelemetry/semantic-conventions@1.24.1': resolution: {integrity: sha512-VkliWlS4/+GHLLW7J/rVBA00uXus1SWvwFvcUDxDwmFxYfg/2VI6ekwdXS28cjI8Qz2ky2BzG8OUHo+WeYIWqw==} engines: {node: '>=14'} @@ -4499,8 +4467,6 @@ snapshots: dependencies: '@opentelemetry/api': 1.8.0 - '@opentelemetry/api@1.6.0': {} - '@opentelemetry/api@1.8.0': {} '@opentelemetry/api@1.9.0': {} @@ -4509,16 +4475,6 @@ snapshots: dependencies: '@opentelemetry/api': 1.9.0 - '@opentelemetry/core@1.17.0(@opentelemetry/api@1.6.0)': - dependencies: - '@opentelemetry/api': 1.6.0 - '@opentelemetry/semantic-conventions': 1.17.0 - - '@opentelemetry/core@1.24.1(@opentelemetry/api@1.6.0)': - dependencies: - '@opentelemetry/api': 1.6.0 - '@opentelemetry/semantic-conventions': 1.24.1 - '@opentelemetry/core@1.24.1(@opentelemetry/api@1.8.0)': dependencies: '@opentelemetry/api': 1.8.0 @@ -4529,11 +4485,6 @@ snapshots: '@opentelemetry/api': 1.9.0 '@opentelemetry/semantic-conventions': 1.24.1 - '@opentelemetry/core@1.25.0(@opentelemetry/api@1.8.0)': - dependencies: - '@opentelemetry/api': 1.8.0 - '@opentelemetry/semantic-conventions': 1.25.0 - '@opentelemetry/core@1.25.0(@opentelemetry/api@1.9.0)': dependencies: '@opentelemetry/api': 1.9.0 @@ -4548,13 +4499,6 @@ snapshots: '@opentelemetry/resources': 1.24.1(@opentelemetry/api@1.8.0) '@opentelemetry/sdk-metrics': 
1.24.1(@opentelemetry/api@1.8.0) - '@opentelemetry/exporter-prometheus@0.43.0(@opentelemetry/api@1.6.0)': - dependencies: - '@opentelemetry/api': 1.6.0 - '@opentelemetry/core': 1.17.0(@opentelemetry/api@1.6.0) - '@opentelemetry/resources': 1.17.0(@opentelemetry/api@1.6.0) - '@opentelemetry/sdk-metrics': 1.17.0(@opentelemetry/api@1.6.0) - '@opentelemetry/exporter-prometheus@0.51.1(@opentelemetry/api@1.8.0)': dependencies: '@opentelemetry/api': 1.8.0 @@ -4753,18 +4697,6 @@ snapshots: '@opentelemetry/redis-common@0.36.2': {} - '@opentelemetry/resources@1.17.0(@opentelemetry/api@1.6.0)': - dependencies: - '@opentelemetry/api': 1.6.0 - '@opentelemetry/core': 1.17.0(@opentelemetry/api@1.6.0) - '@opentelemetry/semantic-conventions': 1.17.0 - - '@opentelemetry/resources@1.24.1(@opentelemetry/api@1.6.0)': - dependencies: - '@opentelemetry/api': 1.6.0 - '@opentelemetry/core': 1.24.1(@opentelemetry/api@1.6.0) - '@opentelemetry/semantic-conventions': 1.24.1 - '@opentelemetry/resources@1.24.1(@opentelemetry/api@1.8.0)': dependencies: '@opentelemetry/api': 1.8.0 @@ -4780,7 +4712,7 @@ snapshots: '@opentelemetry/resources@1.25.0(@opentelemetry/api@1.8.0)': dependencies: '@opentelemetry/api': 1.8.0 - '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.8.0) + '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.25.0 '@opentelemetry/resources@1.25.0(@opentelemetry/api@1.9.0)': @@ -4796,20 +4728,6 @@ snapshots: '@opentelemetry/core': 1.24.1(@opentelemetry/api@1.8.0) '@opentelemetry/resources': 1.24.1(@opentelemetry/api@1.8.0) - '@opentelemetry/sdk-metrics@1.17.0(@opentelemetry/api@1.6.0)': - dependencies: - '@opentelemetry/api': 1.6.0 - '@opentelemetry/core': 1.17.0(@opentelemetry/api@1.6.0) - '@opentelemetry/resources': 1.17.0(@opentelemetry/api@1.6.0) - lodash.merge: 4.6.2 - - '@opentelemetry/sdk-metrics@1.24.1(@opentelemetry/api@1.6.0)': - dependencies: - '@opentelemetry/api': 1.6.0 - '@opentelemetry/core': 1.24.1(@opentelemetry/api@1.6.0) 
- '@opentelemetry/resources': 1.24.1(@opentelemetry/api@1.6.0) - lodash.merge: 4.6.2 - '@opentelemetry/sdk-metrics@1.24.1(@opentelemetry/api@1.8.0)': dependencies: '@opentelemetry/api': 1.8.0 @@ -4834,8 +4752,8 @@ snapshots: '@opentelemetry/sdk-trace-base@1.25.0(@opentelemetry/api@1.8.0)': dependencies: '@opentelemetry/api': 1.8.0 - '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.8.0) - '@opentelemetry/resources': 1.25.0(@opentelemetry/api@1.8.0) + '@opentelemetry/core': 1.25.0(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.25.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.25.0 '@opentelemetry/sdk-trace-base@1.25.0(@opentelemetry/api@1.9.0)': @@ -4845,8 +4763,6 @@ snapshots: '@opentelemetry/resources': 1.25.0(@opentelemetry/api@1.9.0) '@opentelemetry/semantic-conventions': 1.25.0 - '@opentelemetry/semantic-conventions@1.17.0': {} - '@opentelemetry/semantic-conventions@1.24.1': {} '@opentelemetry/semantic-conventions@1.25.0': {} diff --git a/service/package.json b/service/package.json index e8a68d9c..03e9d944 100644 --- a/service/package.json +++ b/service/package.json @@ -11,11 +11,13 @@ }, "dependencies": { "@fastify/cors": "8.4.1", - "@opentelemetry/api": "~1.6.0", - "@opentelemetry/exporter-prometheus": "^0.43.0", - "@opentelemetry/sdk-metrics": "^1.17.0", - "@powersync/service-core": "workspace:*", + "@opentelemetry/api": "~1.8.0", + "@opentelemetry/exporter-metrics-otlp-http": "^0.51.1", + "@opentelemetry/exporter-prometheus": "^0.51.1", + "@opentelemetry/resources": "^1.24.1", + "@opentelemetry/sdk-metrics": "1.24.1", "@powersync/lib-services-framework": "workspace:*", + "@powersync/service-core": "workspace:*", "@powersync/service-jpgwire": "workspace:*", "@powersync/service-jsonbig": "workspace:*", "@powersync/service-rsocket-router": "workspace:*", diff --git a/service/src/runners/server.ts b/service/src/runners/server.ts index bb525a26..27b1bb68 100644 --- a/service/src/runners/server.ts +++ 
b/service/src/runners/server.ts @@ -119,7 +119,7 @@ export async function startServer(runnerConfig: core.utils.RunnerConfig) { await system.start(); logger.info('System started'); - core.Metrics.getInstance().configureApiMetrics(); + container.getImplementation(core.Metrics).configureApiMetrics(); await server.listen({ host: '0.0.0.0', diff --git a/service/src/runners/stream-worker.ts b/service/src/runners/stream-worker.ts index 96efef38..d7627a92 100644 --- a/service/src/runners/stream-worker.ts +++ b/service/src/runners/stream-worker.ts @@ -22,7 +22,7 @@ export async function startStreamWorker(runnerConfig: utils.RunnerConfig) { await system.start(); logger.info('System started'); - Metrics.getInstance().configureReplicationMetrics(system); + container.getImplementation(Metrics).configureReplicationMetrics(system); const mngr = new replication.WalStreamManager(system); mngr.start(); diff --git a/service/src/system/PowerSyncSystem.ts b/service/src/system/PowerSyncSystem.ts index 93024dca..dd8a7a7a 100644 --- a/service/src/system/PowerSyncSystem.ts +++ b/service/src/system/PowerSyncSystem.ts @@ -1,5 +1,7 @@ +import { container } from '@powersync/lib-services-framework'; import { db, system, utils, storage, Metrics } from '@powersync/service-core'; import * as pgwire from '@powersync/service-jpgwire'; +import { createMetrics } from '../telemetry/metrics.js'; export class PowerSyncSystem extends system.CorePowerSyncSystem { storage: storage.BucketStorageFactory; @@ -41,14 +43,26 @@ export class PowerSyncSystem extends system.CorePowerSyncSystem { this.withLifecycle(this.storage, { async start(storage) { const instanceId = await storage.getPowerSyncInstanceId(); - await Metrics.initialise({ - powersync_instance_id: instanceId, - disable_telemetry_sharing: config.telemetry.disable_telemetry_sharing, - internal_metrics_endpoint: config.telemetry.internal_service_endpoint - }); + /** + * There should only ever be one instance of Metrics. 
+ * In the unified runner there are two instances of System. + * This check should be sufficient if the runner functions and + * System.start functions are awaited correctly. + */ + const existingMetrics = container.getOptional(Metrics); + if (!existingMetrics) { + container.register( + Metrics, + await createMetrics({ + powersync_instance_id: instanceId, + disable_telemetry_sharing: config.telemetry.disable_telemetry_sharing, + internal_metrics_endpoint: config.telemetry.internal_service_endpoint + }) + ); + } }, async stop() { - await Metrics.getInstance().shutdown(); + await container.getImplementation(Metrics).shutdown(); } }); } diff --git a/service/src/telemetry/metrics.ts b/service/src/telemetry/metrics.ts new file mode 100644 index 00000000..cf892af4 --- /dev/null +++ b/service/src/telemetry/metrics.ts @@ -0,0 +1,55 @@ +import { logger } from '@powersync/lib-services-framework'; +import { metrics } from '@powersync/service-core'; +import { PrometheusExporter } from '@opentelemetry/exporter-prometheus'; +import { MeterProvider, MetricReader, PeriodicExportingMetricReader } from '@opentelemetry/sdk-metrics'; +import { Resource } from '@opentelemetry/resources'; +import { OTLPMetricExporter } from '@opentelemetry/exporter-metrics-otlp-http'; + +/** + * Creates a {@link Metrics} implementation. + * This version will conditionally enable anonymous telemetry. + * A Prometheus exporter is created for internal use - its server is not started or exposed. + */ +export async function createMetrics(options: metrics.MetricsOptions) { + logger.info('Configuring telemetry.'); + + logger.info( + ` + Attention: + PowerSync collects completely anonymous telemetry regarding usage. + This information is used to shape our roadmap to better serve our customers.
+ You can learn more, including how to opt-out if you'd not like to participate in this anonymous program, by visiting the following URL: + https://docs.powersync.com/self-hosting/telemetry + Anonymous telemetry is currently: ${options.disable_telemetry_sharing ? 'disabled' : 'enabled'} + `.trim() + ); + + const configuredExporters: MetricReader[] = []; + + // This is used internally for tests + const prometheusExporter = new PrometheusExporter({ preventServerStart: true }); + configuredExporters.push(prometheusExporter); + + if (!options.disable_telemetry_sharing) { + logger.info('Sharing anonymous telemetry'); + const periodicExporter = new PeriodicExportingMetricReader({ + exporter: new OTLPMetricExporter({ + url: options.internal_metrics_endpoint + }), + exportIntervalMillis: 1000 * 60 * 5 // 5 minutes + }); + + configuredExporters.push(periodicExporter); + } + + const meterProvider = new MeterProvider({ + resource: new Resource({ + ['service']: 'PowerSync', + ['instance_id']: options.powersync_instance_id + }), + readers: configuredExporters + }); + + logger.info('Telemetry configuration complete.'); + return new metrics.Metrics(meterProvider, prometheusExporter); +}