structure saas with tools
This commit is contained in:
@@ -0,0 +1,582 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import weakref
|
||||
from atexit import register, unregister
|
||||
from logging import getLogger
|
||||
from os import environ
|
||||
from threading import Lock
|
||||
from time import time_ns
|
||||
from typing import Optional, Sequence
|
||||
|
||||
# This kind of import is needed to avoid Sphinx errors.
|
||||
import opentelemetry.sdk.metrics
|
||||
from opentelemetry.metrics import Counter as APICounter
|
||||
from opentelemetry.metrics import Histogram as APIHistogram
|
||||
from opentelemetry.metrics import Meter as APIMeter
|
||||
from opentelemetry.metrics import MeterProvider as APIMeterProvider
|
||||
from opentelemetry.metrics import NoOpMeter
|
||||
from opentelemetry.metrics import ObservableCounter as APIObservableCounter
|
||||
from opentelemetry.metrics import ObservableGauge as APIObservableGauge
|
||||
from opentelemetry.metrics import (
|
||||
ObservableUpDownCounter as APIObservableUpDownCounter,
|
||||
)
|
||||
from opentelemetry.metrics import UpDownCounter as APIUpDownCounter
|
||||
from opentelemetry.metrics import _Gauge as APIGauge
|
||||
from opentelemetry.sdk.environment_variables import (
|
||||
OTEL_METRICS_EXEMPLAR_FILTER,
|
||||
OTEL_SDK_DISABLED,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
|
||||
from opentelemetry.sdk.metrics._internal.exemplar import (
|
||||
AlwaysOffExemplarFilter,
|
||||
AlwaysOnExemplarFilter,
|
||||
ExemplarFilter,
|
||||
TraceBasedExemplarFilter,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.instrument import (
|
||||
_Counter,
|
||||
_Gauge,
|
||||
_Histogram,
|
||||
_ObservableCounter,
|
||||
_ObservableGauge,
|
||||
_ObservableUpDownCounter,
|
||||
_UpDownCounter,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.measurement_consumer import (
|
||||
MeasurementConsumer,
|
||||
SynchronousMeasurementConsumer,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.sdk_configuration import (
|
||||
SdkConfiguration,
|
||||
)
|
||||
from opentelemetry.sdk.resources import Resource
|
||||
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
|
||||
from opentelemetry.util._once import Once
|
||||
from opentelemetry.util.types import (
|
||||
Attributes,
|
||||
)
|
||||
|
||||
_logger = getLogger(__name__)
|
||||
|
||||
|
||||
class Meter(APIMeter):
    """See `opentelemetry.metrics.Meter`."""

    def __init__(
        self,
        instrumentation_scope: InstrumentationScope,
        measurement_consumer: MeasurementConsumer,
    ):
        super().__init__(
            name=instrumentation_scope.name,
            version=instrumentation_scope.version,
            schema_url=instrumentation_scope.schema_url,
        )
        self._instrumentation_scope = instrumentation_scope
        self._measurement_consumer = measurement_consumer
        # Maps instrument id -> created instrument so a duplicate
        # registration hands back the existing object instead of a new one.
        self._instrument_id_instrument = {}
        self._instrument_id_instrument_lock = Lock()

    def _lookup_existing_instrument(
        self, name, api_class_name, unit, description, status
    ):
        """Handle the outcome of ``_register_instrument``.

        Logs a warning when the registration conflicts with a previous one
        and returns the previously created instrument when this registration
        is a duplicate; returns ``None`` when a new instrument must be built.
        """
        if status.conflict:
            # FIXME #2558 go through all views here and check if this
            # instrument registration conflict can be fixed. If it can be, do
            # not log the following warning.
            self._log_instrument_registration_conflict(
                name,
                api_class_name,
                unit,
                description,
                status,
            )
        if status.already_registered:
            with self._instrument_id_instrument_lock:
                return self._instrument_id_instrument[status.instrument_id]
        return None

    def _store_instrument(self, status, instrument):
        """Remember *instrument* under its instrument id and return it."""
        with self._instrument_id_instrument_lock:
            self._instrument_id_instrument[status.instrument_id] = instrument
        return instrument

    @staticmethod
    def _validated_boundaries_advisory(boundaries):
        """Return *boundaries* when it is a sequence of numbers, else None.

        An invalid advisory is dropped with a warning rather than raised,
        so instrument creation never fails because of it.
        """
        if boundaries is None:
            return None
        invalid_advisory = True
        if isinstance(boundaries, Sequence):
            try:
                invalid_advisory = not all(
                    isinstance(e, (float, int)) for e in boundaries
                )
            except (KeyError, TypeError):
                invalid_advisory = True
        if invalid_advisory:
            _logger.warning(
                "explicit_bucket_boundaries_advisory must be a sequence of numbers"
            )
            return None
        return boundaries

    def create_counter(self, name, unit="", description="") -> APICounter:
        """See `opentelemetry.metrics.Meter.create_counter`."""
        status = self._register_instrument(name, _Counter, unit, description)
        existing = self._lookup_existing_instrument(
            name, APICounter.__name__, unit, description, status
        )
        if existing is not None:
            return existing
        return self._store_instrument(
            status,
            _Counter(
                name,
                self._instrumentation_scope,
                self._measurement_consumer,
                unit,
                description,
            ),
        )

    def create_up_down_counter(
        self, name, unit="", description=""
    ) -> APIUpDownCounter:
        """See `opentelemetry.metrics.Meter.create_up_down_counter`."""
        status = self._register_instrument(
            name, _UpDownCounter, unit, description
        )
        existing = self._lookup_existing_instrument(
            name, APIUpDownCounter.__name__, unit, description, status
        )
        if existing is not None:
            return existing
        return self._store_instrument(
            status,
            _UpDownCounter(
                name,
                self._instrumentation_scope,
                self._measurement_consumer,
                unit,
                description,
            ),
        )

    def create_observable_counter(
        self,
        name,
        callbacks=None,
        unit="",
        description="",
    ) -> APIObservableCounter:
        """See `opentelemetry.metrics.Meter.create_observable_counter`."""
        status = self._register_instrument(
            name, _ObservableCounter, unit, description
        )
        existing = self._lookup_existing_instrument(
            name, APIObservableCounter.__name__, unit, description, status
        )
        if existing is not None:
            return existing
        instrument = _ObservableCounter(
            name,
            self._instrumentation_scope,
            self._measurement_consumer,
            callbacks,
            unit,
            description,
        )
        # Asynchronous instruments are observed at collection time, so the
        # consumer must be told about them.
        self._measurement_consumer.register_asynchronous_instrument(instrument)
        return self._store_instrument(status, instrument)

    def create_histogram(
        self,
        name: str,
        unit: str = "",
        description: str = "",
        *,
        explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None,
    ) -> APIHistogram:
        """See `opentelemetry.metrics.Meter.create_histogram`."""
        explicit_bucket_boundaries_advisory = (
            self._validated_boundaries_advisory(
                explicit_bucket_boundaries_advisory
            )
        )

        status = self._register_instrument(
            name,
            _Histogram,
            unit,
            description,
            explicit_bucket_boundaries_advisory,
        )
        existing = self._lookup_existing_instrument(
            name, APIHistogram.__name__, unit, description, status
        )
        if existing is not None:
            return existing
        return self._store_instrument(
            status,
            _Histogram(
                name,
                self._instrumentation_scope,
                self._measurement_consumer,
                unit,
                description,
                explicit_bucket_boundaries_advisory,
            ),
        )

    def create_gauge(self, name, unit="", description="") -> APIGauge:
        """See `opentelemetry.metrics.Meter.create_gauge`."""
        status = self._register_instrument(name, _Gauge, unit, description)
        existing = self._lookup_existing_instrument(
            name, APIGauge.__name__, unit, description, status
        )
        if existing is not None:
            return existing
        return self._store_instrument(
            status,
            _Gauge(
                name,
                self._instrumentation_scope,
                self._measurement_consumer,
                unit,
                description,
            ),
        )

    def create_observable_gauge(
        self, name, callbacks=None, unit="", description=""
    ) -> APIObservableGauge:
        """See `opentelemetry.metrics.Meter.create_observable_gauge`."""
        status = self._register_instrument(
            name, _ObservableGauge, unit, description
        )
        existing = self._lookup_existing_instrument(
            name, APIObservableGauge.__name__, unit, description, status
        )
        if existing is not None:
            return existing
        instrument = _ObservableGauge(
            name,
            self._instrumentation_scope,
            self._measurement_consumer,
            callbacks,
            unit,
            description,
        )
        # Asynchronous instruments are observed at collection time, so the
        # consumer must be told about them.
        self._measurement_consumer.register_asynchronous_instrument(instrument)
        return self._store_instrument(status, instrument)

    def create_observable_up_down_counter(
        self, name, callbacks=None, unit="", description=""
    ) -> APIObservableUpDownCounter:
        """See `opentelemetry.metrics.Meter.create_observable_up_down_counter`."""
        status = self._register_instrument(
            name, _ObservableUpDownCounter, unit, description
        )
        existing = self._lookup_existing_instrument(
            name,
            APIObservableUpDownCounter.__name__,
            unit,
            description,
            status,
        )
        if existing is not None:
            return existing
        instrument = _ObservableUpDownCounter(
            name,
            self._instrumentation_scope,
            self._measurement_consumer,
            callbacks,
            unit,
            description,
        )
        # Asynchronous instruments are observed at collection time, so the
        # consumer must be told about them.
        self._measurement_consumer.register_asynchronous_instrument(instrument)
        return self._store_instrument(status, instrument)
|
||||
|
||||
|
||||
def _get_exemplar_filter(exemplar_filter: str) -> ExemplarFilter:
    """Translate an ``OTEL_METRICS_EXEMPLAR_FILTER`` value into a filter.

    Args:
        exemplar_filter: One of ``"trace_based"``, ``"always_on"`` or
            ``"always_off"``.

    Raises:
        ValueError: If *exemplar_filter* does not name a known filter.
    """
    filter_classes = {
        "trace_based": TraceBasedExemplarFilter,
        "always_on": AlwaysOnExemplarFilter,
        "always_off": AlwaysOffExemplarFilter,
    }
    filter_class = filter_classes.get(exemplar_filter)
    if filter_class is None:
        raise ValueError(f"Unknown exemplar filter '{exemplar_filter}'.")
    return filter_class()
|
||||
|
||||
|
||||
class MeterProvider(APIMeterProvider):
    r"""See `opentelemetry.metrics.MeterProvider`.

    Args:
        metric_readers: Register metric readers to collect metrics from the SDK
            on demand. Each :class:`opentelemetry.sdk.metrics.export.MetricReader` is
            completely independent and will collect separate streams of
            metrics. TODO: reference ``PeriodicExportingMetricReader`` usage with push
            exporters here.
        resource: The resource representing what the metrics emitted from the SDK pertain to.
        shutdown_on_exit: If true, registers an `atexit` handler to call
            `MeterProvider.shutdown`
        views: The views to configure the metric output the SDK

    By default, instruments which do not match any :class:`opentelemetry.sdk.metrics.view.View` (or if no :class:`opentelemetry.sdk.metrics.view.View`\ s
    are provided) will report metrics with the default aggregation for the
    instrument's kind. To disable instruments by default, configure a match-all
    :class:`opentelemetry.sdk.metrics.view.View` with `DropAggregation` and then create :class:`opentelemetry.sdk.metrics.view.View`\ s to re-enable
    individual instruments:

    .. code-block:: python
        :caption: Disable default views

        MeterProvider(
            views=[
                View(instrument_name="*", aggregation=DropAggregation()),
                View(instrument_name="mycounter"),
            ],
            # ...
        )
    """

    # Process-wide registry of every MetricReader attached to any
    # MeterProvider; used to reject attaching one reader to two providers.
    # A WeakSet so readers can still be garbage-collected.
    _all_metric_readers_lock = Lock()
    _all_metric_readers = weakref.WeakSet()

    def __init__(
        self,
        metric_readers: Sequence[
            "opentelemetry.sdk.metrics.export.MetricReader"
        ] = (),
        resource: Optional[Resource] = None,
        exemplar_filter: Optional[ExemplarFilter] = None,
        shutdown_on_exit: bool = True,
        views: Sequence["opentelemetry.sdk.metrics.view.View"] = (),
    ):
        self._lock = Lock()
        self._meter_lock = Lock()
        self._atexit_handler = None
        if resource is None:
            resource = Resource.create({})
        self._sdk_config = SdkConfiguration(
            # An explicit exemplar_filter argument wins; otherwise fall back
            # to the OTEL_METRICS_EXEMPLAR_FILTER env var (default
            # "trace_based").
            exemplar_filter=(
                exemplar_filter
                or _get_exemplar_filter(
                    environ.get(OTEL_METRICS_EXEMPLAR_FILTER, "trace_based")
                )
            ),
            resource=resource,
            metric_readers=metric_readers,
            views=views,
        )
        self._measurement_consumer = SynchronousMeasurementConsumer(
            sdk_config=self._sdk_config
        )
        # OTEL_SDK_DISABLED turns get_meter() into a NoOpMeter factory.
        disabled = environ.get(OTEL_SDK_DISABLED, "")
        self._disabled = disabled.lower().strip() == "true"

        if shutdown_on_exit:
            self._atexit_handler = register(self.shutdown)

        self._meters = {}
        self._shutdown_once = Once()
        self._shutdown = False

        for metric_reader in self._sdk_config.metric_readers:
            with self._all_metric_readers_lock:
                if metric_reader in self._all_metric_readers:
                    # pylint: disable=broad-exception-raised
                    raise Exception(
                        f"MetricReader {metric_reader} has been registered "
                        "already in other MeterProvider instance"
                    )

                self._all_metric_readers.add(metric_reader)

            # Give the reader a way to pull metrics out of this provider.
            metric_reader._set_collect_callback(
                self._measurement_consumer.collect
            )

    def force_flush(self, timeout_millis: float = 10_000) -> bool:
        """Flush every metric reader, sharing one overall deadline.

        Each reader gets whatever time remains of *timeout_millis*; errors
        are collected and re-raised together after all readers were tried.
        """
        deadline_ns = time_ns() + timeout_millis * 10**6

        metric_reader_error = {}

        for metric_reader in self._sdk_config.metric_readers:
            current_ts = time_ns()
            try:
                if current_ts >= deadline_ns:
                    raise MetricsTimeoutError(
                        "Timed out while flushing metric readers"
                    )
                metric_reader.force_flush(
                    timeout_millis=(deadline_ns - current_ts) / 10**6
                )

            # pylint: disable=broad-exception-caught
            except Exception as error:
                metric_reader_error[metric_reader] = error

        if metric_reader_error:
            metric_reader_error_string = "\n".join(
                [
                    f"{metric_reader.__class__.__name__}: {repr(error)}"
                    for metric_reader, error in metric_reader_error.items()
                ]
            )

            # pylint: disable=broad-exception-raised
            raise Exception(
                "MeterProvider.force_flush failed because the following "
                "metric readers failed during collect:\n"
                f"{metric_reader_error_string}"
            )
        return True

    def shutdown(self, timeout_millis: float = 30_000):
        """Shut down every metric reader exactly once.

        Subsequent calls only log a warning. Readers that fail or miss the
        shared deadline are reported together in a single exception after
        all of them were attempted.
        """
        deadline_ns = time_ns() + timeout_millis * 10**6

        def _shutdown():
            self._shutdown = True

        # Once() guarantees the flag flips in exactly one caller.
        did_shutdown = self._shutdown_once.do_once(_shutdown)

        if not did_shutdown:
            _logger.warning("shutdown can only be called once")
            return

        metric_reader_error = {}

        for metric_reader in self._sdk_config.metric_readers:
            current_ts = time_ns()
            try:
                if current_ts >= deadline_ns:
                    # pylint: disable=broad-exception-raised
                    raise Exception(
                        "Didn't get to execute, deadline already exceeded"
                    )
                metric_reader.shutdown(
                    timeout_millis=(deadline_ns - current_ts) / 10**6
                )

            # pylint: disable=broad-exception-caught
            except Exception as error:
                metric_reader_error[metric_reader] = error

        # Drop the atexit hook so shutdown is not attempted again at
        # interpreter exit.
        if self._atexit_handler is not None:
            unregister(self._atexit_handler)
            self._atexit_handler = None

        if metric_reader_error:
            metric_reader_error_string = "\n".join(
                [
                    f"{metric_reader.__class__.__name__}: {repr(error)}"
                    for metric_reader, error in metric_reader_error.items()
                ]
            )

            # pylint: disable=broad-exception-raised
            raise Exception(
                (
                    "MeterProvider.shutdown failed because the following "
                    "metric readers failed during shutdown:\n"
                    f"{metric_reader_error_string}"
                )
            )

    def get_meter(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> Meter:
        """Return the `Meter` for the given instrumentation scope.

        Meters are cached per (name, version, schema_url, attributes); a
        NoOpMeter is returned when the SDK is disabled, shut down, or the
        name is empty.
        """
        if self._disabled:
            return NoOpMeter(name, version=version, schema_url=schema_url)

        if self._shutdown:
            _logger.warning(
                "A shutdown `MeterProvider` can not provide a `Meter`"
            )
            return NoOpMeter(name, version=version, schema_url=schema_url)

        if not name:
            _logger.warning("Meter name cannot be None or empty.")
            return NoOpMeter(name, version=version, schema_url=schema_url)

        info = InstrumentationScope(name, version, schema_url, attributes)
        with self._meter_lock:
            if not self._meters.get(info):
                # FIXME #2558 pass SDKConfig object to meter so that the meter
                # has access to views.
                self._meters[info] = Meter(
                    info,
                    self._measurement_consumer,
                )
            return self._meters[info]
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,153 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
from logging import getLogger
|
||||
from threading import Lock
|
||||
from time import time_ns
|
||||
from typing import Dict, List, Optional, Sequence
|
||||
|
||||
from opentelemetry.metrics import Instrument
|
||||
from opentelemetry.sdk.metrics._internal.aggregation import (
|
||||
Aggregation,
|
||||
DefaultAggregation,
|
||||
_Aggregation,
|
||||
_SumAggregation,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.export import AggregationTemporality
|
||||
from opentelemetry.sdk.metrics._internal.measurement import Measurement
|
||||
from opentelemetry.sdk.metrics._internal.point import DataPointT
|
||||
from opentelemetry.sdk.metrics._internal.view import View
|
||||
|
||||
_logger = getLogger(__name__)
|
||||
|
||||
|
||||
class _ViewInstrumentMatch:
    """Pairs a `View` with a matching instrument and owns the per-attribute
    aggregations holding that instrument's measurements."""

    def __init__(
        self,
        view: View,
        instrument: Instrument,
        instrument_class_aggregation: Dict[type, Aggregation],
    ):
        self._view = view
        self._instrument = instrument
        # One aggregation per distinct (filtered) attribute set, keyed by a
        # frozenset of the attribute items.
        self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}
        self._lock = Lock()
        self._instrument_class_aggregation = instrument_class_aggregation
        # The view's name/description win over the instrument's.
        self._name = self._view._name or self._instrument.name
        self._description = (
            self._view._description or self._instrument.description
        )
        # Prototype aggregation (no attributes, start time 0) used only to
        # detect conflicts between matches, never to record measurements.
        self._aggregation = self._create_aggregation(None, 0)

    def _create_aggregation(
        self, attributes, start_time_unix_nano
    ) -> _Aggregation:
        """Build an aggregation for this match.

        The view's configured aggregation takes precedence; when the view
        uses `DefaultAggregation`, the default aggregation registered for
        the instrument's class is used instead.
        """
        if not isinstance(self._view._aggregation, DefaultAggregation):
            aggregation = self._view._aggregation
        else:
            aggregation = self._instrument_class_aggregation[
                self._instrument.__class__
            ]
        return aggregation._create_aggregation(
            self._instrument,
            attributes,
            self._view._exemplar_reservoir_factory,
            start_time_unix_nano,
        )

    def conflicts(self, other: "_ViewInstrumentMatch") -> bool:
        """Return True when *other* would produce a duplicate stream."""
        # pylint: disable=protected-access

        result = (
            self._name == other._name
            and self._instrument.unit == other._instrument.unit
            # The aggregation class is being used here instead of data point
            # type since they are functionally equivalent.
            and self._aggregation.__class__ == other._aggregation.__class__
        )
        if isinstance(self._aggregation, _SumAggregation):
            # Sums only conflict when monotonicity and temporality also match.
            result = (
                result
                and self._aggregation._instrument_is_monotonic
                == other._aggregation._instrument_is_monotonic
                and self._aggregation._instrument_aggregation_temporality
                == other._aggregation._instrument_aggregation_temporality
            )

        return result

    # pylint: disable=protected-access
    def consume_measurement(
        self, measurement: Measurement, should_sample_exemplar: bool = True
    ) -> None:
        """Route *measurement* into the aggregation for its attribute set,
        creating that aggregation on first use."""
        # Apply the view's attribute-key filter, if any.
        if self._view._attribute_keys is not None:
            attributes = {}

            for key, value in (measurement.attributes or {}).items():
                if key in self._view._attribute_keys:
                    attributes[key] = value
        elif measurement.attributes is not None:
            attributes = measurement.attributes
        else:
            attributes = {}

        aggr_key = frozenset(attributes.items())

        # Double-checked creation: only take the lock when the aggregation
        # for this attribute set does not exist yet.
        if aggr_key not in self._attributes_aggregation:
            with self._lock:
                if aggr_key not in self._attributes_aggregation:
                    self._attributes_aggregation[aggr_key] = (
                        self._create_aggregation(attributes, time_ns())
                    )

        self._attributes_aggregation[aggr_key].aggregate(
            measurement, should_sample_exemplar
        )

    def collect(
        self,
        collection_aggregation_temporality: AggregationTemporality,
        collection_start_nanos: int,
    ) -> Optional[Sequence[DataPointT]]:
        """Collect one data point from every aggregation of this match."""
        data_points: List[DataPointT] = []
        with self._lock:
            for aggregation in self._attributes_aggregation.values():
                data_point = aggregation.collect(
                    collection_aggregation_temporality, collection_start_nanos
                )
                if data_point is not None:
                    data_points.append(data_point)

        # Returning here None instead of an empty list because the caller
        # does not consume a sequence and to be consistent with the rest of
        # collect methods that also return None.
        return data_points or None
|
||||
File diff suppressed because it is too large
Load Diff
@@ -0,0 +1,17 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
class MetricsTimeoutError(Exception):
    """Raised when a metrics operation does not complete before its deadline."""
|
||||
@@ -0,0 +1,39 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from .exemplar import Exemplar
|
||||
from .exemplar_filter import (
|
||||
AlwaysOffExemplarFilter,
|
||||
AlwaysOnExemplarFilter,
|
||||
ExemplarFilter,
|
||||
TraceBasedExemplarFilter,
|
||||
)
|
||||
from .exemplar_reservoir import (
|
||||
AlignedHistogramBucketExemplarReservoir,
|
||||
ExemplarReservoir,
|
||||
ExemplarReservoirBuilder,
|
||||
SimpleFixedSizeExemplarReservoir,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"Exemplar",
|
||||
"ExemplarFilter",
|
||||
"AlwaysOffExemplarFilter",
|
||||
"AlwaysOnExemplarFilter",
|
||||
"TraceBasedExemplarFilter",
|
||||
"AlignedHistogramBucketExemplarReservoir",
|
||||
"ExemplarReservoir",
|
||||
"ExemplarReservoirBuilder",
|
||||
"SimpleFixedSizeExemplarReservoir",
|
||||
]
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,50 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import dataclasses
|
||||
from typing import Optional, Union
|
||||
|
||||
from opentelemetry.util.types import Attributes
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
class Exemplar:
    """A representation of an exemplar, which is a sample input measurement.

    Exemplars also hold information about the environment when the measurement
    was recorded, for example the span and trace ID of the active span when the
    exemplar was recorded.

    Attributes
        trace_id: (optional) The trace associated with a recording
        span_id: (optional) The span associated with a recording
        time_unix_nano: The time of the observation
        value: The recorded value
        filtered_attributes: A set of filtered attributes which provide additional insight into the Context when the observation was made.

    References:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#exemplars
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar
    """

    # TODO Fix doc - if using valid Google `Attributes:` key, the attributes are duplicated
    # one will come from napoleon extension and the other from autodoc extension. This
    # will raise an sphinx error of duplicated object description
    # See https://github.com/sphinx-doc/sphinx/issues/8664

    # Measurement attributes that were dropped by the view's filtering.
    filtered_attributes: Attributes
    # The sampled measurement value (int or float).
    value: Union[int, float]
    # Observation timestamp in nanoseconds since the Unix epoch.
    time_unix_nano: int
    # Span/trace ids of the active span at recording time, when available.
    span_id: Optional[int] = None
    trace_id: Optional[int] = None
|
||||
@@ -0,0 +1,134 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from typing import Union
|
||||
|
||||
from opentelemetry import trace
|
||||
from opentelemetry.context import Context
|
||||
from opentelemetry.trace.span import INVALID_SPAN
|
||||
from opentelemetry.util.types import Attributes
|
||||
|
||||
|
||||
class ExemplarFilter(ABC):
    """``ExemplarFilter`` determines which measurements are eligible for becoming an
    ``Exemplar``.

    Exemplar filters are used to filter measurements before attempting to store them
    in a reservoir.

    Reference:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplarfilter
    """

    @abstractmethod
    def should_sample(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> bool:
        """Returns whether or not a reservoir should attempt to filter a measurement.

        Args:
            value: The value of the measurement
            timestamp: A timestamp that best represents when the measurement was taken
            attributes: The complete set of measurement attributes
            context: The Context of the measurement
        """
        # Abstract; concrete filters must override. The explicit raise keeps
        # a clear error even if a subclass is instantiated without overriding.
        raise NotImplementedError(
            "ExemplarFilter.should_sample is not implemented"
        )
|
||||
|
||||
|
||||
class AlwaysOnExemplarFilter(ExemplarFilter):
    """An ExemplarFilter which makes all measurements eligible for being an Exemplar.

    Reference:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwayson
    """

    def should_sample(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> bool:
        """Returns whether or not a reservoir should attempt to filter a measurement.

        Always returns True: every measurement is eligible.

        Args:
            value: The value of the measurement
            time_unix_nano: A timestamp that best represents when the measurement was taken
            attributes: The complete set of measurement attributes
            context: The Context of the measurement
        """
        return True
|
||||
|
||||
|
||||
class AlwaysOffExemplarFilter(ExemplarFilter):
    """An ExemplarFilter which makes no measurements eligible for being an Exemplar.

    Using this ExemplarFilter is as good as disabling Exemplar feature.

    Reference:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwaysoff
    """

    def should_sample(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> bool:
        """Returns whether or not a reservoir should attempt to filter a measurement.

        Always returns False: no measurement is eligible.

        Args:
            value: The value of the measurement
            time_unix_nano: A timestamp that best represents when the measurement was taken
            attributes: The complete set of measurement attributes
            context: The Context of the measurement
        """
        return False
|
||||
|
||||
|
||||
class TraceBasedExemplarFilter(ExemplarFilter):
    """An ExemplarFilter which makes those measurements eligible for being an Exemplar,
    which are recorded in the context of a sampled parent span.

    Reference:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#tracebased
    """

    def should_sample(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> bool:
        """Returns whether or not a reservoir should attempt to filter a measurement.

        Returns True only when ``context`` carries a span whose trace flags
        have the sampled bit set.

        Args:
            value: The value of the measurement
            time_unix_nano: A timestamp that best represents when the measurement was taken
            attributes: The complete set of measurement attributes
            context: The Context of the measurement
        """
        span = trace.get_current_span(context)
        # No active span in the context: the measurement cannot be linked to
        # a trace, so it is never eligible.
        if span == INVALID_SPAN:
            return False
        return span.get_span_context().trace_flags.sampled
|
||||
@@ -0,0 +1,332 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from collections import defaultdict
|
||||
from random import randrange
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
List,
|
||||
Mapping,
|
||||
Optional,
|
||||
Sequence,
|
||||
Union,
|
||||
)
|
||||
|
||||
from opentelemetry import trace
|
||||
from opentelemetry.context import Context
|
||||
from opentelemetry.trace.span import INVALID_SPAN
|
||||
from opentelemetry.util.types import Attributes
|
||||
|
||||
from .exemplar import Exemplar
|
||||
|
||||
|
||||
class ExemplarReservoir(ABC):
    """``ExemplarReservoir`` provides a method to offer measurements to the reservoir
    and another to collect accumulated Exemplars.

    Note:
        The constructor MUST accept ``**kwargs`` that may be set from aggregation
        parameters.

    Reference:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplarreservoir
    """

    @abstractmethod
    def offer(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> None:
        """Offers a measurement to be sampled.

        Args:
            value: Measured value
            time_unix_nano: Measurement instant
            attributes: Measurement attributes
            context: Measurement context
        """
        raise NotImplementedError("ExemplarReservoir.offer is not implemented")

    @abstractmethod
    def collect(self, point_attributes: Attributes) -> List[Exemplar]:
        """Returns accumulated Exemplars and also resets the reservoir for the next
        sampling period

        Args:
            point_attributes: The attributes associated with metric point.

        Returns:
            a list of ``opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar`` s. Returned
            exemplars contain the attributes that were filtered out by the aggregator,
            but recorded alongside the original measurement.
        """
        raise NotImplementedError(
            "ExemplarReservoir.collect is not implemented"
        )
|
||||
|
||||
|
||||
class ExemplarBucket:
    """Holds at most one offered measurement, turned into an ``Exemplar`` at
    collection time.

    The bucket stores the last offered value, timestamp, attributes and — when
    the measurement was recorded inside a span — the span and trace ids.
    """

    def __init__(self) -> None:
        self.__value: Union[int, float] = 0
        self.__attributes: Attributes = None
        self.__time_unix_nano: int = 0
        self.__span_id: Optional[int] = None
        self.__trace_id: Optional[int] = None
        # True once a measurement has been offered since the last collection.
        self.__offered: bool = False

    def offer(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> None:
        """Offers a measurement to be sampled.

        Overwrites any previously offered measurement.

        Args:
            value: Measured value
            time_unix_nano: Measurement instant
            attributes: Measurement attributes
            context: Measurement context
        """
        self.__value = value
        self.__time_unix_nano = time_unix_nano
        self.__attributes = attributes
        span = trace.get_current_span(context)
        if span != INVALID_SPAN:
            span_context = span.get_span_context()
            self.__span_id = span_context.span_id
            self.__trace_id = span_context.trace_id

        self.__offered = True

    def collect(self, point_attributes: Attributes) -> Optional[Exemplar]:
        """May return an Exemplar and resets the bucket for the next sampling period."""
        if not self.__offered:
            return None

        # filters out attributes from the measurement that are already included in the metric data point
        # See the specification for more details:
        # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar
        filtered_attributes = (
            {
                k: v
                for k, v in self.__attributes.items()
                if k not in point_attributes
            }
            if self.__attributes
            else None
        )

        exemplar = Exemplar(
            filtered_attributes,
            self.__value,
            self.__time_unix_nano,
            self.__span_id,
            self.__trace_id,
        )
        self.__reset()
        return exemplar

    def __reset(self) -> None:
        """Reset the bucket state after a collection cycle."""
        self.__value = 0
        # Restore the exact initial state from __init__. The previous reset
        # used ``{}`` here while __init__ uses ``None``; both are falsy, so
        # ``collect`` behaves identically, but keeping the two consistent
        # avoids state-dependent surprises.
        self.__attributes = None
        self.__time_unix_nano = 0
        self.__span_id = None
        self.__trace_id = None
        self.__offered = False
|
||||
|
||||
|
||||
class BucketIndexError(ValueError):
    """Raised when a reservoir cannot determine a bucket index for a measurement."""
|
||||
|
||||
|
||||
class FixedSizeExemplarReservoirABC(ExemplarReservoir):
    """Abstract class for a reservoir with fixed size."""

    def __init__(self, size: int, **kwargs) -> None:
        """
        Args:
            size: Maximum number of buckets (and therefore exemplars) held.
        """
        super().__init__(**kwargs)
        self._size: int = size
        # Buckets are created lazily on first access by the defaultdict.
        self._reservoir_storage: Dict[int, ExemplarBucket] = defaultdict(
            ExemplarBucket
        )

    def collect(self, point_attributes: Attributes) -> List[Exemplar]:
        """Returns accumulated Exemplars and also resets the reservoir for the next
        sampling period

        Args:
            point_attributes: The attributes associated with metric point.

        Returns:
            a list of ``opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar`` s. Returned
            exemplars contain the attributes that were filtered out by the aggregator,
            but recorded alongside the original measurement.
        """
        # Visit buckets in ascending index order; buckets that were never
        # offered a measurement return None from collect() and are skipped.
        exemplars = [
            e
            for e in (
                bucket.collect(point_attributes)
                for _, bucket in sorted(self._reservoir_storage.items())
            )
            if e is not None
        ]
        self._reset()
        return exemplars

    def offer(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> None:
        """Offers a measurement to be sampled.

        Args:
            value: Measured value
            time_unix_nano: Measurement instant
            attributes: Measurement attributes
            context: Measurement context
        """
        try:
            index = self._find_bucket_index(
                value, time_unix_nano, attributes, context
            )

            self._reservoir_storage[index].offer(
                value, time_unix_nano, attributes, context
            )
        except BucketIndexError:
            # Ignore invalid bucket index: the measurement is simply not
            # sampled this period.
            pass

    @abstractmethod
    def _find_bucket_index(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> int:
        """Determines the bucket index for the given measurement.

        It should be implemented by subclasses based on specific strategies.

        Args:
            value: Measured value
            time_unix_nano: Measurement instant
            attributes: Measurement attributes
            context: Measurement context

        Returns:
            The bucket index

        Raises:
            BucketIndexError: If no bucket index can be found.
        """

    def _reset(self) -> None:
        """Reset the reservoir by resetting any stateful logic after a collection cycle."""
|
||||
|
||||
|
||||
class SimpleFixedSizeExemplarReservoir(FixedSizeExemplarReservoirABC):
    """Uniformly-weighted reservoir sampling.

    The first measurements fill the reservoir's slots directly; afterwards,
    each new measurement replaces a stored one with probability
    ``size / measurements_seen``, which yields a uniform sample of the
    measurements seen during the period.

    Reference:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir
    """

    def __init__(self, size: int = 1, **kwargs) -> None:
        super().__init__(size, **kwargs)
        # Measurements offered since the last collection cycle.
        self._measurements_seen: int = 0

    def _reset(self) -> None:
        super()._reset()
        self._measurements_seen = 0

    def _find_bucket_index(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> int:
        self._measurements_seen += 1
        seen = self._measurements_seen

        # Reservoir still filling up: use the next free slot.
        if seen < self._size:
            return seen - 1

        # Pick a uniformly random position in [0, seen); only positions that
        # land inside the reservoir cause a replacement.
        candidate = randrange(0, seen)
        if candidate < self._size:
            return candidate

        raise BucketIndexError("Unable to find the bucket index.")
|
||||
|
||||
|
||||
class AlignedHistogramBucketExemplarReservoir(FixedSizeExemplarReservoirABC):
    """Exemplar reservoir whose slots are aligned with histogram buckets.

    Configured with the explicit bucket boundaries of a histogram; keeps the
    last measurement seen for each histogram bucket.

    Reference:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alignedhistogrambucketexemplarreservoir
    """

    def __init__(self, boundaries: Sequence[float], **kwargs) -> None:
        # One slot per boundary, plus the overflow slot for values greater
        # than the last boundary.
        super().__init__(len(boundaries) + 1, **kwargs)
        self._boundaries: Sequence[float] = boundaries

    def offer(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> None:
        """Offers a measurement to be sampled."""
        # A bucket index always exists here (the overflow slot catches
        # everything), so no BucketIndexError handling is needed.
        bucket = self._find_bucket_index(
            value, time_unix_nano, attributes, context
        )
        self._reservoir_storage[bucket].offer(
            value, time_unix_nano, attributes, context
        )

    def _find_bucket_index(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> int:
        # First bucket whose upper boundary is >= value; values above every
        # boundary fall into the final overflow bucket.
        return next(
            (
                position
                for position, upper in enumerate(self._boundaries)
                if value <= upper
            ),
            len(self._boundaries),
        )
|
||||
|
||||
|
||||
# Factory signature used by aggregations to create their reservoir: it takes
# the aggregation's keyword parameters and returns a fresh ExemplarReservoir.
ExemplarReservoirBuilder = Callable[[Dict[str, Any]], ExemplarReservoir]
ExemplarReservoirBuilder.__doc__ = """ExemplarReservoir builder.

It may receive the Aggregation parameters it is bounded to; e.g.
the _ExplicitBucketHistogramAggregation will provide the boundaries.
"""
|
||||
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,190 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from math import ceil, log2
|
||||
|
||||
|
||||
class Buckets:
    """Backing storage for the buckets of an exponential histogram.

    Counts are kept in a circular array (``self._counts``) so the represented
    index range can move and grow without shifting existing elements.
    """

    # No method of this class is protected by locks because instances of this
    # class are only used in methods that are protected by locks themselves.

    def __init__(self):
        self._counts = [0]

        # The term index refers to the number of the exponential histogram bucket
        # used to determine its boundaries. The lower boundary of a bucket is
        # determined by base ** index and the upper boundary of a bucket is
        # determined by base ** (index + 1). index values are signed to account
        # for values less than or equal to 1.

        # self._index_* will all have values equal to a certain index that is
        # determined by the corresponding mapping _map_to_index function and
        # the value of the index depends on the value passed to _map_to_index.

        # Index of the 0th position in self._counts: self._counts[0] is the
        # count in the bucket with index self.__index_base.
        self.__index_base = 0

        # self.__index_start is the smallest index value represented in
        # self._counts.
        self.__index_start = 0

        # self.__index_end is the largest index value represented in
        # self._counts.
        self.__index_end = 0

    @property
    def index_start(self) -> int:
        # Smallest bucket index currently represented.
        return self.__index_start

    @index_start.setter
    def index_start(self, value: int) -> None:
        self.__index_start = value

    @property
    def index_end(self) -> int:
        # Largest bucket index currently represented.
        return self.__index_end

    @index_end.setter
    def index_end(self, value: int) -> None:
        self.__index_end = value

    @property
    def index_base(self) -> int:
        # Bucket index stored at physical position 0 of the circular array.
        return self.__index_base

    @index_base.setter
    def index_base(self, value: int) -> None:
        self.__index_base = value

    @property
    def counts(self):
        # Raw circular backing array; use get_offset_counts() for the counts
        # in logical (index_start-first) order.
        return self._counts

    def get_offset_counts(self):
        """Return the counts rotated into logical order, starting at
        ``index_start`` instead of ``index_base``."""
        bias = self.__index_base - self.__index_start
        return self._counts[-bias:] + self._counts[:-bias]

    def grow(self, needed: int, max_size: int) -> None:
        """Grow the backing array to hold at least ``needed`` buckets (rounded
        up to a power of two), capped at ``max_size``, preserving the circular
        layout."""
        size = len(self._counts)
        bias = self.__index_base - self.__index_start
        old_positive_limit = size - bias

        # 2 ** ceil(log2(needed)) finds the smallest power of two that is larger
        # or equal than needed:
        # 2 ** ceil(log2(1)) == 1
        # 2 ** ceil(log2(2)) == 2
        # 2 ** ceil(log2(3)) == 4
        # 2 ** ceil(log2(4)) == 4
        # 2 ** ceil(log2(5)) == 8
        # 2 ** ceil(log2(6)) == 8
        # 2 ** ceil(log2(7)) == 8
        # 2 ** ceil(log2(8)) == 8
        new_size = min(2 ** ceil(log2(needed)), max_size)

        new_positive_limit = new_size - bias

        # Copy the wrapped-around (negative-offset) segment to the end of the
        # new array and the non-wrapped segment to the front, so both keep
        # their positions relative to the array boundaries.
        tmp = [0] * new_size
        tmp[new_positive_limit:] = self._counts[old_positive_limit:]
        tmp[0:old_positive_limit] = self._counts[0:old_positive_limit]
        self._counts = tmp

    @property
    def offset(self) -> int:
        # Same value as index_start; exposed under the exposition-format name.
        return self.__index_start

    def __len__(self) -> int:
        """Number of buckets in the represented index range (0 when empty)."""
        if len(self._counts) == 0:
            return 0

        if self.__index_end == self.__index_start and self[0] == 0:
            return 0

        return self.__index_end - self.__index_start + 1

    def __getitem__(self, key: int) -> int:
        """Return the count at logical position ``key`` (0 corresponds to
        ``index_start``), translating through the circular layout."""
        bias = self.__index_base - self.__index_start

        if key < bias:
            key += len(self._counts)

        key -= bias

        return self._counts[key]

    def downscale(self, amount: int) -> None:
        """
        Rotates, then collapses 2 ** amount to 1 buckets.
        """

        bias = self.__index_base - self.__index_start

        if bias != 0:
            self.__index_base = self.__index_start

            # [0, 1, 2, 3, 4] Original backing array

            self._counts = self._counts[::-1]
            # [4, 3, 2, 1, 0]

            self._counts = (
                self._counts[:bias][::-1] + self._counts[bias:][::-1]
            )
            # [3, 4, 0, 1, 2] This is a rotation of the backing array.

        size = 1 + self.__index_end - self.__index_start
        each = 1 << amount
        inpos = 0
        outpos = 0

        pos = self.__index_start

        while pos <= self.__index_end:
            mod = pos % each
            if mod < 0:
                mod += each

            index = mod

            # Merge up to ``each`` consecutive source buckets into one output
            # bucket, starting at the phase ``mod`` of the first bucket.
            while index < each and inpos < size:
                if outpos != inpos:
                    self._counts[outpos] += self._counts[inpos]
                    self._counts[inpos] = 0

                inpos += 1
                pos += 1
                index += 1

            outpos += 1

        # Arithmetic right shift floors toward negative infinity in Python,
        # which is exactly the index rescaling floor(index / 2 ** amount).
        self.__index_start >>= amount
        self.__index_end >>= amount
        self.__index_base = self.__index_start

    def increment_bucket(self, bucket_index: int, increment: int = 1) -> None:
        """Add ``increment`` to the count at physical position ``bucket_index``."""
        self._counts[bucket_index] += increment

    def copy_empty(self) -> "Buckets":
        """Return a new ``Buckets`` with the same index range but all counts zero."""
        copy = Buckets()

        # pylint: disable=no-member
        # pylint: disable=protected-access
        # pylint: disable=attribute-defined-outside-init
        # pylint: disable=invalid-name
        copy._Buckets__index_base = self._Buckets__index_base
        copy._Buckets__index_start = self._Buckets__index_start
        copy._Buckets__index_end = self._Buckets__index_end
        copy._counts = [0 for _ in self._counts]

        return copy
|
||||
@@ -0,0 +1,98 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
|
||||
class Mapping(ABC):
    """
    Parent class for `LogarithmMapping` and `ExponentialMapping`.

    Instances are cached per scale: constructing a subclass twice with the
    same scale returns the same object.
    """

    # pylint: disable=no-member
    def __new__(cls, scale: int):
        with cls._mappings_lock:
            # cls._mappings and cls._mappings_lock are implemented in each of
            # the child classes as a dictionary and a lock, respectively. They
            # are not instantiated here because that would lead to both child
            # classes having the same instance of cls._mappings and
            # cls._mappings_lock.
            if scale not in cls._mappings:
                # Initialize the instance *before* caching it: a failing
                # _init (e.g. out-of-range scale) must not leave a
                # half-initialized object in the cache, which would be
                # silently returned by later calls with the same scale.
                instance = super().__new__(cls)
                instance._init(scale)
                cls._mappings[scale] = instance

        return cls._mappings[scale]

    @abstractmethod
    def _init(self, scale: int) -> None:
        """Validate ``scale`` and store it on the instance.

        Raises:
            Exception: if ``scale`` is above ``_get_max_scale()`` or below
                ``_get_min_scale()``.
        """
        # pylint: disable=attribute-defined-outside-init

        if scale > self._get_max_scale():
            # pylint: disable=broad-exception-raised
            # Use the getter in the message as well: subclasses are only
            # required to implement _get_max_scale, not to define a
            # _max_scale attribute.
            raise Exception(f"scale is larger than {self._get_max_scale()}")

        if scale < self._get_min_scale():
            # pylint: disable=broad-exception-raised
            raise Exception(f"scale is smaller than {self._get_min_scale()}")

        # The size of the exponential histogram buckets is determined by a
        # parameter known as scale, larger values of scale will produce smaller
        # buckets. Bucket boundaries of the exponential histogram are located
        # at integer powers of the base, where:
        #
        # base = 2 ** (2 ** (-scale))
        # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#all-scales-use-the-logarithm-function
        self._scale = scale

    @abstractmethod
    def _get_min_scale(self) -> int:
        """
        Return the smallest possible value for the mapping scale
        """

    @abstractmethod
    def _get_max_scale(self) -> int:
        """
        Return the largest possible value for the mapping scale
        """

    @abstractmethod
    def map_to_index(self, value: float) -> int:
        """
        Maps positive floating point values to indexes corresponding to
        `Mapping.scale`. Implementations are not expected to handle zeros,
        +inf, NaN, or negative values.
        """

    @abstractmethod
    def get_lower_boundary(self, index: int) -> float:
        """
        Returns the lower boundary of a given bucket index. The index is
        expected to map onto a range that is at least partially inside the
        range of normal floating point values. If the corresponding
        bucket's upper boundary is less than or equal to 2 ** -1022,
        :class:`~opentelemetry.sdk.metrics.MappingUnderflowError`
        will be raised. If the corresponding bucket's lower boundary is greater
        than ``sys.float_info.max``,
        :class:`~opentelemetry.sdk.metrics.MappingOverflowError`
        will be raised.
        """

    @property
    def scale(self) -> int:
        """
        Returns the parameter that controls the resolution of this mapping.
        See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/datamodel.md#exponential-scale
        """
        return self._scale
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,26 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
class MappingUnderflowError(Exception):
    """
    Raised when the lower boundary computed for an index falls in the
    denormal (subnormal) floating point range.
    """
|
||||
|
||||
|
||||
class MappingOverflowError(Exception):
    """
    Raised when the lower boundary computed for an index maps into +inf.
    """
|
||||
@@ -0,0 +1,141 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from math import ldexp
|
||||
from threading import Lock
|
||||
|
||||
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping import (
|
||||
Mapping,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.errors import (
|
||||
MappingOverflowError,
|
||||
MappingUnderflowError,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.ieee_754 import (
|
||||
MANTISSA_WIDTH,
|
||||
MAX_NORMAL_EXPONENT,
|
||||
MIN_NORMAL_EXPONENT,
|
||||
MIN_NORMAL_VALUE,
|
||||
get_ieee_754_exponent,
|
||||
get_ieee_754_mantissa,
|
||||
)
|
||||
|
||||
|
||||
class ExponentMapping(Mapping):
    """Mapping for scales <= 0, computed purely from the IEEE 754 exponent
    bits (no logarithm evaluation needed)."""

    # Reference implementation here:
    # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/exponent/exponent.go

    # Per-class instance cache and its lock, required by Mapping.__new__.
    _mappings = {}
    _mappings_lock = Lock()

    _min_scale = -10
    _max_scale = 0

    def _get_min_scale(self):
        # _min_scale defines the point at which the exponential mapping
        # function becomes useless for 64-bit floats. With scale -10, ignoring
        # subnormal values, bucket indices range from -1 to 1.
        return -10

    def _get_max_scale(self):
        # _max_scale is the largest scale supported by exponential mapping. Use
        # a logarithm mapping for larger scales.
        return 0

    def _init(self, scale: int):
        """Validate ``scale`` (via the parent) and precompute the smallest and
        largest bucket indices whose ranges contain normal floats."""
        # pylint: disable=attribute-defined-outside-init

        super()._init(scale)

        # self._min_normal_lower_boundary_index is the largest index such that
        # base ** index < MIN_NORMAL_VALUE and
        # base ** (index + 1) >= MIN_NORMAL_VALUE. An exponential histogram
        # bucket with this index covers the range
        # (base ** index, base (index + 1)], including MIN_NORMAL_VALUE. This
        # is the smallest valid index that contains at least one normal value.
        index = MIN_NORMAL_EXPONENT >> -self._scale

        if -self._scale < 2:
            # For scales -1 and 0, the maximum value 2 ** -1022 is a
            # power-of-two multiple, meaning base ** index == MIN_NORMAL_VALUE.
            # Subtracting 1 so that base ** (index + 1) == MIN_NORMAL_VALUE.
            index -= 1

        self._min_normal_lower_boundary_index = index

        # self._max_normal_lower_boundary_index is the index such that
        # base**index equals the greatest representable lower boundary. An
        # exponential histogram bucket with this index covers the range
        # ((2 ** 1024) / base, 2 ** 1024], which includes opentelemetry.sdk.
        # metrics._internal.exponential_histogram.ieee_754.MAX_NORMAL_VALUE.
        # This bucket is incomplete, since the upper boundary cannot be
        # represented. One greater than this index corresponds with the bucket
        # containing values > 2 ** 1024.
        self._max_normal_lower_boundary_index = (
            MAX_NORMAL_EXPONENT >> -self._scale
        )

    def map_to_index(self, value: float) -> int:
        """Return the bucket index for ``value`` at this scale."""
        if value < MIN_NORMAL_VALUE:
            return self._min_normal_lower_boundary_index

        exponent = get_ieee_754_exponent(value)

        # Positive integers are represented in binary as having an infinite
        # amount of leading zeroes, for example 2 is represented as ...00010.

        # A negative integer -x is represented in binary as the complement of
        # (x - 1). For example, -4 is represented as the complement of 4 - 1
        # == 3. 3 is represented as ...00011. Its compliment is ...11100, the
        # binary representation of -4.

        # get_ieee_754_mantissa(value) gets the positive integer made up
        # from the rightmost MANTISSA_WIDTH bits (the mantissa) of the IEEE
        # 754 representation of value. If value is an exact power of 2, all
        # these MANTISSA_WIDTH bits would be all zeroes, and when 1 is
        # subtracted the resulting value is -1. The binary representation of
        # -1 is ...111, so when these bits are right shifted MANTISSA_WIDTH
        # places, the resulting value for correction is -1. If value is not an
        # exact power of 2, at least one of the rightmost MANTISSA_WIDTH
        # bits would be 1 (even for values whose decimal part is 0, like 5.0
        # since the IEEE 754 of such number is too the product of a power of 2
        # (defined in the exponent part of the IEEE 754 representation) and the
        # value defined in the mantissa). Having at least one of the rightmost
        # MANTISSA_WIDTH bit being 1 means that get_ieee_754(value) will
        # always be greater or equal to 1, and when 1 is subtracted, the
        # result will be greater or equal to 0, whose representation in binary
        # will be of at most MANTISSA_WIDTH ones that have an infinite
        # amount of leading zeroes. When those MANTISSA_WIDTH bits are
        # shifted to the right MANTISSA_WIDTH places, the resulting value
        # will be 0.

        # In summary, correction will be -1 if value is a power of 2, 0 if not.

        # FIXME Document why we can assume value will not be 0, inf, or NaN.
        correction = (get_ieee_754_mantissa(value) - 1) >> MANTISSA_WIDTH

        return (exponent + correction) >> -self._scale

    def get_lower_boundary(self, index: int) -> float:
        """Return ``base ** index``, the lower boundary of bucket ``index``.

        Raises:
            MappingUnderflowError: if ``index`` is below the smallest index
                that contains a normal value.
            MappingOverflowError: if ``index`` is above the largest index
                whose lower boundary is representable.
        """
        if index < self._min_normal_lower_boundary_index:
            raise MappingUnderflowError()

        if index > self._max_normal_lower_boundary_index:
            raise MappingOverflowError()

        return ldexp(1, index << -self._scale)

    @property
    def scale(self) -> int:
        # NOTE(review): re-declares the parent's ``scale`` property with the
        # same body; behavior is identical to Mapping.scale.
        return self._scale
|
||||
@@ -0,0 +1,175 @@
|
||||
# IEEE 754 Explained
|
||||
|
||||
IEEE 754 is a standard that defines a way to represent certain mathematical
|
||||
objects using binary numbers.
|
||||
|
||||
## Binary Number Fields
|
||||
|
||||
The binary numbers used in IEEE 754 can have different lengths, the length that
|
||||
is interesting for the purposes of this project is 64 bits. These binary
|
||||
numbers are made up of 3 contiguous fields of bits, from left to right:
|
||||
|
||||
1. 1 sign bit
|
||||
2. 11 exponent bits
|
||||
3. 52 mantissa bits
|
||||
|
||||
Depending on the values these fields have, the represented mathematical object
|
||||
can be one of:
|
||||
|
||||
* Floating point number
|
||||
* Zero
|
||||
* NaN
|
||||
* Infinite
|
||||
|
||||
## Floating Point Numbers
|
||||
|
||||
IEEE 754 represents a floating point number $f$ using an exponential
|
||||
notation with 4 components: $sign$, $mantissa$, $base$ and $exponent$:
|
||||
|
||||
$$f = sign \times mantissa \times base ^ {exponent}$$
|
||||
|
||||
There are two possible representations of floating point numbers:
|
||||
_normal_ and _denormal_, which have different valid values for
|
||||
their $mantissa$ and $exponent$ fields.
|
||||
|
||||
### Binary Representation
|
||||
|
||||
$sign$, $mantissa$, and $exponent$ are represented in binary, the
|
||||
representation of each component has certain details explained next.
|
||||
|
||||
$base$ is always $2$ and it is not represented in binary.
|
||||
|
||||
#### Sign
|
||||
|
||||
$sign$ can have 2 values:
|
||||
|
||||
1. $1$ if the `sign` bit is `0`
|
||||
2. $-1$ if the `sign` bit is `1`.
|
||||
|
||||
#### Mantissa
|
||||
|
||||
##### Normal Floating Point Numbers
|
||||
|
||||
$mantissa$ is a positive fractional number whose integer part is $1$, for example
|
||||
$1.2345 \dots$. The `mantissa` bits represent only the fractional part and the
|
||||
$mantissa$ value can be calculated as:
|
||||
|
||||
$$mantissa = 1 + \sum_{i=1}^{52} b_{i} \times 2^{-i} = 1 + \frac{b_{1}}{2^{1}} + \frac{b_{2}}{2^{2}} + \dots + \frac{b_{51}}{2^{51}} + \frac{b_{52}}{2^{52}}$$
|
||||
|
||||
Where $b_{i}$ is:
|
||||
|
||||
1. $0$ if the bit at the position `i - 1` is `0`.
|
||||
2. $1$ if the bit at the position `i - 1` is `1`.
|
||||
|
||||
##### Denormal Floating Point Numbers
|
||||
|
||||
$mantissa$ is a positive fractional number whose integer part is $0$, for example
|
||||
$0.12345 \dots$. The `mantissa` bits represent only the fractional part and the
|
||||
$mantissa$ value can be calculated as:
|
||||
|
||||
$$mantissa = \sum_{i=1}^{52} b_{i} \times 2^{-i} = \frac{b_{1}}{2^{1}} + \frac{b_{2}}{2^{2}} + \dots + \frac{b_{51}}{2^{51}} + \frac{b_{52}}{2^{52}}$$
|
||||
|
||||
Where $b_{i}$ is:
|
||||
|
||||
1. $0$ if the bit at the position `i - 1` is `0`.
|
||||
2. $1$ if the bit at the position `i - 1` is `1`.
|
||||
|
||||
#### Exponent
|
||||
|
||||
##### Normal Floating Point Numbers
|
||||
|
||||
Only the following bit sequences are allowed: `00000000001` to `11111111110`.
|
||||
That is, there must be at least one `0` and one `1` in the exponent bits.
|
||||
|
||||
The actual value of the $exponent$ can be calculated as:
|
||||
|
||||
$$exponent = v - bias$$
|
||||
|
||||
where $v$ is the value of the binary number in the exponent bits and $bias$ is $1023$.
|
||||
Considering the restrictions above, the respective minimum and maximum values for the
|
||||
exponent are:
|
||||
|
||||
1. `00000000001` = $1$, $1 - 1023 = -1022$
|
||||
2. `11111111110` = $2046$, $2046 - 1023 = 1023$
|
||||
|
||||
So, $exponent$ is an integer in the range $\left[-1022, 1023\right]$.
|
||||
|
||||
|
||||
##### Denormal Floating Point Numbers
|
||||
|
||||
$exponent$ is always $-1022$. Nevertheless, it is always represented as `00000000000`.
|
||||
|
||||
### Normal and Denormal Floating Point Numbers
|
||||
|
||||
The smallest absolute value a normal floating point number can have is calculated
|
||||
like this:
|
||||
|
||||
$$1 \times 1.0\dots0 \times 2^{-1022} = 2.2250738585072014 \times 10^{-308}$$
|
||||
|
||||
Since normal floating point numbers always have a $1$ as the integer part of the
|
||||
$mantissa$, then smaller values can be achieved by using the smallest possible exponent
|
||||
( $-1022$ ) and a $0$ in the integer part of the $mantissa$, but significant digits are lost.
|
||||
|
||||
The smallest absolute value a denormal floating point number can have is calculated
|
||||
like this:
|
||||
|
||||
$$1 \times 2^{-52} \times 2^{-1022} \approx 5 \times 10^{-324}$$
|
||||
|
||||
## Zero
|
||||
|
||||
Zero is represented like this:
|
||||
|
||||
* Sign bit: `X`
|
||||
* Exponent bits: `00000000000`
|
||||
* Mantissa bits: `0000000000000000000000000000000000000000000000000000`
|
||||
|
||||
where `X` means `0` or `1`.
|
||||
|
||||
## NaN
|
||||
|
||||
There are 2 kinds of NaNs that are represented:
|
||||
|
||||
1. QNaNs (Quiet NaNs): represent the result of indeterminate operations.
|
||||
2. SNaNs (Signalling NaNs): represent the result of invalid operations.
|
||||
|
||||
### QNaNs
|
||||
|
||||
QNaNs are represented like this:
|
||||
|
||||
* Sign bit: `X`
|
||||
* Exponent bits: `11111111111`
|
||||
* Mantissa bits: `1XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`
|
||||
|
||||
where `X` means `0` or `1`.
|
||||
|
||||
### SNaNs
|
||||
|
||||
SNaNs are represented like this:
|
||||
|
||||
* Sign bit: `X`
|
||||
* Exponent bits: `11111111111`
|
||||
* Mantissa bits: `0XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX1`
|
||||
|
||||
where `X` means `0` or `1`.
|
||||
|
||||
## Infinite
|
||||
|
||||
### Positive Infinite
|
||||
|
||||
Positive infinite is represented like this:
|
||||
|
||||
* Sign bit: `0`
|
||||
* Exponent bits: `11111111111`
|
||||
* Mantissa bits: `0000000000000000000000000000000000000000000000000000`
|
||||
|
||||
Note that, unlike for NaNs, every bit is fixed here: no bit is free to vary.
|
||||
|
||||
### Negative Infinite
|
||||
|
||||
Negative infinite is represented like this:
|
||||
|
||||
* Sign bit: `1`
|
||||
* Exponent bits: `11111111111`
|
||||
* Mantissa bits: `0000000000000000000000000000000000000000000000000000`
|
||||
|
||||
Note that, unlike for NaNs, every bit is fixed here: no bit is free to vary.
|
||||
@@ -0,0 +1,117 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from ctypes import c_double, c_uint64
|
||||
from sys import float_info
|
||||
|
||||
# IEEE 754 64-bit floating point numbers use 11 bits for the exponent and 52
# bits for the mantissa.
MANTISSA_WIDTH = 52
EXPONENT_WIDTH = 11

# This mask is equivalent to 52 "1" bits (there are 13 hexadecimal 4-bit "f"s
# in the mantissa mask, 13 * 4 == 52) or 0xfffffffffffff in hexadecimal.
MANTISSA_MASK = (1 << MANTISSA_WIDTH) - 1

# There are 11 bits for the exponent, but the exponent values 0 (11 "0"
# bits) and 2047 (11 "1" bits) have special meanings so the exponent range is
# from 1 to 2046. To calculate the exponent value, 1023 (the bias) is
# subtracted from the exponent, so the exponent value range is from -1022 to
# +1023.
EXPONENT_BIAS = (2 ** (EXPONENT_WIDTH - 1)) - 1  # == 1023

# All the exponent mask bits are set to 1 for the 11 exponent bits,
# positioned just left of the mantissa bits.
EXPONENT_MASK = ((1 << EXPONENT_WIDTH) - 1) << MANTISSA_WIDTH

# The sign mask has the first (leftmost) bit set to 1 and the rest to 0.
SIGN_MASK = 1 << (EXPONENT_WIDTH + MANTISSA_WIDTH)

# For normal floating point numbers, the exponent can have a value in the
# range [-1022, 1023].
MIN_NORMAL_EXPONENT = -EXPONENT_BIAS + 1
MAX_NORMAL_EXPONENT = EXPONENT_BIAS

# The smallest possible normal value is 2.2250738585072014e-308.
# This value is the result of using the smallest possible number in the
# mantissa, 1.0000000000000000000000000000000000000000000000000000 (52 "0"s in
# the fractional part) and the smallest normal exponent, -1022.
# Finally 1 * (2 ** -1022) = 2.2250738585072014e-308.
MIN_NORMAL_VALUE = float_info.min

# Greatest possible normal value (1.7976931348623157e+308)
# The binary representation of a float in scientific notation uses (for the
# mantissa) one bit for the integer part (which is implicit) and 52 bits for
# the fractional part. Consider a float binary 1.111. It is equal to 1 + 1/2 +
# 1/4 + 1/8. The greatest possible value in the 52-bit binary mantissa would be
# then 1.1111111111111111111111111111111111111111111111111111 (52 "1"s in the
# fractional part) whose decimal value is 1.9999999999999998. Finally,
# 1.9999999999999998 * (2 ** 1023) = 1.7976931348623157e+308.
MAX_NORMAL_VALUE = float_info.max
|
||||
|
||||
|
||||
def get_ieee_754_exponent(value: float) -> int:
    """
    Gets the exponent of the IEEE 754 representation of a float.
    """

    return (
        (
            # This step gives the integer that corresponds to the IEEE 754
            # representation of a float. For example, consider
            # -MAX_NORMAL_VALUE for an example. We choose this value because
            # of its binary representation which makes easy to understand the
            # subsequent operations.
            #
            # c_uint64.from_buffer(c_double(-MAX_NORMAL_VALUE)).value == 18442240474082181119
            # bin(18442240474082181119) == '0b1111111111101111111111111111111111111111111111111111111111111111'
            #
            # The first bit of the previous binary number is the sign bit: 1 (1 means negative, 0 means positive)
            # The next 11 bits are the exponent bits: 11111111110
            # The next 52 bits are the mantissa bits: 1111111111111111111111111111111111111111111111111111
            #
            # This step isolates the exponent bits, turning every bit outside
            # of the exponent field (sign and mantissa bits) to 0.
            c_uint64.from_buffer(c_double(value)).value & EXPONENT_MASK
            # For the example this means:
            # 18442240474082181119 & EXPONENT_MASK == 9214364837600034816
            # bin(9214364837600034816) == '0b111111111100000000000000000000000000000000000000000000000000000'
            # Notice that the previous binary representation does not include
            # leading zeroes, so the sign bit is not included since it is a
            # zero.
        )
        # This step moves the exponent bits to the right, removing the
        # mantissa bits that were set to 0 by the previous step. This
        # leaves the IEEE 754 exponent value, ready for the next step.
        >> MANTISSA_WIDTH
        # For the example this means:
        # 9214364837600034816 >> MANTISSA_WIDTH == 2046
        # bin(2046) == '0b11111111110'
        # As shown above, these are the original 11 bits that correspond to the
        # exponent.
        # This step subtracts the exponent bias from the IEEE 754 value,
        # leaving the actual exponent value.
    ) - EXPONENT_BIAS
    # For the example this means:
    # 2046 - EXPONENT_BIAS == 1023
    # As mentioned in a comment above, the largest value for the exponent is
    # 1023, which is exactly the value obtained for this example, since
    # -MAX_NORMAL_VALUE uses the largest normal exponent.
|
||||
|
||||
|
||||
def get_ieee_754_mantissa(value: float) -> int:
    """
    Gets the 52 mantissa bits of the IEEE 754 representation of a float,
    as a non-negative integer.
    """
    # Reinterpret the float's 8 bytes as an unsigned 64-bit integer.
    ieee_754_64_bit_integer = c_uint64.from_buffer(c_double(value)).value

    # The mantissa occupies the rightmost field of an IEEE 754
    # representation, so masking alone isolates it; no shifting is needed.
    return ieee_754_64_bit_integer & MANTISSA_MASK
|
||||
@@ -0,0 +1,138 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from math import exp, floor, ldexp, log
|
||||
from threading import Lock
|
||||
|
||||
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping import (
|
||||
Mapping,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.errors import (
|
||||
MappingOverflowError,
|
||||
MappingUnderflowError,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.ieee_754 import (
|
||||
MAX_NORMAL_EXPONENT,
|
||||
MIN_NORMAL_EXPONENT,
|
||||
MIN_NORMAL_VALUE,
|
||||
get_ieee_754_exponent,
|
||||
get_ieee_754_mantissa,
|
||||
)
|
||||
|
||||
|
||||
class LogarithmMapping(Mapping):
    """Exponential histogram mapping for positive scales, implemented with
    a logarithm-based index function."""

    # Reference implementation here:
    # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go

    # Cache of instances keyed by scale; guarded by _mappings_lock.
    _mappings = {}
    _mappings_lock = Lock()

    _min_scale = 1
    _max_scale = 20

    def _get_min_scale(self):
        # _min_scale ensures that ExponentMapping is used for zero and negative
        # scale values.
        return self._min_scale

    def _get_max_scale(self):
        # FIXME The Go implementation uses a value of 20 here, find out the
        # right value for this implementation, more information here:
        # https://github.com/lightstep/otel-launcher-go/blob/c9ca8483be067a39ab306b09060446e7fda65f35/lightstep/sdk/metric/aggregator/histogram/structure/README.md#mapping-function
        # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go#L32-L45
        return self._max_scale

    def _init(self, scale: int):
        # pylint: disable=attribute-defined-outside-init

        super()._init(scale)

        # self._scale_factor is defined as a multiplier because multiplication
        # is faster than division. self._scale_factor is defined as:
        # index = log(value) * self._scale_factor
        # Where:
        # index = log(value) / log(base)
        # index = log(value) / log(2 ** (2 ** -scale))
        # index = log(value) / ((2 ** -scale) * log(2))
        # index = log(value) * ((1 / log(2)) * (2 ** scale))
        # self._scale_factor = ((1 / log(2)) * (2 ** scale))
        # self._scale_factor = (1 /log(2)) * (2 ** scale)
        # self._scale_factor = ldexp(1 / log(2), scale)
        # This implementation was copied from a Java prototype. See:
        # https://github.com/newrelic-experimental/newrelic-sketch-java/blob/1ce245713603d61ba3a4510f6df930a5479cd3f6/src/main/java/com/newrelic/nrsketch/indexer/LogIndexer.java
        # for the equations used here.
        self._scale_factor = ldexp(1 / log(2), scale)

        # self._min_normal_lower_boundary_index is the index such that
        # base ** index == MIN_NORMAL_VALUE. An exponential histogram bucket
        # with this index covers the range
        # (MIN_NORMAL_VALUE, MIN_NORMAL_VALUE * base]. One less than this index
        # corresponds with the bucket containing values <= MIN_NORMAL_VALUE.
        self._min_normal_lower_boundary_index = (
            MIN_NORMAL_EXPONENT << self._scale
        )

        # self._max_normal_lower_boundary_index is the index such that
        # base ** index equals the greatest representable lower boundary. An
        # exponential histogram bucket with this index covers the range
        # ((2 ** 1024) / base, 2 ** 1024], which includes opentelemetry.sdk.
        # metrics._internal.exponential_histogram.ieee_754.MAX_NORMAL_VALUE.
        # This bucket is incomplete, since the upper boundary cannot be
        # represented. One greater than this index corresponds with the bucket
        # containing values > 2 ** 1024.
        self._max_normal_lower_boundary_index = (
            (MAX_NORMAL_EXPONENT + 1) << self._scale
        ) - 1

    def map_to_index(self, value: float) -> int:
        """
        Maps positive floating point values to indexes corresponding to scale.
        """

        # value is subnormal
        if value <= MIN_NORMAL_VALUE:
            return self._min_normal_lower_boundary_index - 1

        # value is an exact power of two.
        # Exact powers of two land exactly on a bucket boundary; subtracting
        # 1 keeps the lower-inclusive/upper-exclusive convention without
        # relying on floating point rounding of log().
        if get_ieee_754_mantissa(value) == 0:
            exponent = get_ieee_754_exponent(value)
            return (exponent << self._scale) - 1

        # min() clamps rounding overshoot near the top of the range.
        return min(
            floor(log(value) * self._scale_factor),
            self._max_normal_lower_boundary_index,
        )

    def get_lower_boundary(self, index: int) -> float:
        """
        Returns the lower boundary of the bucket at ``index``.

        Raises:
            MappingOverflowError: if ``index`` is above the largest valid index.
            MappingUnderflowError: if ``index`` is below the smallest valid index.
        """
        if index >= self._max_normal_lower_boundary_index:
            if index == self._max_normal_lower_boundary_index:
                # The topmost boundary cannot be computed directly with exp()
                # without overflowing; compute the boundary one base step
                # down and double the result.
                return 2 * exp(
                    (index - (1 << self._scale)) / self._scale_factor
                )
            raise MappingOverflowError()

        if index <= self._min_normal_lower_boundary_index:
            if index == self._min_normal_lower_boundary_index:
                return MIN_NORMAL_VALUE
            if index == self._min_normal_lower_boundary_index - 1:
                # The subnormal bucket: compute the boundary one base step up
                # and halve the result to avoid exp() underflow.
                return (
                    exp((index + (1 << self._scale)) / self._scale_factor) / 2
                )
            raise MappingUnderflowError()

        return exp(index / self._scale_factor)

    @property
    def scale(self) -> int:
        """
        Returns:
            The scale used by this mapping.
        """
        return self._scale
|
||||
@@ -0,0 +1,576 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
import os
|
||||
import weakref
|
||||
from abc import ABC, abstractmethod
|
||||
from enum import Enum
|
||||
from logging import getLogger
|
||||
from os import environ, linesep
|
||||
from sys import stdout
|
||||
from threading import Event, Lock, RLock, Thread
|
||||
from time import time_ns
|
||||
from typing import IO, Callable, Iterable, Optional
|
||||
|
||||
from typing_extensions import final
|
||||
|
||||
# This kind of import is needed to avoid Sphinx errors.
|
||||
import opentelemetry.sdk.metrics._internal
|
||||
from opentelemetry.context import (
|
||||
_SUPPRESS_INSTRUMENTATION_KEY,
|
||||
attach,
|
||||
detach,
|
||||
set_value,
|
||||
)
|
||||
from opentelemetry.sdk.environment_variables import (
|
||||
OTEL_METRIC_EXPORT_INTERVAL,
|
||||
OTEL_METRIC_EXPORT_TIMEOUT,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.aggregation import (
|
||||
AggregationTemporality,
|
||||
DefaultAggregation,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
|
||||
from opentelemetry.sdk.metrics._internal.instrument import (
|
||||
Counter,
|
||||
Gauge,
|
||||
Histogram,
|
||||
ObservableCounter,
|
||||
ObservableGauge,
|
||||
ObservableUpDownCounter,
|
||||
UpDownCounter,
|
||||
_Counter,
|
||||
_Gauge,
|
||||
_Histogram,
|
||||
_ObservableCounter,
|
||||
_ObservableGauge,
|
||||
_ObservableUpDownCounter,
|
||||
_UpDownCounter,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.point import MetricsData
|
||||
from opentelemetry.util._once import Once
|
||||
|
||||
_logger = getLogger(__name__)
|
||||
|
||||
|
||||
class MetricExportResult(Enum):
    """Result of exporting a metric

    Can be any of the following values:"""

    # The batch was exported successfully.
    SUCCESS = 0
    # The batch could not be exported.
    FAILURE = 1
|
||||
|
||||
|
||||
class MetricExporter(ABC):
    """Interface for exporting metrics.

    Interface to be implemented by services that want to export metrics received
    in their own format.

    Args:
        preferred_temporality: Used by `opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader` to
            configure exporter level preferred temporality. See `opentelemetry.sdk.metrics.export.MetricReader` for
            more details on what preferred temporality is.
        preferred_aggregation: Used by `opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader` to
            configure exporter level preferred aggregation. See `opentelemetry.sdk.metrics.export.MetricReader` for
            more details on what preferred aggregation is.
    """

    def __init__(
        self,
        preferred_temporality: dict[type, AggregationTemporality]
        | None = None,
        preferred_aggregation: dict[
            type, "opentelemetry.sdk.metrics.view.Aggregation"
        ]
        | None = None,
    ) -> None:
        # Both may be None, meaning "use the SDK defaults"; readers consult
        # these when configuring collection for this exporter.
        self._preferred_temporality = preferred_temporality
        self._preferred_aggregation = preferred_aggregation

    @abstractmethod
    def export(
        self,
        metrics_data: MetricsData,
        timeout_millis: float = 10_000,
        **kwargs,
    ) -> MetricExportResult:
        """Exports a batch of telemetry data.

        Args:
            metrics: The list of `opentelemetry.sdk.metrics.export.Metric` objects to be exported

        Returns:
            The result of the export
        """

    @abstractmethod
    def force_flush(self, timeout_millis: float = 10_000) -> bool:
        """
        Ensure that export of any metrics currently received by the exporter
        are completed as soon as possible.
        """

    @abstractmethod
    def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
        """Shuts down the exporter.

        Called when the SDK is shut down.
        """
|
||||
|
||||
|
||||
class ConsoleMetricExporter(MetricExporter):
    """Implementation of :class:`MetricExporter` that prints metrics to the
    console.

    This class can be used for diagnostic purposes. It prints the exported
    metrics to the console STDOUT.
    """

    def __init__(
        self,
        out: IO = stdout,
        formatter: Callable[
            ["opentelemetry.sdk.metrics.export.MetricsData"], str
        ] = lambda metrics_data: metrics_data.to_json() + linesep,
        preferred_temporality: dict[type, AggregationTemporality]
        | None = None,
        preferred_aggregation: dict[
            type, "opentelemetry.sdk.metrics.view.Aggregation"
        ]
        | None = None,
    ):
        super().__init__(
            preferred_temporality=preferred_temporality,
            preferred_aggregation=preferred_aggregation,
        )
        # Destination stream and the function used to serialize each batch.
        self.out = out
        self.formatter = formatter

    def export(
        self,
        metrics_data: MetricsData,
        timeout_millis: float = 10_000,
        **kwargs,
    ) -> MetricExportResult:
        # Serialize first, then write and flush so the output appears
        # immediately even on a line-buffered stream.
        serialized = self.formatter(metrics_data)
        self.out.write(serialized)
        self.out.flush()
        return MetricExportResult.SUCCESS

    def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
        # Nothing to release: the output stream is owned by the caller.
        pass

    def force_flush(self, timeout_millis: float = 10_000) -> bool:
        # export() already flushes after every batch.
        return True
|
||||
|
||||
|
||||
class MetricReader(ABC):
    # pylint: disable=broad-exception-raised
    """
    Base class for all metric readers

    Args:
        preferred_temporality: A mapping between instrument classes and
            aggregation temporality. By default uses CUMULATIVE for all instrument
            classes. This mapping will be used to define the default aggregation
            temporality of every instrument class. If the user wants to make a
            change in the default aggregation temporality of an instrument class,
            it is enough to pass here a dictionary whose keys are the instrument
            classes and the values are the corresponding desired aggregation
            temporalities of the classes that the user wants to change, not all of
            them. The classes not included in the passed dictionary will retain
            their association to their default aggregation temporalities.
        preferred_aggregation: A mapping between instrument classes and
            aggregation instances. By default maps all instrument classes to an
            instance of `DefaultAggregation`. This mapping will be used to
            define the default aggregation of every instrument class. If the
            user wants to make a change in the default aggregation of an
            instrument class, it is enough to pass here a dictionary whose keys
            are the instrument classes and the values are the corresponding
            desired aggregation for the instrument classes that the user wants
            to change, not necessarily all of them. The classes not included in
            the passed dictionary will retain their association to their
            default aggregations. The aggregation defined here will be
            overridden by an aggregation defined by a view that is not
            `DefaultAggregation`.

    .. document protected _receive_metrics which is intended to be overridden by subclasses
    .. automethod:: _receive_metrics
    """

    # Maps the public API instrument classes (accepted as keys in
    # preferred_temporality/preferred_aggregation) to the internal SDK
    # implementation classes used as keys in the per-instance dictionaries
    # below. Also defines the iteration/insertion order of those
    # dictionaries. Do not mutate.
    _API_TO_INTERNAL_INSTRUMENT_CLASS = {
        Counter: _Counter,
        UpDownCounter: _UpDownCounter,
        Histogram: _Histogram,
        Gauge: _Gauge,
        ObservableCounter: _ObservableCounter,
        ObservableUpDownCounter: _ObservableUpDownCounter,
        ObservableGauge: _ObservableGauge,
    }

    def __init__(
        self,
        preferred_temporality: dict[type, AggregationTemporality]
        | None = None,
        preferred_aggregation: dict[
            type, "opentelemetry.sdk.metrics.view.Aggregation"
        ]
        | None = None,
    ) -> None:
        # Set by the MeterProvider via _set_collect_callback when this
        # reader is registered; None until then.
        self._collect: Callable[
            [
                "opentelemetry.sdk.metrics.export.MetricReader",
                AggregationTemporality,
            ],
            Iterable["opentelemetry.sdk.metrics.export.Metric"],
        ] = None

        # Default every instrument class to CUMULATIVE temporality; user
        # preferences below override individual entries.
        self._instrument_class_temporality = {
            internal_class: AggregationTemporality.CUMULATIVE
            for internal_class in (
                self._API_TO_INTERNAL_INSTRUMENT_CLASS.values()
            )
        }

        if preferred_temporality is not None:
            # Validate all requested temporalities before applying any of
            # them, so an invalid mapping fails atomically.
            for temporality in preferred_temporality.values():
                if temporality not in (
                    AggregationTemporality.CUMULATIVE,
                    AggregationTemporality.DELTA,
                ):
                    raise Exception(
                        f"Invalid temporality value found {temporality}"
                    )

            for typ, temporality in preferred_temporality.items():
                internal_class = (
                    self._API_TO_INTERNAL_INSTRUMENT_CLASS.get(typ)
                )
                if internal_class is None:
                    raise Exception(f"Invalid instrument class found {typ}")
                self._instrument_class_temporality[internal_class] = (
                    temporality
                )

        self._preferred_temporality = preferred_temporality

        # Default every instrument class to its own DefaultAggregation
        # instance; user preferences below override individual entries.
        self._instrument_class_aggregation = {
            internal_class: DefaultAggregation()
            for internal_class in (
                self._API_TO_INTERNAL_INSTRUMENT_CLASS.values()
            )
        }

        if preferred_aggregation is not None:
            for typ, aggregation in preferred_aggregation.items():
                internal_class = (
                    self._API_TO_INTERNAL_INSTRUMENT_CLASS.get(typ)
                )
                if internal_class is None:
                    raise Exception(f"Invalid instrument class found {typ}")
                self._instrument_class_aggregation[internal_class] = (
                    aggregation
                )

    @final
    def collect(self, timeout_millis: float = 10_000) -> None:
        """Collects the metrics from the internal SDK state and
        invokes the `_receive_metrics` with the collection.

        Args:
            timeout_millis: Amount of time in milliseconds before this function
              raises a timeout error.

        If any of the underlying ``collect`` methods called by this method
        fails by any reason (including timeout) an exception will be raised
        detailing the individual errors that caused this function to fail.
        """
        if self._collect is None:
            _logger.warning(
                "Cannot call collect on a MetricReader until it is registered on a MeterProvider"
            )
            return

        metrics = self._collect(self, timeout_millis=timeout_millis)

        if metrics is not None:
            self._receive_metrics(
                metrics,
                timeout_millis=timeout_millis,
            )

    @final
    def _set_collect_callback(
        self,
        func: Callable[
            [
                "opentelemetry.sdk.metrics.export.MetricReader",
                AggregationTemporality,
            ],
            Iterable["opentelemetry.sdk.metrics.export.Metric"],
        ],
    ) -> None:
        """This function is internal to the SDK. It should not be called or overridden by users"""
        self._collect = func

    @abstractmethod
    def _receive_metrics(
        self,
        metrics_data: "opentelemetry.sdk.metrics.export.MetricsData",
        timeout_millis: float = 10_000,
        **kwargs,
    ) -> None:
        """Called by `MetricReader.collect` when it receives a batch of metrics"""

    def force_flush(self, timeout_millis: float = 10_000) -> bool:
        # Flushing a reader is a collection: pull the current state and
        # hand it to _receive_metrics.
        self.collect(timeout_millis=timeout_millis)
        return True

    @abstractmethod
    def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
        """Shuts down the MetricReader. This method provides a way
        for the MetricReader to do any cleanup required. A metric reader can
        only be shutdown once, any subsequent calls are ignored and return
        failure status.

        When a `MetricReader` is registered on a
        :class:`~opentelemetry.sdk.metrics.MeterProvider`,
        :meth:`~opentelemetry.sdk.metrics.MeterProvider.shutdown` will invoke this
        automatically.
        """
|
||||
|
||||
|
||||
class InMemoryMetricReader(MetricReader):
    """A `MetricReader` that buffers the latest collection in memory.

    Call :func:`get_metrics_data` to trigger a collection and fetch the
    resulting batch. This is useful for e.g. unit tests.
    """

    def __init__(
        self,
        preferred_temporality: dict[type, AggregationTemporality]
        | None = None,
        preferred_aggregation: dict[
            type, "opentelemetry.sdk.metrics.view.Aggregation"
        ]
        | None = None,
    ) -> None:
        super().__init__(
            preferred_temporality=preferred_temporality,
            preferred_aggregation=preferred_aggregation,
        )
        # Serializes collection against retrieval of the buffered batch.
        self._lock = RLock()
        self._metrics_data: "opentelemetry.sdk.metrics.export.MetricsData" = (
            None
        )

    def get_metrics_data(
        self,
    ) -> Optional["opentelemetry.sdk.metrics.export.MetricsData"]:
        """Collect once and return the resulting batch, clearing the buffer."""
        with self._lock:
            self.collect()
            snapshot = self._metrics_data
            self._metrics_data = None
        return snapshot

    def _receive_metrics(
        self,
        metrics_data: "opentelemetry.sdk.metrics.export.MetricsData",
        timeout_millis: float = 10_000,
        **kwargs,
    ) -> None:
        # Invoked from collect(); simply stash the batch for get_metrics_data().
        with self._lock:
            self._metrics_data = metrics_data

    def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
        """No resources to release for the in-memory reader."""
|
||||
|
||||
|
||||
class PeriodicExportingMetricReader(MetricReader):
    """`PeriodicExportingMetricReader` is an implementation of `MetricReader`
    that collects metrics based on a user-configurable time interval, and passes the
    metrics to the configured exporter. If the time interval is set to `math.inf`, the
    reader will not invoke periodic collection.

    The configured exporter's :py:meth:`~MetricExporter.export` method will not be called
    concurrently.
    """

    def __init__(
        self,
        exporter: MetricExporter,
        export_interval_millis: Optional[float] = None,
        export_timeout_millis: Optional[float] = None,
    ) -> None:
        # PeriodicExportingMetricReader defers to exporter for configuration
        super().__init__(
            preferred_temporality=exporter._preferred_temporality,
            preferred_aggregation=exporter._preferred_aggregation,
        )

        # This lock is held whenever calling self._exporter.export() to prevent concurrent
        # execution of MetricExporter.export()
        # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exportbatch
        self._export_lock = Lock()

        self._exporter = exporter
        # Fall back to the OTEL_METRIC_EXPORT_INTERVAL / OTEL_METRIC_EXPORT_TIMEOUT
        # environment variables (defaults: 60 s interval, 30 s timeout) when the
        # caller does not configure the values explicitly.
        if export_interval_millis is None:
            try:
                export_interval_millis = float(
                    environ.get(OTEL_METRIC_EXPORT_INTERVAL, 60000)
                )
            except ValueError:
                _logger.warning(
                    "Found invalid value for export interval, using default"
                )
                export_interval_millis = 60000
        if export_timeout_millis is None:
            try:
                export_timeout_millis = float(
                    environ.get(OTEL_METRIC_EXPORT_TIMEOUT, 30000)
                )
            except ValueError:
                _logger.warning(
                    "Found invalid value for export timeout, using default"
                )
                export_timeout_millis = 30000
        self._export_interval_millis = export_interval_millis
        self._export_timeout_millis = export_timeout_millis
        self._shutdown = False
        self._shutdown_event = Event()
        self._shutdown_once = Once()
        self._daemon_thread = None
        if (
            self._export_interval_millis > 0
            and self._export_interval_millis < math.inf
        ):
            self._daemon_thread = Thread(
                name="OtelPeriodicExportingMetricReader",
                target=self._ticker,
                daemon=True,
            )
            self._daemon_thread.start()
            if hasattr(os, "register_at_fork"):
                # Restart the ticker in forked children; hold only a weak
                # reference so the fork hook does not keep this reader alive.
                weak_at_fork = weakref.WeakMethod(self._at_fork_reinit)

                os.register_at_fork(
                    after_in_child=lambda: weak_at_fork()()  # pylint: disable=unnecessary-lambda
                )
        elif self._export_interval_millis <= 0:
            raise ValueError(
                f"interval value {self._export_interval_millis} is invalid \
and needs to be larger than zero."
            )

    def _at_fork_reinit(self):
        # Runs in the child process after fork: the parent's ticker thread
        # does not survive fork, so start a fresh one.
        self._daemon_thread = Thread(
            name="OtelPeriodicExportingMetricReader",
            target=self._ticker,
            daemon=True,
        )
        self._daemon_thread.start()

    def _ticker(self) -> None:
        """Daemon loop: collect every interval until shutdown is signalled."""
        interval_secs = self._export_interval_millis / 1e3
        while not self._shutdown_event.wait(interval_secs):
            try:
                self.collect(timeout_millis=self._export_timeout_millis)
            except MetricsTimeoutError:
                _logger.warning(
                    "Metric collection timed out. Will try again after %s seconds",
                    interval_secs,
                    exc_info=True,
                )
        # one last collection below before shutting down completely
        try:
            # Fix: bound the final collection by the configured export
            # timeout (previously the export *interval* was passed here),
            # matching the per-tick behavior above.
            self.collect(timeout_millis=self._export_timeout_millis)
        except MetricsTimeoutError:
            _logger.warning(
                "Metric collection timed out.",
                exc_info=True,
            )

    def _receive_metrics(
        self,
        metrics_data: MetricsData,
        timeout_millis: float = 10_000,
        **kwargs,
    ) -> None:
        """Export one batch, suppressing instrumentation for the duration."""
        # Suppress instrumentation so the exporter's own network calls do not
        # recursively generate telemetry.
        token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
        # pylint: disable=broad-exception-caught,invalid-name
        try:
            with self._export_lock:
                self._exporter.export(
                    metrics_data, timeout_millis=timeout_millis
                )
        except Exception:
            _logger.exception("Exception while exporting metrics")
        finally:
            # Fix: always restore the context, even if attach/export raised
            # something other than Exception; previously the token could leak.
            detach(token)

    def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
        """Stop the ticker thread and shut the exporter down once."""
        deadline_ns = time_ns() + timeout_millis * 10**6

        def _shutdown():
            self._shutdown = True

        did_set = self._shutdown_once.do_once(_shutdown)
        if not did_set:
            _logger.warning("Can't shutdown multiple times")
            return

        self._shutdown_event.set()
        if self._daemon_thread:
            self._daemon_thread.join(timeout=(deadline_ns - time_ns()) / 10**9)
        # Fix: pass the remaining budget as ``timeout_millis`` -- the
        # exporter's signature is ``shutdown(timeout_millis=..., **kwargs)``,
        # so the previous ``timeout=`` keyword landed in **kwargs and the
        # deadline was silently ignored.
        self._exporter.shutdown(
            timeout_millis=(deadline_ns - time_ns()) / 10**6
        )

    def force_flush(self, timeout_millis: float = 10_000) -> bool:
        """Collect once and flush the exporter within the given timeout."""
        super().force_flush(timeout_millis=timeout_millis)
        self._exporter.force_flush(timeout_millis=timeout_millis)
        return True
|
||||
Binary file not shown.
@@ -0,0 +1,334 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# pylint: disable=too-many-ancestors, unused-import
|
||||
from __future__ import annotations
|
||||
|
||||
from logging import getLogger
|
||||
from time import time_ns
|
||||
from typing import Generator, Iterable, List, Sequence, Union
|
||||
|
||||
# This kind of import is needed to avoid Sphinx errors.
|
||||
import opentelemetry.sdk.metrics
|
||||
from opentelemetry.context import Context, get_current
|
||||
from opentelemetry.metrics import CallbackT
|
||||
from opentelemetry.metrics import Counter as APICounter
|
||||
from opentelemetry.metrics import Histogram as APIHistogram
|
||||
from opentelemetry.metrics import ObservableCounter as APIObservableCounter
|
||||
from opentelemetry.metrics import ObservableGauge as APIObservableGauge
|
||||
from opentelemetry.metrics import (
|
||||
ObservableUpDownCounter as APIObservableUpDownCounter,
|
||||
)
|
||||
from opentelemetry.metrics import UpDownCounter as APIUpDownCounter
|
||||
from opentelemetry.metrics import _Gauge as APIGauge
|
||||
from opentelemetry.metrics._internal.instrument import (
|
||||
CallbackOptions,
|
||||
_MetricsHistogramAdvisory,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.measurement import Measurement
|
||||
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
|
||||
|
||||
# Module-level logger for instrument validation warnings and callback errors.
_logger = getLogger(__name__)


# Raised (formatted with the offending value) when an instrument name or unit
# fails validation in _check_name_unit_description.
_ERROR_MESSAGE = (
    "Expected ASCII string of maximum length 63 characters but got {}"
)
|
||||
|
||||
|
||||
class _Synchronous:
    """Shared initializer for the SDK's synchronous instruments.

    Validates name/unit/description through the API base class helper and
    stores the measurement pipeline used by the concrete subclasses'
    recording methods.
    """

    def __init__(
        self,
        name: str,
        instrumentation_scope: InstrumentationScope,
        measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer",
        unit: str = "",
        description: str = "",
    ):
        # pylint: disable=no-member
        checked = self._check_name_unit_description(name, unit, description)

        # pylint: disable=broad-exception-raised
        if checked["name"] is None:
            raise Exception(_ERROR_MESSAGE.format(name))
        if checked["unit"] is None:
            raise Exception(_ERROR_MESSAGE.format(unit))

        self.name = checked["name"].lower()
        self.unit = checked["unit"]
        self.description = checked["description"]
        self.instrumentation_scope = instrumentation_scope
        self._measurement_consumer = measurement_consumer
        super().__init__(
            checked["name"],
            unit=checked["unit"],
            description=checked["description"],
        )
|
||||
|
||||
|
||||
class _Asynchronous:
    """Shared base for the SDK's asynchronous (observable) instruments.

    Validates name/unit/description, normalizes every registered callback
    into a plain callable, and yields SDK :class:`Measurement` objects when
    polled via :meth:`callback`.
    """

    def __init__(
        self,
        name: str,
        instrumentation_scope: InstrumentationScope,
        measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer",
        callbacks: Iterable[CallbackT] | None = None,
        unit: str = "",
        description: str = "",
    ):
        # pylint: disable=no-member
        result = self._check_name_unit_description(name, unit, description)

        if result["name"] is None:
            # pylint: disable=broad-exception-raised
            raise Exception(_ERROR_MESSAGE.format(name))

        if result["unit"] is None:
            # pylint: disable=broad-exception-raised
            raise Exception(_ERROR_MESSAGE.format(unit))

        name = result["name"]
        unit = result["unit"]
        description = result["description"]

        self.name = name.lower()
        self.unit = unit
        self.description = description
        self.instrumentation_scope = instrumentation_scope
        self._measurement_consumer = measurement_consumer
        super().__init__(name, callbacks, unit=unit, description=description)

        # Normalized callbacks: each entry is a callable taking
        # CallbackOptions and returning an iterable of API measurements.
        self._callbacks: List[CallbackT] = []

        if callbacks is not None:
            for callback in callbacks:
                if isinstance(callback, Generator):
                    # advance generator to its first yield
                    next(callback)

                    # Wrap the generator in a function; ``callback=callback``
                    # binds the loop variable at definition time so each
                    # wrapper targets its own generator.
                    def inner(
                        options: CallbackOptions,
                        callback=callback,
                    ) -> Iterable[Measurement]:
                        try:
                            return callback.send(options)
                        except StopIteration:
                            # An exhausted generator simply contributes no
                            # measurements.
                            return []

                    self._callbacks.append(inner)
                else:
                    self._callbacks.append(callback)

    def callback(
        self, callback_options: CallbackOptions
    ) -> Iterable[Measurement]:
        """Run every registered callback, yielding SDK measurements.

        Each API measurement is stamped with the current time and this
        instrument; a failing callback is logged and skipped so the
        remaining callbacks still run.
        """
        for callback in self._callbacks:
            try:
                for api_measurement in callback(callback_options):
                    yield Measurement(
                        api_measurement.value,
                        time_unix_nano=time_ns(),
                        instrument=self,
                        context=api_measurement.context or get_current(),
                        attributes=api_measurement.attributes,
                    )
            except Exception:  # pylint: disable=broad-exception-caught
                _logger.exception(
                    "Callback failed for instrument %s.", self.name
                )
|
||||
|
||||
|
||||
class Counter(_Synchronous, APICounter):
    """SDK monotonic counter; negative increments are rejected with a warning."""

    def __new__(cls, *args, **kwargs):
        # Direct construction is forbidden; instruments come from a Meter.
        if cls is not Counter:
            return super().__new__(cls)
        raise TypeError("Counter must be instantiated via a meter.")

    def add(
        self,
        amount: Union[int, float],
        attributes: dict[str, str] | None = None,
        context: Context | None = None,
    ):
        """Record an increment of ``amount``; negative values are dropped."""
        if amount < 0:
            _logger.warning(
                "Add amount must be non-negative on Counter %s.", self.name
            )
            return
        self._measurement_consumer.consume_measurement(
            Measurement(
                amount,
                time_ns(),
                self,
                context or get_current(),
                attributes,
            )
        )
|
||||
|
||||
|
||||
class UpDownCounter(_Synchronous, APIUpDownCounter):
    """SDK non-monotonic counter; accepts positive and negative deltas."""

    def __new__(cls, *args, **kwargs):
        # Direct construction is forbidden; instruments come from a Meter.
        if cls is not UpDownCounter:
            return super().__new__(cls)
        raise TypeError("UpDownCounter must be instantiated via a meter.")

    def add(
        self,
        amount: Union[int, float],
        attributes: dict[str, str] | None = None,
        context: Context | None = None,
    ):
        """Record a delta of ``amount`` (may be negative)."""
        self._measurement_consumer.consume_measurement(
            Measurement(
                amount,
                time_ns(),
                self,
                context or get_current(),
                attributes,
            )
        )
|
||||
|
||||
|
||||
class ObservableCounter(_Asynchronous, APIObservableCounter):
    """Asynchronous monotonic counter; values come from registered callbacks."""

    def __new__(cls, *args, **kwargs):
        # Direct construction is forbidden; instruments come from a Meter.
        if cls is not ObservableCounter:
            return super().__new__(cls)
        raise TypeError(
            "ObservableCounter must be instantiated via a meter."
        )
|
||||
|
||||
|
||||
class ObservableUpDownCounter(_Asynchronous, APIObservableUpDownCounter):
    """Asynchronous non-monotonic counter; values come from registered callbacks."""

    def __new__(cls, *args, **kwargs):
        # Direct construction is forbidden; instruments come from a Meter.
        if cls is not ObservableUpDownCounter:
            return super().__new__(cls)
        raise TypeError(
            "ObservableUpDownCounter must be instantiated via a meter."
        )
|
||||
|
||||
|
||||
class Histogram(_Synchronous, APIHistogram):
    """SDK histogram; negative recordings are rejected with a warning."""

    def __new__(cls, *args, **kwargs):
        # Direct construction is forbidden; instruments come from a Meter.
        if cls is not Histogram:
            return super().__new__(cls)
        raise TypeError("Histogram must be instantiated via a meter.")

    def __init__(
        self,
        name: str,
        instrumentation_scope: InstrumentationScope,
        measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer",
        unit: str = "",
        description: str = "",
        explicit_bucket_boundaries_advisory: Sequence[float] | None = None,
    ):
        super().__init__(
            name,
            unit=unit,
            description=description,
            instrumentation_scope=instrumentation_scope,
            measurement_consumer=measurement_consumer,
        )
        # Advisory bucket boundaries; consulted when the aggregation for this
        # instrument is resolved.
        self._advisory = _MetricsHistogramAdvisory(
            explicit_bucket_boundaries=explicit_bucket_boundaries_advisory
        )

    def record(
        self,
        amount: Union[int, float],
        attributes: dict[str, str] | None = None,
        context: Context | None = None,
    ):
        """Record a value of ``amount``; negative values are dropped."""
        if amount < 0:
            _logger.warning(
                "Record amount must be non-negative on Histogram %s.",
                self.name,
            )
            return
        self._measurement_consumer.consume_measurement(
            Measurement(
                amount,
                time_ns(),
                self,
                context or get_current(),
                attributes,
            )
        )
|
||||
|
||||
|
||||
class Gauge(_Synchronous, APIGauge):
    """SDK synchronous gauge; records the last set value."""

    def __new__(cls, *args, **kwargs):
        # Direct construction is forbidden; instruments come from a Meter.
        if cls is not Gauge:
            return super().__new__(cls)
        raise TypeError("Gauge must be instantiated via a meter.")

    def set(
        self,
        amount: Union[int, float],
        attributes: dict[str, str] | None = None,
        context: Context | None = None,
    ):
        """Record the current value ``amount``."""
        self._measurement_consumer.consume_measurement(
            Measurement(
                amount,
                time_ns(),
                self,
                context or get_current(),
                attributes,
            )
        )
|
||||
|
||||
|
||||
class ObservableGauge(_Asynchronous, APIObservableGauge):
    """Asynchronous gauge; values come from registered callbacks."""

    def __new__(cls, *args, **kwargs):
        # Direct construction is forbidden; instruments come from a Meter.
        if cls is not ObservableGauge:
            return super().__new__(cls)
        raise TypeError(
            "ObservableGauge must be instantiated via a meter."
        )
|
||||
|
||||
|
||||
# Below classes exist to prevent the direct instantiation
# of the public instrument classes: each public class' __new__ raises
# TypeError when instantiated directly, so the SDK Meter hands out these
# private concrete subclasses instead.
class _Counter(Counter):
    pass


class _UpDownCounter(UpDownCounter):
    pass


class _ObservableCounter(ObservableCounter):
    pass


class _ObservableUpDownCounter(ObservableUpDownCounter):
    pass


class _Histogram(Histogram):
    pass


class _Gauge(Gauge):
    pass


class _ObservableGauge(ObservableGauge):
    pass
|
||||
@@ -0,0 +1,45 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Union
|
||||
|
||||
from opentelemetry.context import Context
|
||||
from opentelemetry.metrics import Instrument
|
||||
from opentelemetry.util.types import Attributes
|
||||
|
||||
|
||||
# Frozen dataclass: instances are immutable after construction.
@dataclass(frozen=True)
class Measurement:
    """
    Represents a data point reported via the metrics API to the SDK.

    Attributes
        value: Measured value
        time_unix_nano: The time the API call was made to record the Measurement
        instrument: The instrument that produced this `Measurement`.
        context: The active Context of the Measurement at API call time.
        attributes: Measurement attributes
    """

    # TODO Fix doc - if using valid Google `Attributes:` key, the attributes are duplicated
    # one will come from napoleon extension and the other from autodoc extension. This
    # will raise an sphinx error of duplicated object description
    # See https://github.com/sphinx-doc/sphinx/issues/8664

    value: Union[int, float]
    time_unix_nano: int
    instrument: Instrument
    context: Context
    # Defaults to None ("no attributes") rather than an empty mapping.
    attributes: Attributes = None
|
||||
@@ -0,0 +1,145 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# pylint: disable=unused-import
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from threading import Lock
|
||||
from time import time_ns
|
||||
from typing import Iterable, List, Mapping, Optional
|
||||
|
||||
# This kind of import is needed to avoid Sphinx errors.
|
||||
import opentelemetry.sdk.metrics
|
||||
import opentelemetry.sdk.metrics._internal.instrument
|
||||
import opentelemetry.sdk.metrics._internal.sdk_configuration
|
||||
from opentelemetry.metrics._internal.instrument import CallbackOptions
|
||||
from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
|
||||
from opentelemetry.sdk.metrics._internal.measurement import Measurement
|
||||
from opentelemetry.sdk.metrics._internal.metric_reader_storage import (
|
||||
MetricReaderStorage,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.point import Metric
|
||||
|
||||
|
||||
class MeasurementConsumer(ABC):
    """Interface between instruments and the per-reader metric storages.

    Synchronous instruments push data through :meth:`consume_measurement`;
    asynchronous instruments are registered once and polled during
    :meth:`collect`.
    """

    @abstractmethod
    def consume_measurement(self, measurement: Measurement) -> None:
        """Route a single measurement into every reader's storage."""

    @abstractmethod
    def register_asynchronous_instrument(
        self,
        instrument: (
            # Fix: the annotation string was missing the module/attribute
            # separator ("instrument_Asynchronous"), making it reference a
            # nonexistent name; now consistent with
            # SynchronousMeasurementConsumer.register_asynchronous_instrument.
            "opentelemetry.sdk.metrics._internal.instrument._Asynchronous"
        ),
    ):
        """Track an asynchronous instrument so its callbacks run at collection."""

    @abstractmethod
    def collect(
        self,
        metric_reader: "opentelemetry.sdk.metrics.MetricReader",
        timeout_millis: float = 10_000,
    ) -> Optional[Iterable[Metric]]:
        """Poll async instruments and collect metrics for *metric_reader*."""
|
||||
|
||||
|
||||
class SynchronousMeasurementConsumer(MeasurementConsumer):
    """Lock-based `MeasurementConsumer` fanning measurements out to one
    `MetricReaderStorage` per configured reader."""

    def __init__(
        self,
        sdk_config: "opentelemetry.sdk.metrics._internal.SdkConfiguration",
    ) -> None:
        # Guards the async-instrument list and the whole collect() pass.
        self._lock = Lock()
        self._sdk_config = sdk_config
        # should never be mutated
        self._reader_storages: Mapping[
            "opentelemetry.sdk.metrics.MetricReader", MetricReaderStorage
        ] = {
            reader: MetricReaderStorage(
                sdk_config,
                reader._instrument_class_temporality,
                reader._instrument_class_aggregation,
            )
            for reader in sdk_config.metric_readers
        }
        # Asynchronous instruments registered so far; polled on collect().
        self._async_instruments: List[
            "opentelemetry.sdk.metrics._internal.instrument._Asynchronous"
        ] = []

    def consume_measurement(self, measurement: Measurement) -> None:
        """Forward one synchronous measurement to every reader's storage.

        The exemplar-filter decision is computed once and shared by all
        storages.
        """
        should_sample_exemplar = (
            self._sdk_config.exemplar_filter.should_sample(
                measurement.value,
                measurement.time_unix_nano,
                measurement.attributes,
                measurement.context,
            )
        )
        for reader_storage in self._reader_storages.values():
            reader_storage.consume_measurement(
                measurement, should_sample_exemplar
            )

    def register_asynchronous_instrument(
        self,
        instrument: (
            "opentelemetry.sdk.metrics._internal.instrument._Asynchronous"
        ),
    ) -> None:
        """Record an async instrument so collect() polls its callbacks."""
        with self._lock:
            self._async_instruments.append(instrument)

    def collect(
        self,
        metric_reader: "opentelemetry.sdk.metrics.MetricReader",
        timeout_millis: float = 10_000,
    ) -> Optional[Iterable[Metric]]:
        """Poll async instruments, then collect *metric_reader*'s storage.

        Raises:
            MetricsTimeoutError: when the overall deadline elapses while
                executing async-instrument callbacks.
        """
        with self._lock:
            metric_reader_storage = self._reader_storages[metric_reader]
            # for now, just use the defaults
            callback_options = CallbackOptions()
            deadline_ns = time_ns() + (timeout_millis * 1e6)

            # 10 s in ns -- presumably mirrors CallbackOptions' default
            # timeout; TODO confirm against CallbackOptions.
            default_timeout_ns = 10000 * 1e6

            for async_instrument in self._async_instruments:
                remaining_time = deadline_ns - time_ns()

                # Shrink the per-callback timeout once less than the default
                # budget remains, so callbacks never outlive the deadline.
                if remaining_time < default_timeout_ns:
                    callback_options = CallbackOptions(
                        timeout_millis=remaining_time / 1e6
                    )

                measurements = async_instrument.callback(callback_options)
                # NOTE(review): callback() appears to return a generator, so
                # this deadline check runs before the callbacks actually
                # execute during iteration below -- confirm intent.
                if time_ns() >= deadline_ns:
                    raise MetricsTimeoutError(
                        "Timed out while executing callback"
                    )

                for measurement in measurements:
                    should_sample_exemplar = (
                        self._sdk_config.exemplar_filter.should_sample(
                            measurement.value,
                            measurement.time_unix_nano,
                            measurement.attributes,
                            measurement.context,
                        )
                    )
                    # Async measurements only reach the storage of the
                    # reader being collected.
                    metric_reader_storage.consume_measurement(
                        measurement, should_sample_exemplar
                    )

            result = self._reader_storages[metric_reader].collect()

        return result
|
||||
@@ -0,0 +1,315 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from logging import getLogger
|
||||
from threading import RLock
|
||||
from time import time_ns
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
from opentelemetry.metrics import (
|
||||
Asynchronous,
|
||||
Counter,
|
||||
Instrument,
|
||||
ObservableCounter,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal._view_instrument_match import (
|
||||
_ViewInstrumentMatch,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.aggregation import (
|
||||
Aggregation,
|
||||
ExplicitBucketHistogramAggregation,
|
||||
_DropAggregation,
|
||||
_ExplicitBucketHistogramAggregation,
|
||||
_ExponentialBucketHistogramAggregation,
|
||||
_LastValueAggregation,
|
||||
_SumAggregation,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.export import AggregationTemporality
|
||||
from opentelemetry.sdk.metrics._internal.measurement import Measurement
|
||||
from opentelemetry.sdk.metrics._internal.point import (
|
||||
ExponentialHistogram,
|
||||
Gauge,
|
||||
Histogram,
|
||||
Metric,
|
||||
MetricsData,
|
||||
ResourceMetrics,
|
||||
ScopeMetrics,
|
||||
Sum,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.sdk_configuration import (
|
||||
SdkConfiguration,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.view import View
|
||||
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
|
||||
|
||||
_logger = getLogger(__name__)

# Catch-all view matching every instrument; applied when no user-provided
# view selects a given instrument.
_DEFAULT_VIEW = View(instrument_name="")
|
||||
|
||||
|
||||
class MetricReaderStorage:
|
||||
"""The SDK's storage for a given reader"""
|
||||
|
||||
def __init__(
    self,
    sdk_config: SdkConfiguration,
    instrument_class_temporality: Dict[type, AggregationTemporality],
    instrument_class_aggregation: Dict[type, Aggregation],
) -> None:
    """Store the reader's per-instrument-class temporality/aggregation maps."""
    self._sdk_config = sdk_config
    self._instrument_class_temporality = instrument_class_temporality
    self._instrument_class_aggregation = instrument_class_aggregation
    # Reentrant lock guarding lazy creation of the per-instrument matches.
    self._lock = RLock()
    # Populated on first measurement per instrument; entries never change
    # once written.
    self._instrument_view_instrument_matches: Dict[
        Instrument, List[_ViewInstrumentMatch]
    ] = {}
|
||||
|
||||
def _get_or_init_view_instrument_match(
    self, instrument: Instrument
) -> List[_ViewInstrumentMatch]:
    """Return the view matches for *instrument*, creating them on first use.

    Double-checked locking: the entry for a given instrument is written
    exactly once and never mutated afterwards, so the unlocked fast-path
    lookup is safe.
    """
    # Optimistically get the relevant views for the given instrument. Once set for a given
    # instrument, the mapping will never change

    if instrument in self._instrument_view_instrument_matches:
        return self._instrument_view_instrument_matches[instrument]

    with self._lock:
        # double check if it was set before we held the lock
        if instrument in self._instrument_view_instrument_matches:
            return self._instrument_view_instrument_matches[instrument]

        # not present, hold the lock and add a new mapping
        view_instrument_matches = []

        self._handle_view_instrument_match(
            instrument, view_instrument_matches
        )

        # if no view targeted the instrument, use the default
        if not view_instrument_matches:
            view_instrument_matches.append(
                _ViewInstrumentMatch(
                    view=_DEFAULT_VIEW,
                    instrument=instrument,
                    instrument_class_aggregation=(
                        self._instrument_class_aggregation
                    ),
                )
            )
        self._instrument_view_instrument_matches[instrument] = (
            view_instrument_matches
        )

        return view_instrument_matches
|
||||
|
||||
def consume_measurement(
    self, measurement: Measurement, should_sample_exemplar: bool = True
) -> None:
    """Fan one measurement out to every view match for its instrument."""
    matches = self._get_or_init_view_instrument_match(
        measurement.instrument
    )
    for match in matches:
        match.consume_measurement(measurement, should_sample_exemplar)
|
||||
|
||||
def collect(self) -> Optional[MetricsData]:
    """Build one `MetricsData` snapshot from every view-instrument match.

    Returns ``None`` when no stream produced data points. Streams whose
    aggregation is a drop aggregation are skipped entirely.
    """
    # Use a list instead of yielding to prevent a slow reader from holding
    # SDK locks

    # While holding the lock, new _ViewInstrumentMatch can't be added from
    # another thread (so we are sure we collect all existing view).
    # However, instruments can still send measurements that will make it
    # into the individual aggregations; collection will acquire those locks
    # iteratively to keep locking as fine-grained as possible. One side
    # effect is that end times can be slightly skewed among the metric
    # streams produced by the SDK, but we still align the output timestamps
    # for a single instrument.

    # Single timestamp shared by all streams in this pass (see note above).
    collection_start_nanos = time_ns()

    with self._lock:
        instrumentation_scope_scope_metrics: Dict[
            InstrumentationScope, ScopeMetrics
        ] = {}

        for (
            instrument,
            view_instrument_matches,
        ) in self._instrument_view_instrument_matches.items():
            # Temporality is resolved per concrete instrument class.
            aggregation_temporality = self._instrument_class_temporality[
                instrument.__class__
            ]

            metrics: List[Metric] = []

            for view_instrument_match in view_instrument_matches:
                data_points = view_instrument_match.collect(
                    aggregation_temporality, collection_start_nanos
                )

                if data_points is None:
                    continue

                # Map the internal aggregation type to the corresponding
                # point-data wrapper.
                if isinstance(
                    # pylint: disable=protected-access
                    view_instrument_match._aggregation,
                    _SumAggregation,
                ):
                    data = Sum(
                        aggregation_temporality=aggregation_temporality,
                        data_points=data_points,
                        is_monotonic=isinstance(
                            instrument, (Counter, ObservableCounter)
                        ),
                    )
                elif isinstance(
                    # pylint: disable=protected-access
                    view_instrument_match._aggregation,
                    _LastValueAggregation,
                ):
                    data = Gauge(data_points=data_points)
                elif isinstance(
                    # pylint: disable=protected-access
                    view_instrument_match._aggregation,
                    _ExplicitBucketHistogramAggregation,
                ):
                    data = Histogram(
                        data_points=data_points,
                        aggregation_temporality=aggregation_temporality,
                    )
                elif isinstance(
                    # pylint: disable=protected-access
                    view_instrument_match._aggregation,
                    _DropAggregation,
                ):
                    # Dropped streams emit nothing.
                    continue

                elif isinstance(
                    # pylint: disable=protected-access
                    view_instrument_match._aggregation,
                    _ExponentialBucketHistogramAggregation,
                ):
                    data = ExponentialHistogram(
                        data_points=data_points,
                        aggregation_temporality=aggregation_temporality,
                    )

                metrics.append(
                    Metric(
                        # pylint: disable=protected-access
                        # pylint: disable=possibly-used-before-assignment
                        name=view_instrument_match._name,
                        description=view_instrument_match._description,
                        unit=view_instrument_match._instrument.unit,
                        data=data,
                    )
                )

            if metrics:
                # Group metrics by their instrumentation scope, merging
                # into an existing ScopeMetrics entry when present.
                if instrument.instrumentation_scope not in (
                    instrumentation_scope_scope_metrics
                ):
                    instrumentation_scope_scope_metrics[
                        instrument.instrumentation_scope
                    ] = ScopeMetrics(
                        scope=instrument.instrumentation_scope,
                        metrics=metrics,
                        schema_url=instrument.instrumentation_scope.schema_url,
                    )
                else:
                    instrumentation_scope_scope_metrics[
                        instrument.instrumentation_scope
                    ].metrics.extend(metrics)

        if instrumentation_scope_scope_metrics:
            return MetricsData(
                resource_metrics=[
                    ResourceMetrics(
                        resource=self._sdk_config.resource,
                        scope_metrics=list(
                            instrumentation_scope_scope_metrics.values()
                        ),
                        schema_url=self._sdk_config.resource.schema_url,
                    )
                ]
            )

        return None
|
||||
|
||||
def _handle_view_instrument_match(
|
||||
self,
|
||||
instrument: Instrument,
|
||||
view_instrument_matches: List["_ViewInstrumentMatch"],
|
||||
) -> None:
|
||||
for view in self._sdk_config.views:
|
||||
# pylint: disable=protected-access
|
||||
if not view._match(instrument):
|
||||
continue
|
||||
|
||||
if not self._check_view_instrument_compatibility(view, instrument):
|
||||
continue
|
||||
|
||||
new_view_instrument_match = _ViewInstrumentMatch(
|
||||
view=view,
|
||||
instrument=instrument,
|
||||
instrument_class_aggregation=(
|
||||
self._instrument_class_aggregation
|
||||
),
|
||||
)
|
||||
|
||||
for (
|
||||
existing_view_instrument_matches
|
||||
) in self._instrument_view_instrument_matches.values():
|
||||
for (
|
||||
existing_view_instrument_match
|
||||
) in existing_view_instrument_matches:
|
||||
if existing_view_instrument_match.conflicts(
|
||||
new_view_instrument_match
|
||||
):
|
||||
_logger.warning(
|
||||
"Views %s and %s will cause conflicting "
|
||||
"metrics identities",
|
||||
existing_view_instrument_match._view,
|
||||
new_view_instrument_match._view,
|
||||
)
|
||||
|
||||
view_instrument_matches.append(new_view_instrument_match)
|
||||
|
||||
@staticmethod
|
||||
def _check_view_instrument_compatibility(
|
||||
view: View, instrument: Instrument
|
||||
) -> bool:
|
||||
"""
|
||||
Checks if a view and an instrument are compatible.
|
||||
|
||||
Returns `true` if they are compatible and a `_ViewInstrumentMatch`
|
||||
object should be created, `false` otherwise.
|
||||
"""
|
||||
|
||||
result = True
|
||||
|
||||
# pylint: disable=protected-access
|
||||
if isinstance(instrument, Asynchronous) and isinstance(
|
||||
view._aggregation, ExplicitBucketHistogramAggregation
|
||||
):
|
||||
_logger.warning(
|
||||
"View %s and instrument %s will produce "
|
||||
"semantic errors when matched, the view "
|
||||
"has not been applied.",
|
||||
view,
|
||||
instrument,
|
||||
)
|
||||
result = False
|
||||
|
||||
return result
|
||||
@@ -0,0 +1,277 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# pylint: disable=unused-import
|
||||
|
||||
from dataclasses import asdict, dataclass, field
|
||||
from json import dumps, loads
|
||||
from typing import Optional, Sequence, Union
|
||||
|
||||
# This kind of import is needed to avoid Sphinx errors.
|
||||
import opentelemetry.sdk.metrics._internal
|
||||
from opentelemetry.sdk.metrics._internal.exemplar import Exemplar
|
||||
from opentelemetry.sdk.resources import Resource
|
||||
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
|
||||
from opentelemetry.util.types import Attributes
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class NumberDataPoint:
    """A single scalar sample of a metric: one numeric value together with
    its timestamps and identifying attributes.
    """

    attributes: Attributes
    start_time_unix_nano: int
    time_unix_nano: int
    value: Union[int, float]
    exemplars: Sequence[Exemplar] = field(default_factory=list)

    def to_json(self, indent: Optional[int] = 4) -> str:
        """Serialize this data point to a JSON document."""
        payload = asdict(self)
        return dumps(payload, indent=indent)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class HistogramDataPoint:
    """A single histogram sample of a metric: per-bucket measurement counts
    plus summary statistics (count, sum, min, max) for one interval.
    """

    attributes: Attributes
    start_time_unix_nano: int
    time_unix_nano: int
    count: int
    sum: Union[int, float]
    bucket_counts: Sequence[int]
    explicit_bounds: Sequence[float]
    min: float
    max: float
    exemplars: Sequence[Exemplar] = field(default_factory=list)

    def to_json(self, indent: Optional[int] = 4) -> str:
        """Serialize this data point to a JSON document."""
        payload = asdict(self)
        return dumps(payload, indent=indent)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class Buckets:
    """A contiguous run of exponential-histogram bucket counts (used for the
    ``positive`` and ``negative`` ranges of an
    ``ExponentialHistogramDataPoint``)."""

    # Index of the first entry of bucket_counts within the bucket scale.
    offset: int
    # Number of measurements that fell into each consecutive bucket.
    bucket_counts: Sequence[int]
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ExponentialHistogramDataPoint:
    """A single sample of an exponential histogram: bucket counts whose
    boundaries are derived from ``scale``, plus summary statistics for one
    interval.
    """

    attributes: Attributes
    start_time_unix_nano: int
    time_unix_nano: int
    count: int
    sum: Union[int, float]
    scale: int
    zero_count: int
    positive: Buckets
    negative: Buckets
    flags: int
    min: float
    max: float
    exemplars: Sequence[Exemplar] = field(default_factory=list)

    def to_json(self, indent: Optional[int] = 4) -> str:
        """Serialize this data point to a JSON document."""
        payload = asdict(self)
        return dumps(payload, indent=indent)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ExponentialHistogram:
    """Metric data aggregated as an exponential histogram of all reported
    measurements over a time interval.
    """

    data_points: Sequence[ExponentialHistogramDataPoint]
    aggregation_temporality: (
        "opentelemetry.sdk.metrics.export.AggregationTemporality"
    )

    def to_json(self, indent: Optional[int] = 4) -> str:
        """Serialize this metric data to a JSON document."""
        points = [
            loads(point.to_json(indent=indent))
            for point in self.data_points
        ]
        payload = {
            "data_points": points,
            "aggregation_temporality": self.aggregation_temporality,
        }
        return dumps(payload, indent=indent)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class Sum:
    """Scalar metric data calculated as a sum of all reported measurements
    over a time interval."""

    data_points: Sequence[NumberDataPoint]
    aggregation_temporality: (
        "opentelemetry.sdk.metrics.export.AggregationTemporality"
    )
    is_monotonic: bool

    def to_json(self, indent: Optional[int] = 4) -> str:
        """Serialize this metric data to a JSON document."""
        points = [
            loads(point.to_json(indent=indent))
            for point in self.data_points
        ]
        payload = {
            "data_points": points,
            "aggregation_temporality": self.aggregation_temporality,
            "is_monotonic": self.is_monotonic,
        }
        return dumps(payload, indent=indent)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class Gauge:
    """Scalar metric data that always exports the current value of every
    data point; also the type to use for an unknown aggregation."""

    data_points: Sequence[NumberDataPoint]

    def to_json(self, indent: Optional[int] = 4) -> str:
        """Serialize this metric data to a JSON document."""
        points = [
            loads(point.to_json(indent=indent))
            for point in self.data_points
        ]
        return dumps({"data_points": points}, indent=indent)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class Histogram:
    """Metric data aggregated as an explicit-bucket histogram of all
    reported measurements over a time interval."""

    data_points: Sequence[HistogramDataPoint]
    aggregation_temporality: (
        "opentelemetry.sdk.metrics.export.AggregationTemporality"
    )

    def to_json(self, indent: Optional[int] = 4) -> str:
        """Serialize this metric data to a JSON document."""
        points = [
            loads(point.to_json(indent=indent))
            for point in self.data_points
        ]
        payload = {
            "data_points": points,
            "aggregation_temporality": self.aggregation_temporality,
        }
        return dumps(payload, indent=indent)
|
||||
|
||||
|
||||
# pylint: disable=invalid-name
# Union of every metric data shape that may appear in Metric.data.
DataT = Union[Sum, Gauge, Histogram, ExponentialHistogram]
# Union of every data-point shape carried by the DataT members above.
DataPointT = Union[
    NumberDataPoint, HistogramDataPoint, ExponentialHistogramDataPoint
]
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class Metric:
    """A metric point in the OpenTelemetry data model to be exported."""

    name: str
    description: Optional[str]
    unit: Optional[str]
    data: DataT

    def to_json(self, indent: Optional[int] = 4) -> str:
        """Serialize this metric to a JSON document; missing description
        and unit are emitted as empty strings."""
        payload = {
            "name": self.name,
            "description": self.description or "",
            "unit": self.unit or "",
            "data": loads(self.data.to_json(indent=indent)),
        }
        return dumps(payload, indent=indent)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ScopeMetrics:
    """A collection of Metrics produced by a single instrumentation
    scope."""

    scope: InstrumentationScope
    metrics: Sequence[Metric]
    schema_url: str

    def to_json(self, indent: Optional[int] = 4) -> str:
        """Serialize this collection to a JSON document."""
        serialized_metrics = [
            loads(metric.to_json(indent=indent)) for metric in self.metrics
        ]
        payload = {
            "scope": loads(self.scope.to_json(indent=indent)),
            "metrics": serialized_metrics,
            "schema_url": self.schema_url,
        }
        return dumps(payload, indent=indent)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class ResourceMetrics:
    """A collection of ScopeMetrics originating from one Resource."""

    resource: Resource
    scope_metrics: Sequence[ScopeMetrics]
    schema_url: str

    def to_json(self, indent: Optional[int] = 4) -> str:
        """Serialize this collection to a JSON document."""
        serialized_scopes = [
            loads(scope.to_json(indent=indent))
            for scope in self.scope_metrics
        ]
        payload = {
            "resource": loads(self.resource.to_json(indent=indent)),
            "scope_metrics": serialized_scopes,
            "schema_url": self.schema_url,
        }
        return dumps(payload, indent=indent)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class MetricsData:
    """The top-level export payload: an array of ResourceMetrics."""

    resource_metrics: Sequence[ResourceMetrics]

    def to_json(self, indent: Optional[int] = 4) -> str:
        """Serialize the whole payload to a JSON document."""
        serialized = [
            loads(resource.to_json(indent=indent))
            for resource in self.resource_metrics
        ]
        return dumps({"resource_metrics": serialized}, indent=indent)
|
||||
@@ -0,0 +1,30 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# pylint: disable=unused-import
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Sequence
|
||||
|
||||
# This kind of import is needed to avoid Sphinx errors.
|
||||
import opentelemetry.sdk.metrics
|
||||
import opentelemetry.sdk.resources
|
||||
|
||||
|
||||
@dataclass
class SdkConfiguration:
    """Configuration shared by the SDK MeterProvider and its components."""

    # Decides which measurements are eligible to become exemplars.
    exemplar_filter: "opentelemetry.sdk.metrics.ExemplarFilter"
    # Resource describing the entity producing the telemetry.
    resource: "opentelemetry.sdk.resources.Resource"
    # Readers that will collect and export the produced metrics.
    metric_readers: Sequence["opentelemetry.sdk.metrics.MetricReader"]
    # Views used to select and customize metric streams.
    views: Sequence["opentelemetry.sdk.metrics.View"]
|
||||
@@ -0,0 +1,195 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
from fnmatch import fnmatch
|
||||
from logging import getLogger
|
||||
from typing import Callable, Optional, Set, Type
|
||||
|
||||
from opentelemetry.metrics import Instrument
|
||||
from opentelemetry.sdk.metrics._internal.aggregation import (
|
||||
Aggregation,
|
||||
DefaultAggregation,
|
||||
_Aggregation,
|
||||
_ExplicitBucketHistogramAggregation,
|
||||
_ExponentialBucketHistogramAggregation,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.exemplar import (
|
||||
AlignedHistogramBucketExemplarReservoir,
|
||||
ExemplarReservoirBuilder,
|
||||
SimpleFixedSizeExemplarReservoir,
|
||||
)
|
||||
|
||||
_logger = getLogger(__name__)
|
||||
|
||||
|
||||
def _default_reservoir_factory(
    aggregation_type: Type[_Aggregation],
) -> ExemplarReservoirBuilder:
    """Return the default exemplar reservoir builder for an aggregation.

    Explicit-bucket histograms get a reservoir aligned with their bucket
    boundaries; every other aggregation (including exponential-bucket
    histograms) gets a simple fixed-size reservoir.
    """
    if issubclass(aggregation_type, _ExplicitBucketHistogramAggregation):
        return AlignedHistogramBucketExemplarReservoir
    return SimpleFixedSizeExemplarReservoir
|
||||
|
||||
|
||||
class View:
    """
    A `View` configuration parameters can be used for the following
    purposes:

    1. Match instruments: When an instrument matches a view, measurements
    received by that instrument will be processed.
    2. Customize metric streams: A metric stream is identified by a match
    between a view and an instrument and a set of attributes. The metric
    stream can be customized by certain attributes of the corresponding view.

    The attributes documented next serve one of the previous two purposes.

    Args:
        instrument_type: This is an instrument matching attribute: the class the
            instrument must be to match the view.

        instrument_name: This is an instrument matching attribute: the name the
            instrument must have to match the view. Wild card characters are supported. Wild
            card characters should not be used with this attribute if the view has also a
            ``name`` defined.

        meter_name: This is an instrument matching attribute: the name the
            instrument meter must have to match the view.

        meter_version: This is an instrument matching attribute: the version
            the instrument meter must have to match the view.

        meter_schema_url: This is an instrument matching attribute: the schema
            URL the instrument meter must have to match the view.

        name: This is a metric stream customizing attribute: the name of the
            metric stream. If `None`, the name of the instrument will be used.

        description: This is a metric stream customizing attribute: the
            description of the metric stream. If `None`, the description of the instrument will
            be used.

        attribute_keys: This is a metric stream customizing attribute: this is
            a set of attribute keys. If not `None` then only the measurement attributes that
            are in ``attribute_keys`` will be used to identify the metric stream.

        aggregation: This is a metric stream customizing attribute: the
            aggregation instance to use when data is aggregated for the
            corresponding metrics stream. If `None` an instance of
            `DefaultAggregation` will be used.

        exemplar_reservoir_factory: This is a metric stream customizing attribute:
            the exemplar reservoir factory

        instrument_unit: This is an instrument matching attribute: the unit the
            instrument must have to match the view.

    This class is not intended to be subclassed by the user.
    """

    # Shared fallback aggregation used when none is passed to the
    # constructor.
    _default_aggregation = DefaultAggregation()

    def __init__(
        self,
        instrument_type: Optional[Type[Instrument]] = None,
        instrument_name: Optional[str] = None,
        meter_name: Optional[str] = None,
        meter_version: Optional[str] = None,
        meter_schema_url: Optional[str] = None,
        name: Optional[str] = None,
        description: Optional[str] = None,
        attribute_keys: Optional[Set[str]] = None,
        aggregation: Optional[Aggregation] = None,
        exemplar_reservoir_factory: Optional[
            Callable[[Type[_Aggregation]], ExemplarReservoirBuilder]
        ] = None,
        instrument_unit: Optional[str] = None,
    ):
        """Validate the selection criteria and store the configuration.

        Raises:
            ValueError: if no instrument selection criterion is provided, or
                if ``name`` is combined with a wildcard ``instrument_name``.
        """
        # A view with no selection criteria would never match anything, which
        # is almost certainly a configuration mistake: fail fast.
        # ValueError (a subclass of Exception, so existing handlers still
        # catch it) is raised instead of a bare Exception.
        if (
            instrument_type
            is instrument_name
            is instrument_unit
            is meter_name
            is meter_version
            is meter_schema_url
            is None
        ):
            raise ValueError(
                "Some instrument selection "
                f"criteria must be provided for View {name}"
            )

        # A wildcard instrument_name can select several instruments; renaming
        # them all to the same stream ``name`` would create colliding metric
        # identities, so the combination is rejected.
        if (
            name is not None
            and instrument_name is not None
            and ("*" in instrument_name or "?" in instrument_name)
        ):
            raise ValueError(
                f"View {name} declared with wildcard "
                "characters in instrument_name"
            )

        # _name, _description, _aggregation, _exemplar_reservoir_factory and
        # _attribute_keys will be accessed when instantiating a _ViewInstrumentMatch.
        self._name = name
        self._instrument_type = instrument_type
        self._instrument_name = instrument_name
        self._instrument_unit = instrument_unit
        self._meter_name = meter_name
        self._meter_version = meter_version
        self._meter_schema_url = meter_schema_url

        self._description = description
        self._attribute_keys = attribute_keys
        self._aggregation = aggregation or self._default_aggregation
        self._exemplar_reservoir_factory = (
            exemplar_reservoir_factory or _default_reservoir_factory
        )

    # pylint: disable=too-many-return-statements
    def _match(self, instrument: Instrument) -> bool:
        """Return ``True`` when *instrument* satisfies every criterion set
        on this view; criteria left as ``None`` are ignored.

        ``instrument_name`` and ``instrument_unit`` are matched with shell
        wildcards (:func:`fnmatch.fnmatch`); the meter criteria are exact
        comparisons.
        """
        if self._instrument_type is not None and not isinstance(
            instrument, self._instrument_type
        ):
            return False

        if self._instrument_name is not None and not fnmatch(
            instrument.name, self._instrument_name
        ):
            return False

        if self._instrument_unit is not None and not fnmatch(
            instrument.unit, self._instrument_unit
        ):
            return False

        if (
            self._meter_name is not None
            and instrument.instrumentation_scope.name != self._meter_name
        ):
            return False

        if (
            self._meter_version is not None
            and instrument.instrumentation_scope.version
            != self._meter_version
        ):
            return False

        if (
            self._meter_schema_url is not None
            and instrument.instrumentation_scope.schema_url
            != self._meter_schema_url
        ):
            return False

        return True
|
||||
Reference in New Issue
Block a user