structure saas with tools
@@ -0,0 +1,18 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
The OpenTelemetry SDK package is an implementation of the OpenTelemetry API
"""
@@ -0,0 +1,481 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

"""
OpenTelemetry SDK Configurator for Easy Instrumentation with Distros
"""

from __future__ import annotations

import logging
import os
from abc import ABC, abstractmethod
from os import environ
from typing import Callable, Sequence, Type, Union

from typing_extensions import Literal

from opentelemetry._events import set_event_logger_provider
from opentelemetry._logs import set_logger_provider
from opentelemetry.environment_variables import (
    OTEL_LOGS_EXPORTER,
    OTEL_METRICS_EXPORTER,
    OTEL_PYTHON_ID_GENERATOR,
    OTEL_TRACES_EXPORTER,
)
from opentelemetry.metrics import set_meter_provider
from opentelemetry.sdk._events import EventLoggerProvider
from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor, LogExporter
from opentelemetry.sdk.environment_variables import (
    _OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED,
    OTEL_EXPORTER_OTLP_LOGS_PROTOCOL,
    OTEL_EXPORTER_OTLP_METRICS_PROTOCOL,
    OTEL_EXPORTER_OTLP_PROTOCOL,
    OTEL_EXPORTER_OTLP_TRACES_PROTOCOL,
    OTEL_TRACES_SAMPLER,
    OTEL_TRACES_SAMPLER_ARG,
)
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import (
    MetricExporter,
    MetricReader,
    PeriodicExportingMetricReader,
)
from opentelemetry.sdk.resources import Attributes, Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter
from opentelemetry.sdk.trace.id_generator import IdGenerator
from opentelemetry.sdk.trace.sampling import Sampler
from opentelemetry.semconv.resource import ResourceAttributes
from opentelemetry.trace import set_tracer_provider
from opentelemetry.util._importlib_metadata import entry_points

_EXPORTER_OTLP = "otlp"
_EXPORTER_OTLP_PROTO_GRPC = "otlp_proto_grpc"
_EXPORTER_OTLP_PROTO_HTTP = "otlp_proto_http"

_EXPORTER_BY_OTLP_PROTOCOL = {
    "grpc": _EXPORTER_OTLP_PROTO_GRPC,
    "http/protobuf": _EXPORTER_OTLP_PROTO_HTTP,
}

_EXPORTER_ENV_BY_SIGNAL_TYPE = {
    "traces": OTEL_TRACES_EXPORTER,
    "metrics": OTEL_METRICS_EXPORTER,
    "logs": OTEL_LOGS_EXPORTER,
}

_PROTOCOL_ENV_BY_SIGNAL_TYPE = {
    "traces": OTEL_EXPORTER_OTLP_TRACES_PROTOCOL,
    "metrics": OTEL_EXPORTER_OTLP_METRICS_PROTOCOL,
    "logs": OTEL_EXPORTER_OTLP_LOGS_PROTOCOL,
}

_RANDOM_ID_GENERATOR = "random"
_DEFAULT_ID_GENERATOR = _RANDOM_ID_GENERATOR

_OTEL_SAMPLER_ENTRY_POINT_GROUP = "opentelemetry_traces_sampler"

_logger = logging.getLogger(__name__)


def _import_config_components(
    selected_components: list[str], entry_point_name: str
) -> Sequence[tuple[str, object]]:
    component_implementations = []

    for selected_component in selected_components:
        try:
            component_implementations.append(
                (
                    selected_component,
                    next(
                        iter(
                            entry_points(
                                group=entry_point_name, name=selected_component
                            )
                        )
                    ).load(),
                )
            )
        except KeyError:
            raise RuntimeError(
                f"Requested entry point '{entry_point_name}' not found"
            )

        except StopIteration:
            raise RuntimeError(
                f"Requested component '{selected_component}' not found in "
                f"entry point '{entry_point_name}'"
            )

    return component_implementations


def _get_sampler() -> str | None:
    return environ.get(OTEL_TRACES_SAMPLER, None)


def _get_id_generator() -> str:
    return environ.get(OTEL_PYTHON_ID_GENERATOR, _DEFAULT_ID_GENERATOR)


def _get_exporter_entry_point(
    exporter_name: str, signal_type: Literal["traces", "metrics", "logs"]
):
    if exporter_name not in (
        _EXPORTER_OTLP,
        _EXPORTER_OTLP_PROTO_GRPC,
        _EXPORTER_OTLP_PROTO_HTTP,
    ):
        return exporter_name

    # Checking env vars for OTLP protocol (grpc/http).
    otlp_protocol = environ.get(
        _PROTOCOL_ENV_BY_SIGNAL_TYPE[signal_type]
    ) or environ.get(OTEL_EXPORTER_OTLP_PROTOCOL)

    if not otlp_protocol:
        if exporter_name == _EXPORTER_OTLP:
            return _EXPORTER_OTLP_PROTO_GRPC
        return exporter_name

    otlp_protocol = otlp_protocol.strip()

    if exporter_name == _EXPORTER_OTLP:
        if otlp_protocol not in _EXPORTER_BY_OTLP_PROTOCOL:
            # Invalid value was set by the env var
            raise RuntimeError(
                f"Unsupported OTLP protocol '{otlp_protocol}' is configured"
            )

        return _EXPORTER_BY_OTLP_PROTOCOL[otlp_protocol]

    # grpc/http already specified by exporter_name, only add a warning in case
    # of a conflict.
    exporter_name_by_env = _EXPORTER_BY_OTLP_PROTOCOL.get(otlp_protocol)
    if exporter_name_by_env and exporter_name != exporter_name_by_env:
        _logger.warning(
            "Conflicting values for %s OTLP exporter protocol, using '%s'",
            signal_type,
            exporter_name,
        )

    return exporter_name
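
# A small illustrative sketch of the resolution above (not part of this
# module; the env var names are real, the scenario is hypothetical):
#
#     environ["OTEL_EXPORTER_OTLP_PROTOCOL"] = "http/protobuf"
#     _get_exporter_entry_point("otlp", "traces")    # -> "otlp_proto_http"
#
#     del environ["OTEL_EXPORTER_OTLP_PROTOCOL"]
#     _get_exporter_entry_point("otlp", "traces")    # -> "otlp_proto_grpc" (default)
#     _get_exporter_entry_point("console", "traces") # -> "console" (passed through)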


def _get_exporter_names(
    signal_type: Literal["traces", "metrics", "logs"],
) -> Sequence[str]:
    names = environ.get(_EXPORTER_ENV_BY_SIGNAL_TYPE.get(signal_type, ""))

    if not names or names.lower().strip() == "none":
        return []

    return [
        _get_exporter_entry_point(_exporter.strip(), signal_type)
        for _exporter in names.split(",")
    ]


def _init_tracing(
    exporters: dict[str, Type[SpanExporter]],
    id_generator: IdGenerator | None = None,
    sampler: Sampler | None = None,
    resource: Resource | None = None,
):
    provider = TracerProvider(
        id_generator=id_generator,
        sampler=sampler,
        resource=resource,
    )
    set_tracer_provider(provider)

    for _, exporter_class in exporters.items():
        exporter_args = {}
        provider.add_span_processor(
            BatchSpanProcessor(exporter_class(**exporter_args))
        )


def _init_metrics(
    exporters_or_readers: dict[
        str, Union[Type[MetricExporter], Type[MetricReader]]
    ],
    resource: Resource | None = None,
):
    metric_readers = []

    for _, exporter_or_reader_class in exporters_or_readers.items():
        exporter_args = {}

        if issubclass(exporter_or_reader_class, MetricReader):
            metric_readers.append(exporter_or_reader_class(**exporter_args))
        else:
            metric_readers.append(
                PeriodicExportingMetricReader(
                    exporter_or_reader_class(**exporter_args)
                )
            )

    provider = MeterProvider(resource=resource, metric_readers=metric_readers)
    set_meter_provider(provider)


def _init_logging(
    exporters: dict[str, Type[LogExporter]],
    resource: Resource | None = None,
    setup_logging_handler: bool = True,
):
    provider = LoggerProvider(resource=resource)
    set_logger_provider(provider)

    for _, exporter_class in exporters.items():
        exporter_args = {}
        provider.add_log_record_processor(
            BatchLogRecordProcessor(exporter_class(**exporter_args))
        )

    event_logger_provider = EventLoggerProvider(logger_provider=provider)
    set_event_logger_provider(event_logger_provider)

    if setup_logging_handler:
        _patch_basic_config()

        # Add OTel handler
        handler = LoggingHandler(
            level=logging.NOTSET, logger_provider=provider
        )
        logging.getLogger().addHandler(handler)


def _patch_basic_config():
    original_basic_config = logging.basicConfig

    def patched_basic_config(*args, **kwargs):
        root = logging.getLogger()
        has_only_otel = len(root.handlers) == 1 and isinstance(
            root.handlers[0], LoggingHandler
        )
        if has_only_otel:
            otel_handler = root.handlers.pop()
            original_basic_config(*args, **kwargs)
            root.addHandler(otel_handler)
        else:
            original_basic_config(*args, **kwargs)

    logging.basicConfig = patched_basic_config
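
# Sketch of the behavior the patch above preserves (separate snippet): once
# the OTel handler is the only handler on the root logger, a plain
# logging.basicConfig() call would normally be a no-op because the root
# logger already has a handler. The patch temporarily removes the OTel
# handler so basicConfig() can still attach its console handler, then
# re-adds it:
#
#     _patch_basic_config()
#     logging.getLogger().addHandler(LoggingHandler())
#     logging.basicConfig(level=logging.INFO)  # still takes effect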


def _import_exporters(
    trace_exporter_names: Sequence[str],
    metric_exporter_names: Sequence[str],
    log_exporter_names: Sequence[str],
) -> tuple[
    dict[str, Type[SpanExporter]],
    dict[str, Union[Type[MetricExporter], Type[MetricReader]]],
    dict[str, Type[LogExporter]],
]:
    trace_exporters = {}
    metric_exporters = {}
    log_exporters = {}

    for (
        exporter_name,
        exporter_impl,
    ) in _import_config_components(
        trace_exporter_names, "opentelemetry_traces_exporter"
    ):
        if issubclass(exporter_impl, SpanExporter):
            trace_exporters[exporter_name] = exporter_impl
        else:
            raise RuntimeError(f"{exporter_name} is not a trace exporter")

    for (
        exporter_name,
        exporter_impl,
    ) in _import_config_components(
        metric_exporter_names, "opentelemetry_metrics_exporter"
    ):
        # The metric exporter components may be push MetricExporter or pull
        # exporters which subclass MetricReader directly
        if issubclass(exporter_impl, (MetricExporter, MetricReader)):
            metric_exporters[exporter_name] = exporter_impl
        else:
            raise RuntimeError(f"{exporter_name} is not a metric exporter")

    for (
        exporter_name,
        exporter_impl,
    ) in _import_config_components(
        log_exporter_names, "opentelemetry_logs_exporter"
    ):
        if issubclass(exporter_impl, LogExporter):
            log_exporters[exporter_name] = exporter_impl
        else:
            raise RuntimeError(f"{exporter_name} is not a log exporter")

    return trace_exporters, metric_exporters, log_exporters


def _import_sampler_factory(sampler_name: str) -> Callable[[str], Sampler]:
    _, sampler_impl = _import_config_components(
        [sampler_name.strip()], _OTEL_SAMPLER_ENTRY_POINT_GROUP
    )[0]
    return sampler_impl


def _import_sampler(sampler_name: str) -> Sampler | None:
    if not sampler_name:
        return None
    try:
        sampler_factory = _import_sampler_factory(sampler_name)
        arg = None
        if sampler_name in ("traceidratio", "parentbased_traceidratio"):
            try:
                rate = float(os.getenv(OTEL_TRACES_SAMPLER_ARG))
            except (ValueError, TypeError):
                _logger.warning(
                    "Could not convert OTEL_TRACES_SAMPLER_ARG to float. "
                    "Using default value 1.0."
                )
                rate = 1.0
            arg = rate
        else:
            arg = os.getenv(OTEL_TRACES_SAMPLER_ARG)

        sampler = sampler_factory(arg)
        if not isinstance(sampler, Sampler):
            message = f"Sampler factory, {sampler_factory}, produced output, {sampler}, which is not a Sampler."
            _logger.warning(message)
            raise ValueError(message)
        return sampler
    except Exception as exc:  # pylint: disable=broad-exception-caught
        _logger.warning(
            "Using default sampler. Failed to initialize sampler, %s: %s",
            sampler_name,
            exc,
        )
        return None


def _import_id_generator(id_generator_name: str) -> IdGenerator:
    id_generator_name, id_generator_impl = _import_config_components(
        [id_generator_name.strip()], "opentelemetry_id_generator"
    )[0]

    if issubclass(id_generator_impl, IdGenerator):
        return id_generator_impl()

    raise RuntimeError(f"{id_generator_name} is not an IdGenerator")


def _initialize_components(
    auto_instrumentation_version: str | None = None,
    trace_exporter_names: list[str] | None = None,
    metric_exporter_names: list[str] | None = None,
    log_exporter_names: list[str] | None = None,
    sampler: Sampler | None = None,
    resource_attributes: Attributes | None = None,
    id_generator: IdGenerator | None = None,
    setup_logging_handler: bool | None = None,
):
    if trace_exporter_names is None:
        trace_exporter_names = []
    if metric_exporter_names is None:
        metric_exporter_names = []
    if log_exporter_names is None:
        log_exporter_names = []
    span_exporters, metric_exporters, log_exporters = _import_exporters(
        trace_exporter_names + _get_exporter_names("traces"),
        metric_exporter_names + _get_exporter_names("metrics"),
        log_exporter_names + _get_exporter_names("logs"),
    )
    if sampler is None:
        sampler_name = _get_sampler()
        sampler = _import_sampler(sampler_name)
    if id_generator is None:
        id_generator_name = _get_id_generator()
        id_generator = _import_id_generator(id_generator_name)
    if resource_attributes is None:
        resource_attributes = {}
    # populate version if using auto-instrumentation
    if auto_instrumentation_version:
        resource_attributes[ResourceAttributes.TELEMETRY_AUTO_VERSION] = (
            auto_instrumentation_version
        )
    # if the env var OTEL_RESOURCE_ATTRIBUTES is set, the service name is
    # read from it; otherwise it defaults to "unknown_service"
    resource = Resource.create(resource_attributes)

    _init_tracing(
        exporters=span_exporters,
        id_generator=id_generator,
        sampler=sampler,
        resource=resource,
    )
    _init_metrics(metric_exporters, resource)
    if setup_logging_handler is None:
        setup_logging_handler = (
            os.getenv(
                _OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED, "false"
            )
            .strip()
            .lower()
            == "true"
        )
    _init_logging(log_exporters, resource, setup_logging_handler)
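
# A minimal sketch of direct invocation, as auto-instrumentation does
# (separate snippet; the "console" exporters ship with the SDK, other names
# resolve through the entry points loaded above):
#
#     _initialize_components(
#         trace_exporter_names=["console"],
#         metric_exporter_names=["console"],
#         log_exporter_names=["console"],
#         setup_logging_handler=False,
#     )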


class _BaseConfigurator(ABC):
    """An ABC for configurators

    Configurators are used to configure
    SDKs (i.e. TracerProvider, MeterProvider, Processors...)
    to reduce the amount of manual configuration required.
    """

    _instance = None
    _is_instrumented = False

    def __new__(cls, *args, **kwargs):
        if cls._instance is None:
            cls._instance = object.__new__(cls, *args, **kwargs)

        return cls._instance

    @abstractmethod
    def _configure(self, **kwargs):
        """Configure the SDK"""

    def configure(self, **kwargs):
        """Configure the SDK"""
        self._configure(**kwargs)


class _OTelSDKConfigurator(_BaseConfigurator):
    """A basic Configurator by OTel Python for initializing OTel SDK components

    Initializes several crucial OTel SDK components (i.e. TracerProvider,
    MeterProvider, Processors...) according to a default implementation. Other
    Configurators can subclass and slightly alter this initialization.

    NOTE: This class should not be instantiated nor should it become an entry
    point on the `opentelemetry-sdk` package. Instead, distros should subclass
    this Configurator and enhance it as needed.
    """

    def _configure(self, **kwargs):
        _initialize_components(**kwargs)
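

# A minimal sketch of how a distro is expected to hook in (separate snippet;
# the class name is hypothetical):
class MyDistroConfigurator(_OTelSDKConfigurator):
    def _configure(self, **kwargs):
        # A distro can pre-seed defaults before delegating to the base
        # implementation, which calls _initialize_components().
        kwargs.setdefault("trace_exporter_names", ["console"])
        super()._configure(**kwargs)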
Binary file not shown.
@@ -0,0 +1,89 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from time import time_ns
from typing import Optional

from opentelemetry import trace
from opentelemetry._events import Event
from opentelemetry._events import EventLogger as APIEventLogger
from opentelemetry._events import EventLoggerProvider as APIEventLoggerProvider
from opentelemetry._logs import NoOpLogger, SeverityNumber, get_logger_provider
from opentelemetry.sdk._logs import Logger, LoggerProvider, LogRecord
from opentelemetry.util.types import Attributes

_logger = logging.getLogger(__name__)


class EventLogger(APIEventLogger):
    def __init__(
        self,
        logger_provider: LoggerProvider,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ):
        super().__init__(
            name=name,
            version=version,
            schema_url=schema_url,
            attributes=attributes,
        )
        self._logger: Logger = logger_provider.get_logger(
            name, version, schema_url, attributes
        )

    def emit(self, event: Event) -> None:
        if isinstance(self._logger, NoOpLogger):
            # Do nothing if SDK is disabled
            return
        span_context = trace.get_current_span().get_span_context()
        log_record = LogRecord(
            timestamp=event.timestamp or time_ns(),
            observed_timestamp=None,
            trace_id=event.trace_id or span_context.trace_id,
            span_id=event.span_id or span_context.span_id,
            trace_flags=event.trace_flags or span_context.trace_flags,
            severity_text=None,
            severity_number=event.severity_number or SeverityNumber.INFO,
            body=event.body,
            resource=getattr(self._logger, "resource", None),
            attributes=event.attributes,
        )
        self._logger.emit(log_record)


class EventLoggerProvider(APIEventLoggerProvider):
    def __init__(self, logger_provider: Optional[LoggerProvider] = None):
        self._logger_provider = logger_provider or get_logger_provider()

    def get_event_logger(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> EventLogger:
        if not name:
            _logger.warning("EventLogger created with invalid name: %s", name)
        return EventLogger(
            self._logger_provider, name, version, schema_url, attributes
        )

    def shutdown(self):
        self._logger_provider.shutdown()

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        return self._logger_provider.force_flush(timeout_millis)
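

# A minimal end-to-end usage sketch (separate snippet; the scope and event
# names are hypothetical):
#
#     from opentelemetry._events import Event
#     from opentelemetry.sdk._logs import LoggerProvider
#     from opentelemetry.sdk._logs.export import (
#         ConsoleLogExporter,
#         SimpleLogRecordProcessor,
#     )
#
#     logger_provider = LoggerProvider()
#     logger_provider.add_log_record_processor(
#         SimpleLogRecordProcessor(ConsoleLogExporter())
#     )
#     event_logger = EventLoggerProvider(logger_provider).get_event_logger(
#         "my.scope"
#     )
#     event_logger.emit(Event(name="session.start", body={"ok": True}))
#     logger_provider.force_flush()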
Binary file not shown.
@@ -0,0 +1,36 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from opentelemetry.sdk._logs._internal import (
    LogData,
    LogDroppedAttributesWarning,
    Logger,
    LoggerProvider,
    LoggingHandler,
    LogLimits,
    LogRecord,
    LogRecordProcessor,
)

__all__ = [
    "LogData",
    "Logger",
    "LoggerProvider",
    "LoggingHandler",
    "LogLimits",
    "LogRecord",
    "LogRecordProcessor",
    "LogDroppedAttributesWarning",
]
Binary file not shown.
@@ -0,0 +1,715 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import abc
import atexit
import concurrent.futures
import json
import logging
import threading
import traceback
import warnings
from os import environ
from threading import Lock
from time import time_ns
from typing import Any, Callable, Tuple, Union  # noqa

from opentelemetry._logs import Logger as APILogger
from opentelemetry._logs import LoggerProvider as APILoggerProvider
from opentelemetry._logs import LogRecord as APILogRecord
from opentelemetry._logs import (
    NoOpLogger,
    SeverityNumber,
    get_logger,
    get_logger_provider,
    std_to_otel,
)
from opentelemetry.attributes import _VALID_ANY_VALUE_TYPES, BoundedAttributes
from opentelemetry.sdk.environment_variables import (
    OTEL_ATTRIBUTE_COUNT_LIMIT,
    OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT,
    OTEL_SDK_DISABLED,
)
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.util import ns_to_iso_str
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
from opentelemetry.semconv.trace import SpanAttributes
from opentelemetry.trace import (
    format_span_id,
    format_trace_id,
    get_current_span,
)
from opentelemetry.trace.span import TraceFlags
from opentelemetry.util.types import AnyValue, Attributes

_logger = logging.getLogger(__name__)

_DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT = 128
_ENV_VALUE_UNSET = ""


class LogDroppedAttributesWarning(UserWarning):
    """Custom warning to indicate dropped log attributes due to limits.

    This class is used to filter and handle these specific warnings separately
    from other warnings, ensuring that they are only shown once without
    interfering with default user warnings.
    """


warnings.simplefilter("once", LogDroppedAttributesWarning)


class LogLimits:
    """This class is based on the SpanLimits class in the Tracing module.

    This class represents the limits that should be enforced on recorded
    log data, such as attributes.

    This class does not enforce any limits itself. It only provides a way to
    read limits from environment variables, default values, and user-provided
    arguments.

    All limit arguments must be either a non-negative integer, ``None`` or
    ``LogLimits.UNSET``.

    - All limit arguments are optional.
    - If a limit argument is not set, the class will try to read its value
      from the corresponding environment variable.
    - If the environment variable is not set, the default value, if any,
      will be used.

    Limit precedence:

    - If a model-specific limit is set, it will be used.
    - Else, if the corresponding global limit is set, it will be used.
    - Else, if the model-specific limit has a default value, the default
      value will be used.
    - Else, if the global limit has a default value, the default value will
      be used.

    Args:
        max_attributes: Maximum number of attributes that can be added to a
            log record. Environment variable: ``OTEL_ATTRIBUTE_COUNT_LIMIT``.
            Default: {_DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT}.
        max_attribute_length: Maximum length an attribute value can have.
            Values longer than the specified length will be truncated.
    """

    UNSET = -1

    def __init__(
        self,
        max_attributes: int | None = None,
        max_attribute_length: int | None = None,
    ):
        # attribute count
        global_max_attributes = self._from_env_if_absent(
            max_attributes, OTEL_ATTRIBUTE_COUNT_LIMIT
        )
        self.max_attributes = (
            global_max_attributes
            if global_max_attributes is not None
            else _DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT
        )

        # attribute length
        self.max_attribute_length = self._from_env_if_absent(
            max_attribute_length,
            OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT,
        )

    def __repr__(self):
        return f"{type(self).__name__}(max_attributes={self.max_attributes}, max_attribute_length={self.max_attribute_length})"

    @classmethod
    def _from_env_if_absent(
        cls, value: int | None, env_var: str, default: int | None = None
    ) -> int | None:
        if value == cls.UNSET:
            return None

        err_msg = "{} must be a non-negative integer but got {}"

        # if no value is provided for the limit, try to load it from env
        if value is None:
            # return default value if env var is not set
            if env_var not in environ:
                return default

            str_value = environ.get(env_var, "").strip().lower()
            if str_value == _ENV_VALUE_UNSET:
                return None

            try:
                value = int(str_value)
            except ValueError:
                raise ValueError(err_msg.format(env_var, str_value))

        if value < 0:
            raise ValueError(err_msg.format(env_var, value))
        return value
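

# A small sketch of how the precedence above plays out (separate snippet):
#
#     limits = LogLimits(max_attributes=10, max_attribute_length=200)
#     limits.max_attributes        # 10, the explicit argument wins over env
#
#     unlimited = LogLimits(max_attributes=LogLimits.UNSET)
#     unlimited.max_attributes     # None, i.e. no count limit is enforced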


_UnsetLogLimits = LogLimits(
    max_attributes=LogLimits.UNSET,
    max_attribute_length=LogLimits.UNSET,
)


class LogRecord(APILogRecord):
    """A LogRecord instance represents an event being logged.

    LogRecord instances are created and emitted via `Logger`
    every time something is logged. They contain all the information
    pertinent to the event being logged.
    """

    def __init__(
        self,
        timestamp: int | None = None,
        observed_timestamp: int | None = None,
        trace_id: int | None = None,
        span_id: int | None = None,
        trace_flags: TraceFlags | None = None,
        severity_text: str | None = None,
        severity_number: SeverityNumber | None = None,
        body: AnyValue | None = None,
        resource: Resource | None = None,
        attributes: Attributes | None = None,
        limits: LogLimits | None = _UnsetLogLimits,
    ):
        super().__init__(
            **{
                "timestamp": timestamp,
                "observed_timestamp": observed_timestamp,
                "trace_id": trace_id,
                "span_id": span_id,
                "trace_flags": trace_flags,
                "severity_text": severity_text,
                "severity_number": severity_number,
                "body": body,
                "attributes": BoundedAttributes(
                    maxlen=limits.max_attributes,
                    attributes=attributes if bool(attributes) else None,
                    immutable=False,
                    max_value_len=limits.max_attribute_length,
                ),
            }
        )
        self.resource = (
            resource if isinstance(resource, Resource) else Resource.create({})
        )
        if self.dropped_attributes > 0:
            warnings.warn(
                "Log record attributes were dropped due to limits",
                LogDroppedAttributesWarning,
                stacklevel=2,
            )

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, LogRecord):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def to_json(self, indent: int | None = 4) -> str:
        return json.dumps(
            {
                "body": self.body,
                "severity_number": self.severity_number.value
                if self.severity_number is not None
                else None,
                "severity_text": self.severity_text,
                "attributes": (
                    dict(self.attributes) if bool(self.attributes) else None
                ),
                "dropped_attributes": self.dropped_attributes,
                "timestamp": ns_to_iso_str(self.timestamp),
                "observed_timestamp": ns_to_iso_str(self.observed_timestamp),
                "trace_id": (
                    f"0x{format_trace_id(self.trace_id)}"
                    if self.trace_id is not None
                    else ""
                ),
                "span_id": (
                    f"0x{format_span_id(self.span_id)}"
                    if self.span_id is not None
                    else ""
                ),
                "trace_flags": self.trace_flags,
                "resource": json.loads(self.resource.to_json()),
            },
            indent=indent,
        )

    @property
    def dropped_attributes(self) -> int:
        if self.attributes:
            return self.attributes.dropped
        return 0
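

# A small sketch of the attribute-limit behavior above (separate snippet;
# with the default limit of 128, 200 attributes leave 72 dropped and a
# LogDroppedAttributesWarning is shown once per process):
#
#     from time import time_ns
#     from opentelemetry._logs import SeverityNumber
#
#     record = LogRecord(
#         timestamp=time_ns(),
#         severity_text="INFO",
#         severity_number=SeverityNumber.INFO,
#         body="payload",
#         attributes={f"k{i}": i for i in range(200)},
#     )
#     record.dropped_attributes    # 72
#     record.to_json(indent=None)  # single-line JSON string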


class LogData:
    """Readable LogRecord data plus associated InstrumentationScope."""

    def __init__(
        self,
        log_record: LogRecord,
        instrumentation_scope: InstrumentationScope,
    ):
        self.log_record = log_record
        self.instrumentation_scope = instrumentation_scope


class LogRecordProcessor(abc.ABC):
    """Interface to hook the log record emitting action.

    Log processors can be registered directly using
    :func:`LoggerProvider.add_log_record_processor` and they are invoked
    in the same order as they were registered.
    """

    @abc.abstractmethod
    def emit(self, log_data: LogData):
        """Emits the `LogData`"""

    @abc.abstractmethod
    def shutdown(self):
        """Called when a :class:`opentelemetry.sdk._logs.Logger` is shutdown"""

    @abc.abstractmethod
    def force_flush(self, timeout_millis: int = 30000):
        """Export all the received logs that have not yet been exported to the
        configured Exporter.

        Args:
            timeout_millis: The maximum amount of time to wait for logs to be
                exported.

        Returns:
            False if the timeout is exceeded, True otherwise.
        """


# Temporary fix until https://github.com/PyCQA/pylint/issues/4098 is resolved
# pylint:disable=no-member
class SynchronousMultiLogRecordProcessor(LogRecordProcessor):
    """Implementation of :class:`LogRecordProcessor` that forwards all received
    events to a list of log processors sequentially.

    The underlying log processors are called in sequential order as they were
    added.
    """

    def __init__(self):
        # use a tuple to avoid race conditions when adding a new log and
        # iterating through it on "emit".
        self._log_record_processors = ()  # type: Tuple[LogRecordProcessor, ...]
        self._lock = threading.Lock()

    def add_log_record_processor(
        self, log_record_processor: LogRecordProcessor
    ) -> None:
        """Adds a LogRecordProcessor to the list of log processors handled by this instance"""
        with self._lock:
            self._log_record_processors += (log_record_processor,)

    def emit(self, log_data: LogData) -> None:
        for lp in self._log_record_processors:
            lp.emit(log_data)

    def shutdown(self) -> None:
        """Shut down the log processors one by one"""
        for lp in self._log_record_processors:
            lp.shutdown()

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        """Force flush the log processors one by one

        Args:
            timeout_millis: The maximum amount of time to wait for logs to be
                exported. If the first n log processors exceed the timeout,
                the remaining log processors will not be flushed.

        Returns:
            True if all the log processors flush the logs within the timeout,
            False otherwise.
        """
        deadline_ns = time_ns() + timeout_millis * 1000000
        for lp in self._log_record_processors:
            current_ts = time_ns()
            if current_ts >= deadline_ns:
                return False

            if not lp.force_flush((deadline_ns - current_ts) // 1000000):
                return False

        return True


class ConcurrentMultiLogRecordProcessor(LogRecordProcessor):
    """Implementation of :class:`LogRecordProcessor` that forwards all received
    events to a list of log processors in parallel.

    Calls to the underlying log processors are forwarded in parallel by
    submitting them to a thread pool executor and waiting until each log
    processor has finished its work.

    Args:
        max_workers: The number of threads managed by the thread pool executor
            and thus defining how many log processors can work in parallel.
    """

    def __init__(self, max_workers: int = 2):
        # use a tuple to avoid race conditions when adding a new log and
        # iterating through it on "emit".
        self._log_record_processors = ()  # type: Tuple[LogRecordProcessor, ...]
        self._lock = threading.Lock()
        self._executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=max_workers
        )

    def add_log_record_processor(
        self, log_record_processor: LogRecordProcessor
    ):
        with self._lock:
            self._log_record_processors += (log_record_processor,)

    def _submit_and_wait(
        self,
        func: Callable[[LogRecordProcessor], Callable[..., None]],
        *args: Any,
        **kwargs: Any,
    ):
        futures = []
        for lp in self._log_record_processors:
            future = self._executor.submit(func(lp), *args, **kwargs)
            futures.append(future)
        for future in futures:
            future.result()

    def emit(self, log_data: LogData):
        self._submit_and_wait(lambda lp: lp.emit, log_data)

    def shutdown(self):
        self._submit_and_wait(lambda lp: lp.shutdown)

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        """Force flush the log processors in parallel.

        Args:
            timeout_millis: The maximum amount of time to wait for logs to be
                exported.

        Returns:
            True if all the log processors flush the logs within the timeout,
            False otherwise.
        """
        futures = []
        for lp in self._log_record_processors:
            future = self._executor.submit(lp.force_flush, timeout_millis)
            futures.append(future)

        done_futures, not_done_futures = concurrent.futures.wait(
            futures, timeout_millis / 1e3
        )

        if not_done_futures:
            return False

        for future in done_futures:
            if not future.result():
                return False

        return True
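

# A small sketch of opting in to the parallel fan-out (separate snippet;
# LoggerProvider is defined later in this module and accepts either
# multi-processor):
#
#     provider = LoggerProvider(
#         multi_log_record_processor=ConcurrentMultiLogRecordProcessor(
#             max_workers=4
#         )
#     )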


# skip natural LogRecord attributes
# http://docs.python.org/library/logging.html#logrecord-attributes
_RESERVED_ATTRS = frozenset(
    (
        "asctime",
        "args",
        "created",
        "exc_info",
        "exc_text",
        "filename",
        "funcName",
        "getMessage",
        "message",
        "levelname",
        "levelno",
        "lineno",
        "module",
        "msecs",
        "msg",
        "name",
        "pathname",
        "process",
        "processName",
        "relativeCreated",
        "stack_info",
        "thread",
        "threadName",
        "taskName",
    )
)


class LoggingHandler(logging.Handler):
    """A handler class which writes logging records, in OTLP format, to
    a network destination or file. Supports signals from the `logging` module.
    https://docs.python.org/3/library/logging.html
    """

    def __init__(
        self,
        level=logging.NOTSET,
        logger_provider=None,
    ) -> None:
        super().__init__(level=level)
        self._logger_provider = logger_provider or get_logger_provider()

    @staticmethod
    def _get_attributes(record: logging.LogRecord) -> Attributes:
        attributes = {
            k: v for k, v in vars(record).items() if k not in _RESERVED_ATTRS
        }

        # Add standard code attributes for logs.
        attributes[SpanAttributes.CODE_FILEPATH] = record.pathname
        attributes[SpanAttributes.CODE_FUNCTION] = record.funcName
        attributes[SpanAttributes.CODE_LINENO] = record.lineno

        if record.exc_info:
            exctype, value, tb = record.exc_info
            if exctype is not None:
                attributes[SpanAttributes.EXCEPTION_TYPE] = exctype.__name__
            if value is not None and value.args:
                attributes[SpanAttributes.EXCEPTION_MESSAGE] = str(
                    value.args[0]
                )
            if tb is not None:
                # https://github.com/open-telemetry/opentelemetry-specification/blob/9fa7c656b26647b27e485a6af7e38dc716eba98a/specification/trace/semantic_conventions/exceptions.md#stacktrace-representation
                attributes[SpanAttributes.EXCEPTION_STACKTRACE] = "".join(
                    traceback.format_exception(*record.exc_info)
                )
        return attributes

    def _translate(self, record: logging.LogRecord) -> LogRecord:
        timestamp = int(record.created * 1e9)
        observed_timestamp = time_ns()
        span_context = get_current_span().get_span_context()
        attributes = self._get_attributes(record)
        severity_number = std_to_otel(record.levelno)
        if self.formatter:
            body = self.format(record)
        else:
            # `record.getMessage()` uses `record.msg` as a template to format
            # `record.args` into. There is a special case in `record.getMessage()`
            # where it will only attempt formatting if args are provided,
            # otherwise, it just stringifies `record.msg`.
            #
            # Since the OTLP body field has a type of 'any' and the logging module
            # is sometimes used in such a way that objects incorrectly end up
            # set as record.msg, in those cases we would like to bypass
            # `record.getMessage()` completely and set the body to the object
            # itself instead of its string representation.
            # For more background, see: https://github.com/open-telemetry/opentelemetry-python/pull/4216
            if not record.args and not isinstance(record.msg, str):
                # if record.msg is not a value we can export, cast it to string
                if not isinstance(record.msg, _VALID_ANY_VALUE_TYPES):
                    body = str(record.msg)
                else:
                    body = record.msg
            else:
                body = record.getMessage()

        # related to https://github.com/open-telemetry/opentelemetry-python/issues/3548
        # Severity Text = WARN as defined in https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#displaying-severity.
        level_name = (
            "WARN" if record.levelname == "WARNING" else record.levelname
        )

        logger = get_logger(record.name, logger_provider=self._logger_provider)
        return LogRecord(
            timestamp=timestamp,
            observed_timestamp=observed_timestamp,
            trace_id=span_context.trace_id,
            span_id=span_context.span_id,
            trace_flags=span_context.trace_flags,
            severity_text=level_name,
            severity_number=severity_number,
            body=body,
            resource=logger.resource,
            attributes=attributes,
        )

    def emit(self, record: logging.LogRecord) -> None:
        """
        Emit a record. Skip emitting if the logger is NoOp.

        The record is translated to OTel format, and then sent across the pipeline.
        """
        logger = get_logger(record.name, logger_provider=self._logger_provider)
        if not isinstance(logger, NoOpLogger):
            logger.emit(self._translate(record))

    def flush(self) -> None:
        """
        Flushes the logging output. Skip flushing if the logger provider has no force_flush method.
        """
        if hasattr(self._logger_provider, "force_flush") and callable(
            self._logger_provider.force_flush
        ):
            self._logger_provider.force_flush()
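

# A minimal usage sketch (separate snippet) routing stdlib logging through
# the SDK; ConsoleLogExporter and SimpleLogRecordProcessor come from the
# sibling export module:
#
#     import logging
#     from opentelemetry.sdk._logs.export import (
#         ConsoleLogExporter,
#         SimpleLogRecordProcessor,
#     )
#
#     provider = LoggerProvider()
#     provider.add_log_record_processor(
#         SimpleLogRecordProcessor(ConsoleLogExporter())
#     )
#     logging.getLogger().addHandler(
#         LoggingHandler(level=logging.INFO, logger_provider=provider)
#     )
#     logging.getLogger("demo").warning("hello via OpenTelemetry")
#     provider.force_flush()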


class Logger(APILogger):
    def __init__(
        self,
        resource: Resource,
        multi_log_record_processor: Union[
            SynchronousMultiLogRecordProcessor,
            ConcurrentMultiLogRecordProcessor,
        ],
        instrumentation_scope: InstrumentationScope,
    ):
        super().__init__(
            instrumentation_scope.name,
            instrumentation_scope.version,
            instrumentation_scope.schema_url,
            instrumentation_scope.attributes,
        )
        self._resource = resource
        self._multi_log_record_processor = multi_log_record_processor
        self._instrumentation_scope = instrumentation_scope

    @property
    def resource(self):
        return self._resource

    def emit(self, record: LogRecord):
        """Emits the :class:`LogData` by associating the :class:`LogRecord`
        with instrumentation info.
        """
        log_data = LogData(record, self._instrumentation_scope)
        self._multi_log_record_processor.emit(log_data)


class LoggerProvider(APILoggerProvider):
    def __init__(
        self,
        resource: Resource | None = None,
        shutdown_on_exit: bool = True,
        multi_log_record_processor: SynchronousMultiLogRecordProcessor
        | ConcurrentMultiLogRecordProcessor
        | None = None,
    ):
        if resource is None:
            self._resource = Resource.create({})
        else:
            self._resource = resource
        self._multi_log_record_processor = (
            multi_log_record_processor or SynchronousMultiLogRecordProcessor()
        )
        disabled = environ.get(OTEL_SDK_DISABLED, "")
        self._disabled = disabled.lower().strip() == "true"
        self._at_exit_handler = None
        if shutdown_on_exit:
            self._at_exit_handler = atexit.register(self.shutdown)
        self._logger_cache = {}
        self._logger_cache_lock = Lock()

    @property
    def resource(self):
        return self._resource

    def _get_logger_no_cache(
        self,
        name: str,
        version: str | None = None,
        schema_url: str | None = None,
        attributes: Attributes | None = None,
    ) -> Logger:
        return Logger(
            self._resource,
            self._multi_log_record_processor,
            InstrumentationScope(
                name,
                version,
                schema_url,
                attributes,
            ),
        )

    def _get_logger_cached(
        self,
        name: str,
        version: str | None = None,
        schema_url: str | None = None,
    ) -> Logger:
        with self._logger_cache_lock:
            key = (name, version, schema_url)
            if key in self._logger_cache:
                return self._logger_cache[key]

            self._logger_cache[key] = self._get_logger_no_cache(
                name, version, schema_url
            )
            return self._logger_cache[key]

    def get_logger(
        self,
        name: str,
        version: str | None = None,
        schema_url: str | None = None,
        attributes: Attributes | None = None,
    ) -> Logger:
        if self._disabled:
            return NoOpLogger(
                name,
                version=version,
                schema_url=schema_url,
                attributes=attributes,
            )
        if attributes is None:
            return self._get_logger_cached(name, version, schema_url)
        return self._get_logger_no_cache(name, version, schema_url, attributes)

    def add_log_record_processor(
        self, log_record_processor: LogRecordProcessor
    ):
        """Registers a new :class:`LogRecordProcessor` for this `LoggerProvider` instance.

        The log processors are invoked in the same order they are registered.
        """
        self._multi_log_record_processor.add_log_record_processor(
            log_record_processor
        )

    def shutdown(self):
        """Shuts down the log processors."""
        self._multi_log_record_processor.shutdown()
        if self._at_exit_handler is not None:
            atexit.unregister(self._at_exit_handler)
            self._at_exit_handler = None

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        """Force flush the log processors.

        Args:
            timeout_millis: The maximum amount of time to wait for logs to be
                exported.

        Returns:
            True if all the log processors flush the logs within the timeout,
            False otherwise.
        """
        return self._multi_log_record_processor.force_flush(timeout_millis)
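

# A small sketch of the caching rules implemented above (separate snippet;
# the scope name is hypothetical):
#
#     provider = LoggerProvider()
#     a = provider.get_logger("my.library", version="1.0")
#     b = provider.get_logger("my.library", version="1.0")
#     assert a is b  # cached on the (name, version, schema_url) key
#
#     # passing instrumentation-scope attributes bypasses the cache:
#     c = provider.get_logger("my.library", version="1.0", attributes={"k": "v"})
#     assert c is not a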
Binary file not shown.
@@ -0,0 +1,464 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations

import abc
import collections
import enum
import logging
import os
import sys
import threading
import weakref
from os import environ, linesep
from time import time_ns
from typing import IO, Callable, Deque, List, Optional, Sequence

from opentelemetry.context import (
    _SUPPRESS_INSTRUMENTATION_KEY,
    attach,
    detach,
    set_value,
)
from opentelemetry.sdk._logs import LogData, LogRecord, LogRecordProcessor
from opentelemetry.sdk.environment_variables import (
    OTEL_BLRP_EXPORT_TIMEOUT,
    OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
    OTEL_BLRP_MAX_QUEUE_SIZE,
    OTEL_BLRP_SCHEDULE_DELAY,
)
from opentelemetry.util._once import Once

_DEFAULT_SCHEDULE_DELAY_MILLIS = 5000
_DEFAULT_MAX_EXPORT_BATCH_SIZE = 512
_DEFAULT_EXPORT_TIMEOUT_MILLIS = 30000
_DEFAULT_MAX_QUEUE_SIZE = 2048
_ENV_VAR_INT_VALUE_ERROR_MESSAGE = (
    "Unable to parse value for %s as integer. Defaulting to %s."
)

_logger = logging.getLogger(__name__)


class LogExportResult(enum.Enum):
    SUCCESS = 0
    FAILURE = 1


class LogExporter(abc.ABC):
    """Interface for exporting logs.

    Interface to be implemented by services that want to export logs received
    in their own format.

    To export data this MUST be registered to the
    :class:`opentelemetry.sdk._logs.Logger` using a log processor.
    """

    @abc.abstractmethod
    def export(self, batch: Sequence[LogData]):
        """Exports a batch of logs.

        Args:
            batch: The list of `LogData` objects to be exported

        Returns:
            The result of the export
        """

    @abc.abstractmethod
    def shutdown(self):
        """Shuts down the exporter.

        Called when the SDK is shut down.
        """


class ConsoleLogExporter(LogExporter):
    """Implementation of :class:`LogExporter` that prints log records to the
    console.

    This class can be used for diagnostic purposes. It prints the exported
    log records to the console STDOUT.
    """

    def __init__(
        self,
        out: IO = sys.stdout,
        formatter: Callable[[LogRecord], str] = lambda record: record.to_json()
        + linesep,
    ):
        self.out = out
        self.formatter = formatter

    def export(self, batch: Sequence[LogData]):
        for data in batch:
            self.out.write(self.formatter(data.log_record))
        self.out.flush()
        return LogExportResult.SUCCESS

    def shutdown(self):
        pass
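

# A small sketch of a custom formatter (separate snippet; the formatter
# simply maps a LogRecord to the string that is written to `out`):
#
#     import sys
#
#     exporter = ConsoleLogExporter(
#         out=sys.stderr,
#         formatter=lambda record: f"{record.severity_text}: {record.body}\n",
#     )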


class SimpleLogRecordProcessor(LogRecordProcessor):
    """This is an implementation of LogRecordProcessor which passes
    received logs in the export-friendly LogData representation to the
    configured LogExporter, as soon as they are emitted.
    """

    def __init__(self, exporter: LogExporter):
        self._exporter = exporter
        self._shutdown = False

    def emit(self, log_data: LogData):
        if self._shutdown:
            _logger.warning("Processor is already shutdown, ignoring call")
            return
        token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
        try:
            self._exporter.export((log_data,))
        except Exception:  # pylint: disable=broad-exception-caught
            _logger.exception("Exception while exporting logs.")
        detach(token)

    def shutdown(self):
        self._shutdown = True
        self._exporter.shutdown()

    def force_flush(self, timeout_millis: int = 30000) -> bool:  # pylint: disable=no-self-use
        return True


class _FlushRequest:
    __slots__ = ["event", "num_log_records"]

    def __init__(self):
        self.event = threading.Event()
        self.num_log_records = 0


_BSP_RESET_ONCE = Once()


class BatchLogRecordProcessor(LogRecordProcessor):
    """This is an implementation of LogRecordProcessor which creates batches
    of received logs in the export-friendly LogData representation and sends
    them to the configured LogExporter, as soon as they are emitted.

    `BatchLogRecordProcessor` is configurable with the following environment
    variables which correspond to constructor parameters:

    - :envvar:`OTEL_BLRP_SCHEDULE_DELAY`
    - :envvar:`OTEL_BLRP_MAX_QUEUE_SIZE`
    - :envvar:`OTEL_BLRP_MAX_EXPORT_BATCH_SIZE`
    - :envvar:`OTEL_BLRP_EXPORT_TIMEOUT`
    """

    _queue: Deque[LogData]
    _flush_request: _FlushRequest | None
    _log_records: List[LogData | None]

    def __init__(
        self,
        exporter: LogExporter,
        schedule_delay_millis: float | None = None,
        max_export_batch_size: int | None = None,
        export_timeout_millis: float | None = None,
        max_queue_size: int | None = None,
    ):
        if max_queue_size is None:
            max_queue_size = BatchLogRecordProcessor._default_max_queue_size()

        if schedule_delay_millis is None:
            schedule_delay_millis = (
                BatchLogRecordProcessor._default_schedule_delay_millis()
            )

        if max_export_batch_size is None:
            max_export_batch_size = (
                BatchLogRecordProcessor._default_max_export_batch_size()
            )

        if export_timeout_millis is None:
            export_timeout_millis = (
                BatchLogRecordProcessor._default_export_timeout_millis()
            )

        BatchLogRecordProcessor._validate_arguments(
            max_queue_size, schedule_delay_millis, max_export_batch_size
        )

        self._exporter = exporter
        self._max_queue_size = max_queue_size
        self._schedule_delay_millis = schedule_delay_millis
        self._max_export_batch_size = max_export_batch_size
        self._export_timeout_millis = export_timeout_millis
        self._queue = collections.deque([], max_queue_size)
        self._worker_thread = threading.Thread(
            name="OtelBatchLogRecordProcessor",
            target=self.worker,
            daemon=True,
        )
        self._condition = threading.Condition(threading.Lock())
        self._shutdown = False
        self._flush_request = None
        self._log_records = [None] * self._max_export_batch_size
        self._worker_thread.start()
        if hasattr(os, "register_at_fork"):
            weak_reinit = weakref.WeakMethod(self._at_fork_reinit)
            os.register_at_fork(after_in_child=lambda: weak_reinit()())  # pylint: disable=unnecessary-lambda
        self._pid = os.getpid()

    def _at_fork_reinit(self):
        self._condition = threading.Condition(threading.Lock())
        self._queue.clear()
        self._worker_thread = threading.Thread(
            name="OtelBatchLogRecordProcessor",
            target=self.worker,
            daemon=True,
        )
        self._worker_thread.start()
        self._pid = os.getpid()

    def worker(self):
        timeout = self._schedule_delay_millis / 1e3
        flush_request: Optional[_FlushRequest] = None
        while not self._shutdown:
            with self._condition:
                if self._shutdown:
                    # shutdown may have been called, avoid further processing
                    break
                flush_request = self._get_and_unset_flush_request()
                if (
                    len(self._queue) < self._max_export_batch_size
                    and flush_request is None
                ):
                    self._condition.wait(timeout)

                    flush_request = self._get_and_unset_flush_request()
                    if not self._queue:
                        timeout = self._schedule_delay_millis / 1e3
                        self._notify_flush_request_finished(flush_request)
                        flush_request = None
                        continue
                    if self._shutdown:
                        break

            start_ns = time_ns()
            self._export(flush_request)
            end_ns = time_ns()
            # subtract the duration of this export call from the next timeout
            timeout = self._schedule_delay_millis / 1e3 - (
                (end_ns - start_ns) / 1e9
            )

            self._notify_flush_request_finished(flush_request)
            flush_request = None

        # there might have been a new flush request while export was running
        # and before the done flag switched to true
        with self._condition:
            shutdown_flush_request = self._get_and_unset_flush_request()

        # flush the remaining logs
        self._drain_queue()
        self._notify_flush_request_finished(flush_request)
        self._notify_flush_request_finished(shutdown_flush_request)

    def _export(self, flush_request: Optional[_FlushRequest] = None):
        """Exports logs considering the given flush_request.

        If flush_request is not None, logs are exported in batches until the
        number of exported logs reaches or exceeds the number of logs in
        flush_request; otherwise at most max_export_batch_size logs are
        exported.
        """
        if flush_request is None:
            self._export_batch()
            return

        num_log_records = flush_request.num_log_records
        while self._queue:
            exported = self._export_batch()
            num_log_records -= exported

            if num_log_records <= 0:
                break

    def _export_batch(self) -> int:
        """Exports at most max_export_batch_size logs and returns the number
        of exported logs.
        """
        idx = 0
        while idx < self._max_export_batch_size and self._queue:
            record = self._queue.pop()
            self._log_records[idx] = record
            idx += 1
        token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
        try:
            self._exporter.export(self._log_records[:idx])  # type: ignore
        except Exception:  # pylint: disable=broad-exception-caught
            _logger.exception("Exception while exporting logs.")
        detach(token)

        for index in range(idx):
            self._log_records[index] = None
        return idx

    def _drain_queue(self):
        """Export all elements until queue is empty.

        Can only be called from the worker thread context because it invokes
        `export` that is not thread safe.
        """
        while self._queue:
            self._export_batch()

    def _get_and_unset_flush_request(self) -> Optional[_FlushRequest]:
        flush_request = self._flush_request
        self._flush_request = None
        if flush_request is not None:
            flush_request.num_log_records = len(self._queue)
        return flush_request

    @staticmethod
    def _notify_flush_request_finished(
        flush_request: Optional[_FlushRequest] = None,
    ):
        if flush_request is not None:
            flush_request.event.set()

    def _get_or_create_flush_request(self) -> _FlushRequest:
        if self._flush_request is None:
            self._flush_request = _FlushRequest()
        return self._flush_request

    def emit(self, log_data: LogData) -> None:
        """Adds the `LogData` to the queue and notifies the waiting threads
        when the size of the queue reaches max_export_batch_size.
        """
        if self._shutdown:
            return
        if self._pid != os.getpid():
            _BSP_RESET_ONCE.do_once(self._at_fork_reinit)

        self._queue.appendleft(log_data)
        if len(self._queue) >= self._max_export_batch_size:
            with self._condition:
                self._condition.notify()

    def shutdown(self):
        self._shutdown = True
        with self._condition:
            self._condition.notify_all()
        self._worker_thread.join()
        self._exporter.shutdown()

    def force_flush(self, timeout_millis: Optional[int] = None) -> bool:
        if timeout_millis is None:
            timeout_millis = self._export_timeout_millis
        if self._shutdown:
            return True

        with self._condition:
            flush_request = self._get_or_create_flush_request()
            self._condition.notify_all()

        ret = flush_request.event.wait(timeout_millis / 1e3)
        if not ret:
            _logger.warning("Timeout was exceeded in force_flush().")
        return ret
|
||||
|
||||
@staticmethod
|
||||
def _default_max_queue_size():
|
||||
try:
|
||||
return int(
|
||||
environ.get(OTEL_BLRP_MAX_QUEUE_SIZE, _DEFAULT_MAX_QUEUE_SIZE)
|
||||
)
|
||||
except ValueError:
|
||||
_logger.exception(
|
||||
_ENV_VAR_INT_VALUE_ERROR_MESSAGE,
|
||||
OTEL_BLRP_MAX_QUEUE_SIZE,
|
||||
_DEFAULT_MAX_QUEUE_SIZE,
|
||||
)
|
||||
return _DEFAULT_MAX_QUEUE_SIZE
|
||||
|
||||
@staticmethod
|
||||
def _default_schedule_delay_millis():
|
||||
try:
|
||||
return int(
|
||||
environ.get(
|
||||
OTEL_BLRP_SCHEDULE_DELAY, _DEFAULT_SCHEDULE_DELAY_MILLIS
|
||||
)
|
||||
)
|
||||
except ValueError:
|
||||
_logger.exception(
|
||||
_ENV_VAR_INT_VALUE_ERROR_MESSAGE,
|
||||
OTEL_BLRP_SCHEDULE_DELAY,
|
||||
_DEFAULT_SCHEDULE_DELAY_MILLIS,
|
||||
)
|
||||
return _DEFAULT_SCHEDULE_DELAY_MILLIS
|
||||
|
||||
@staticmethod
|
||||
def _default_max_export_batch_size():
|
||||
try:
|
||||
return int(
|
||||
environ.get(
|
||||
OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
|
||||
_DEFAULT_MAX_EXPORT_BATCH_SIZE,
|
||||
)
|
||||
)
|
||||
except ValueError:
|
||||
_logger.exception(
|
||||
_ENV_VAR_INT_VALUE_ERROR_MESSAGE,
|
||||
OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
|
||||
_DEFAULT_MAX_EXPORT_BATCH_SIZE,
|
||||
)
|
||||
return _DEFAULT_MAX_EXPORT_BATCH_SIZE
|
||||
|
||||
@staticmethod
|
||||
def _default_export_timeout_millis():
|
||||
try:
|
||||
return int(
|
||||
environ.get(
|
||||
OTEL_BLRP_EXPORT_TIMEOUT, _DEFAULT_EXPORT_TIMEOUT_MILLIS
|
||||
)
|
||||
)
|
||||
except ValueError:
|
||||
_logger.exception(
|
||||
_ENV_VAR_INT_VALUE_ERROR_MESSAGE,
|
||||
OTEL_BLRP_EXPORT_TIMEOUT,
|
||||
_DEFAULT_EXPORT_TIMEOUT_MILLIS,
|
||||
)
|
||||
return _DEFAULT_EXPORT_TIMEOUT_MILLIS
|
||||
|
||||
@staticmethod
|
||||
def _validate_arguments(
|
||||
max_queue_size, schedule_delay_millis, max_export_batch_size
|
||||
):
|
||||
if max_queue_size <= 0:
|
||||
raise ValueError("max_queue_size must be a positive integer.")
|
||||
|
||||
if schedule_delay_millis <= 0:
|
||||
raise ValueError("schedule_delay_millis must be positive.")
|
||||
|
||||
if max_export_batch_size <= 0:
|
||||
raise ValueError(
|
||||
"max_export_batch_size must be a positive integer."
|
||||
)
|
||||
|
||||
if max_export_batch_size > max_queue_size:
|
||||
raise ValueError(
|
||||
"max_export_batch_size must be less than or equal to max_queue_size."
|
||||
)
|
||||
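For orientation, here is a minimal usage sketch (not part of this commit) of how the batch processor above would typically be wired into a ``LoggerProvider`` so that records emitted through the standard ``logging`` module are queued and exported from the worker thread. Import paths follow the modules in this diff.

.. code-block:: python

    import logging

    from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
    from opentelemetry.sdk._logs.export import (
        BatchLogRecordProcessor,
        ConsoleLogExporter,
    )

    provider = LoggerProvider()
    # Constructor arguments left out fall back to the OTEL_BLRP_* defaults.
    provider.add_log_record_processor(
        BatchLogRecordProcessor(ConsoleLogExporter())
    )

    logging.getLogger().addHandler(LoggingHandler(logger_provider=provider))
    logging.getLogger(__name__).warning("hello")  # queued, exported in a batch
    provider.shutdown()  # drains the queue and joins the worker thread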
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,51 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import threading
import typing

from opentelemetry.sdk._logs import LogData
from opentelemetry.sdk._logs.export import LogExporter, LogExportResult


class InMemoryLogExporter(LogExporter):
    """Implementation of :class:`.LogExporter` that stores logs in memory.

    This class can be used for testing purposes. It stores the exported logs
    in a list in memory that can be retrieved using the
    :func:`.get_finished_logs` method.
    """

    def __init__(self):
        self._logs = []
        self._lock = threading.Lock()
        self._stopped = False

    def clear(self) -> None:
        with self._lock:
            self._logs.clear()

    def get_finished_logs(self) -> typing.Tuple[LogData, ...]:
        with self._lock:
            return tuple(self._logs)

    def export(self, batch: typing.Sequence[LogData]) -> LogExportResult:
        if self._stopped:
            return LogExportResult.FAILURE
        with self._lock:
            self._logs.extend(batch)
        return LogExportResult.SUCCESS

    def shutdown(self) -> None:
        self._stopped = True
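A short test-style sketch (not part of this commit) showing the exporter above paired with ``SimpleLogRecordProcessor``, so exported records are available synchronously for assertions:

.. code-block:: python

    import logging

    from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
    from opentelemetry.sdk._logs.export import (
        InMemoryLogExporter,
        SimpleLogRecordProcessor,
    )

    exporter = InMemoryLogExporter()
    provider = LoggerProvider()
    provider.add_log_record_processor(SimpleLogRecordProcessor(exporter))

    logging.getLogger("test").addHandler(LoggingHandler(logger_provider=provider))
    logging.getLogger("test").error("boom")

    # Each exported item is a LogData wrapping the underlying LogRecord.
    (log_data,) = exporter.get_finished_logs()
    assert log_data.log_record.body == "boom"
    exporter.clear()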
@@ -0,0 +1,35 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from opentelemetry.sdk._logs._internal.export import (
    BatchLogRecordProcessor,
    ConsoleLogExporter,
    LogExporter,
    LogExportResult,
    SimpleLogRecordProcessor,
)

# The point module is not in the export directory to avoid a circular import.
from opentelemetry.sdk._logs._internal.export.in_memory_log_exporter import (
    InMemoryLogExporter,
)

__all__ = [
    "BatchLogRecordProcessor",
    "ConsoleLogExporter",
    "LogExporter",
    "LogExportResult",
    "SimpleLogRecordProcessor",
    "InMemoryLogExporter",
]
Binary file not shown.
@@ -0,0 +1,721 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

OTEL_SDK_DISABLED = "OTEL_SDK_DISABLED"
"""
.. envvar:: OTEL_SDK_DISABLED

The :envvar:`OTEL_SDK_DISABLED` environment variable disables the SDK for all signals.
Default: "false"
"""

OTEL_RESOURCE_ATTRIBUTES = "OTEL_RESOURCE_ATTRIBUTES"
"""
.. envvar:: OTEL_RESOURCE_ATTRIBUTES

The :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable allows resource
attributes to be passed to the SDK at process invocation. The attributes from
:envvar:`OTEL_RESOURCE_ATTRIBUTES` are merged with those passed to
`Resource.create`, meaning :envvar:`OTEL_RESOURCE_ATTRIBUTES` takes *lower*
priority. Attributes should be in the format ``key1=value1,key2=value2``.
Additional details are available `in the specification
<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable>`__.

.. code-block:: console

    $ OTEL_RESOURCE_ATTRIBUTES="service.name=shoppingcard,will_be_overridden=foo" python - <<EOF
    import pprint
    from opentelemetry.sdk.resources import Resource
    pprint.pprint(Resource.create({"will_be_overridden": "bar"}).attributes)
    EOF
    {'service.name': 'shoppingcard',
     'telemetry.sdk.language': 'python',
     'telemetry.sdk.name': 'opentelemetry',
     'telemetry.sdk.version': '0.13.dev0',
     'will_be_overridden': 'bar'}
"""

OTEL_LOG_LEVEL = "OTEL_LOG_LEVEL"
"""
.. envvar:: OTEL_LOG_LEVEL

The :envvar:`OTEL_LOG_LEVEL` environment variable sets the log level used by the SDK logger.
Default: "info"
"""

OTEL_TRACES_SAMPLER = "OTEL_TRACES_SAMPLER"
"""
.. envvar:: OTEL_TRACES_SAMPLER

The :envvar:`OTEL_TRACES_SAMPLER` environment variable sets the sampler to be used for traces.
Sampling is a mechanism to control the noise introduced by OpenTelemetry by reducing the number
of traces collected and sent to the backend.
Default: "parentbased_always_on"
"""

OTEL_TRACES_SAMPLER_ARG = "OTEL_TRACES_SAMPLER_ARG"
"""
.. envvar:: OTEL_TRACES_SAMPLER_ARG

The :envvar:`OTEL_TRACES_SAMPLER_ARG` environment variable will only be used if OTEL_TRACES_SAMPLER is set.
Each Sampler type defines its own expected input, if any.
Invalid or unrecognized input is ignored,
i.e. the SDK behaves as if OTEL_TRACES_SAMPLER_ARG is not set.
"""

OTEL_BLRP_SCHEDULE_DELAY = "OTEL_BLRP_SCHEDULE_DELAY"
"""
.. envvar:: OTEL_BLRP_SCHEDULE_DELAY

The :envvar:`OTEL_BLRP_SCHEDULE_DELAY` represents the delay interval between two consecutive exports of the BatchLogRecordProcessor.
Default: 5000
"""

OTEL_BLRP_EXPORT_TIMEOUT = "OTEL_BLRP_EXPORT_TIMEOUT"
"""
.. envvar:: OTEL_BLRP_EXPORT_TIMEOUT

The :envvar:`OTEL_BLRP_EXPORT_TIMEOUT` represents the maximum allowed time to export data from the BatchLogRecordProcessor.
Default: 30000
"""

OTEL_BLRP_MAX_QUEUE_SIZE = "OTEL_BLRP_MAX_QUEUE_SIZE"
"""
.. envvar:: OTEL_BLRP_MAX_QUEUE_SIZE

The :envvar:`OTEL_BLRP_MAX_QUEUE_SIZE` represents the maximum queue size for the data export of the BatchLogRecordProcessor.
Default: 2048
"""

OTEL_BLRP_MAX_EXPORT_BATCH_SIZE = "OTEL_BLRP_MAX_EXPORT_BATCH_SIZE"
"""
.. envvar:: OTEL_BLRP_MAX_EXPORT_BATCH_SIZE

The :envvar:`OTEL_BLRP_MAX_EXPORT_BATCH_SIZE` represents the maximum batch size for the data export of the BatchLogRecordProcessor.
Default: 512
"""

OTEL_BSP_SCHEDULE_DELAY = "OTEL_BSP_SCHEDULE_DELAY"
"""
.. envvar:: OTEL_BSP_SCHEDULE_DELAY

The :envvar:`OTEL_BSP_SCHEDULE_DELAY` represents the delay interval between two consecutive exports of the BatchSpanProcessor.
Default: 5000
"""

OTEL_BSP_EXPORT_TIMEOUT = "OTEL_BSP_EXPORT_TIMEOUT"
"""
.. envvar:: OTEL_BSP_EXPORT_TIMEOUT

The :envvar:`OTEL_BSP_EXPORT_TIMEOUT` represents the maximum allowed time to export data from the BatchSpanProcessor.
Default: 30000
"""

OTEL_BSP_MAX_QUEUE_SIZE = "OTEL_BSP_MAX_QUEUE_SIZE"
"""
.. envvar:: OTEL_BSP_MAX_QUEUE_SIZE

The :envvar:`OTEL_BSP_MAX_QUEUE_SIZE` represents the maximum queue size for the data export of the BatchSpanProcessor.
Default: 2048
"""

OTEL_BSP_MAX_EXPORT_BATCH_SIZE = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE"
"""
.. envvar:: OTEL_BSP_MAX_EXPORT_BATCH_SIZE

The :envvar:`OTEL_BSP_MAX_EXPORT_BATCH_SIZE` represents the maximum batch size for the data export of the BatchSpanProcessor.
Default: 512
"""

OTEL_ATTRIBUTE_COUNT_LIMIT = "OTEL_ATTRIBUTE_COUNT_LIMIT"
"""
.. envvar:: OTEL_ATTRIBUTE_COUNT_LIMIT

The :envvar:`OTEL_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed attribute count for spans, events and links.
This limit is overridden by model-specific limits such as OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT.
Default: 128
"""

OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT"
"""
.. envvar:: OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT

The :envvar:`OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT` represents the maximum allowed attribute length.
"""

OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"
"""
.. envvar:: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT

The :envvar:`OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed event attribute count.
Default: 128
"""

OTEL_LINK_ATTRIBUTE_COUNT_LIMIT = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"
"""
.. envvar:: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT

The :envvar:`OTEL_LINK_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed link attribute count.
Default: 128
"""

OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT"
"""
.. envvar:: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT

The :envvar:`OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed span attribute count.
Default: 128
"""

OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT = (
    "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT"
)
"""
.. envvar:: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT

The :envvar:`OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT` represents the maximum allowed length
span attribute values can have. This takes precedence over :envvar:`OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT`.
"""

OTEL_SPAN_EVENT_COUNT_LIMIT = "OTEL_SPAN_EVENT_COUNT_LIMIT"
"""
.. envvar:: OTEL_SPAN_EVENT_COUNT_LIMIT

The :envvar:`OTEL_SPAN_EVENT_COUNT_LIMIT` represents the maximum allowed span event count.
Default: 128
"""

OTEL_SPAN_LINK_COUNT_LIMIT = "OTEL_SPAN_LINK_COUNT_LIMIT"
"""
.. envvar:: OTEL_SPAN_LINK_COUNT_LIMIT

The :envvar:`OTEL_SPAN_LINK_COUNT_LIMIT` represents the maximum allowed span link count.
Default: 128
"""

OTEL_EXPORTER_JAEGER_AGENT_HOST = "OTEL_EXPORTER_JAEGER_AGENT_HOST"
"""
.. envvar:: OTEL_EXPORTER_JAEGER_AGENT_HOST

The :envvar:`OTEL_EXPORTER_JAEGER_AGENT_HOST` represents the hostname for the Jaeger agent.
Default: "localhost"
"""

OTEL_EXPORTER_JAEGER_AGENT_PORT = "OTEL_EXPORTER_JAEGER_AGENT_PORT"
"""
.. envvar:: OTEL_EXPORTER_JAEGER_AGENT_PORT

The :envvar:`OTEL_EXPORTER_JAEGER_AGENT_PORT` represents the port for the Jaeger agent.
Default: 6831
"""

OTEL_EXPORTER_JAEGER_ENDPOINT = "OTEL_EXPORTER_JAEGER_ENDPOINT"
"""
.. envvar:: OTEL_EXPORTER_JAEGER_ENDPOINT

The :envvar:`OTEL_EXPORTER_JAEGER_ENDPOINT` represents the HTTP endpoint for Jaeger traces.
Default: "http://localhost:14250"
"""

OTEL_EXPORTER_JAEGER_USER = "OTEL_EXPORTER_JAEGER_USER"
"""
.. envvar:: OTEL_EXPORTER_JAEGER_USER

The :envvar:`OTEL_EXPORTER_JAEGER_USER` represents the username to be used for HTTP basic authentication.
"""

OTEL_EXPORTER_JAEGER_PASSWORD = "OTEL_EXPORTER_JAEGER_PASSWORD"
"""
.. envvar:: OTEL_EXPORTER_JAEGER_PASSWORD

The :envvar:`OTEL_EXPORTER_JAEGER_PASSWORD` represents the password to be used for HTTP basic authentication.
"""

OTEL_EXPORTER_JAEGER_TIMEOUT = "OTEL_EXPORTER_JAEGER_TIMEOUT"
"""
.. envvar:: OTEL_EXPORTER_JAEGER_TIMEOUT

Maximum time the Jaeger exporter will wait for each batch export.
Default: 10
"""

OTEL_EXPORTER_ZIPKIN_ENDPOINT = "OTEL_EXPORTER_ZIPKIN_ENDPOINT"
"""
.. envvar:: OTEL_EXPORTER_ZIPKIN_ENDPOINT

Zipkin collector endpoint to which the exporter will send data. This may
include a path (e.g. ``http://example.com:9411/api/v2/spans``).
"""

OTEL_EXPORTER_ZIPKIN_TIMEOUT = "OTEL_EXPORTER_ZIPKIN_TIMEOUT"
"""
.. envvar:: OTEL_EXPORTER_ZIPKIN_TIMEOUT

Maximum time (in seconds) the Zipkin exporter will wait for each batch export.
Default: 10
"""

OTEL_EXPORTER_OTLP_PROTOCOL = "OTEL_EXPORTER_OTLP_PROTOCOL"
"""
.. envvar:: OTEL_EXPORTER_OTLP_PROTOCOL

The :envvar:`OTEL_EXPORTER_OTLP_PROTOCOL` represents the transport protocol for the
OTLP exporter.
"""

OTEL_EXPORTER_OTLP_TRACES_PROTOCOL = "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"
"""
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL

The :envvar:`OTEL_EXPORTER_OTLP_TRACES_PROTOCOL` represents the transport protocol for spans.
"""

OTEL_EXPORTER_OTLP_METRICS_PROTOCOL = "OTEL_EXPORTER_OTLP_METRICS_PROTOCOL"
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_PROTOCOL

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_PROTOCOL` represents the transport protocol for metrics.
"""

OTEL_EXPORTER_OTLP_LOGS_PROTOCOL = "OTEL_EXPORTER_OTLP_LOGS_PROTOCOL"
"""
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_PROTOCOL

The :envvar:`OTEL_EXPORTER_OTLP_LOGS_PROTOCOL` represents the transport protocol for logs.
"""

OTEL_EXPORTER_OTLP_CERTIFICATE = "OTEL_EXPORTER_OTLP_CERTIFICATE"
"""
.. envvar:: OTEL_EXPORTER_OTLP_CERTIFICATE

The :envvar:`OTEL_EXPORTER_OTLP_CERTIFICATE` stores the path to the certificate file for
TLS credentials of the gRPC client. Should only be used for a secure connection.
"""

OTEL_EXPORTER_OTLP_HEADERS = "OTEL_EXPORTER_OTLP_HEADERS"
"""
.. envvar:: OTEL_EXPORTER_OTLP_HEADERS

The :envvar:`OTEL_EXPORTER_OTLP_HEADERS` contains the key-value pairs to be used as headers
associated with gRPC or HTTP requests.
"""


OTEL_EXPORTER_OTLP_COMPRESSION = "OTEL_EXPORTER_OTLP_COMPRESSION"
"""
.. envvar:: OTEL_EXPORTER_OTLP_COMPRESSION

Specifies a gRPC compression method to be used in the OTLP exporters.
Possible values are:

- ``gzip`` corresponding to `grpc.Compression.Gzip`.
- ``deflate`` corresponding to `grpc.Compression.Deflate`.

If no ``OTEL_EXPORTER_OTLP_*COMPRESSION`` environment variable is present and no
``compression`` argument is passed to the exporter, the default
`grpc.Compression.NoCompression` will be used. Additional details are
available `in the specification
<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#opentelemetry-protocol-exporter>`__.
"""

OTEL_EXPORTER_OTLP_TIMEOUT = "OTEL_EXPORTER_OTLP_TIMEOUT"
"""
.. envvar:: OTEL_EXPORTER_OTLP_TIMEOUT

The :envvar:`OTEL_EXPORTER_OTLP_TIMEOUT` is the maximum time the OTLP exporter will wait for each batch export.
Default: 10
"""

OTEL_EXPORTER_OTLP_ENDPOINT = "OTEL_EXPORTER_OTLP_ENDPOINT"
"""
.. envvar:: OTEL_EXPORTER_OTLP_ENDPOINT

The :envvar:`OTEL_EXPORTER_OTLP_ENDPOINT` is the target to which the exporter is going to send spans or metrics.
The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path.
A scheme of https indicates a secure connection and takes precedence over the insecure configuration setting.
Default: "http://localhost:4317"
"""

OTEL_EXPORTER_OTLP_INSECURE = "OTEL_EXPORTER_OTLP_INSECURE"
"""
.. envvar:: OTEL_EXPORTER_OTLP_INSECURE

The :envvar:`OTEL_EXPORTER_OTLP_INSECURE` represents whether to enable client transport security for gRPC requests.
A scheme of https takes precedence over this configuration setting.
Default: False
"""

OTEL_EXPORTER_OTLP_TRACES_INSECURE = "OTEL_EXPORTER_OTLP_TRACES_INSECURE"
"""
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_INSECURE

The :envvar:`OTEL_EXPORTER_OTLP_TRACES_INSECURE` represents whether to enable client transport security
for gRPC requests for spans. A scheme of https takes precedence over this configuration setting.
Default: False
"""


OTEL_EXPORTER_OTLP_TRACES_ENDPOINT = "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT"
"""
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT

The :envvar:`OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` is the target to which the span exporter is going to send spans.
The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path.
A scheme of https indicates a secure connection and takes precedence over this configuration setting.
"""

OTEL_EXPORTER_OTLP_METRICS_ENDPOINT = "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT"
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_ENDPOINT

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` is the target to which the metrics exporter is going to send metrics.
The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path.
A scheme of https indicates a secure connection and takes precedence over this configuration setting.
"""

OTEL_EXPORTER_OTLP_LOGS_ENDPOINT = "OTEL_EXPORTER_OTLP_LOGS_ENDPOINT"
"""
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT

The :envvar:`OTEL_EXPORTER_OTLP_LOGS_ENDPOINT` is the target to which the log exporter is going to send logs.
The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path.
A scheme of https indicates a secure connection and takes precedence over this configuration setting.
"""

OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE = "OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE"
"""
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE

The :envvar:`OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` stores the path to the certificate file for
TLS credentials of the gRPC client for traces. Should only be used for a secure connection for tracing.
"""

OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE = (
    "OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE"
)
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE` stores the path to the certificate file for
TLS credentials of the gRPC client for metrics. Should only be used for a secure connection for exporting metrics.
"""

OTEL_EXPORTER_OTLP_CLIENT_KEY = "OTEL_EXPORTER_OTLP_CLIENT_KEY"
"""
.. envvar:: OTEL_EXPORTER_OTLP_CLIENT_KEY

The :envvar:`OTEL_EXPORTER_OTLP_CLIENT_KEY` stores the path to the client private key to use
in mTLS communication in PEM format.
"""

OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY = "OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY"
"""
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY

The :envvar:`OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` stores the path to the client private key to use
in mTLS communication in PEM format for traces.
"""

OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY = "OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY"
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` stores the path to the client private key to use
in mTLS communication in PEM format for metrics.
"""

OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY = "OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY"
"""
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY

The :envvar:`OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY` stores the path to the client private key to use
in mTLS communication in PEM format for logs.
"""

OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE = "OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE"
"""
.. envvar:: OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE

The :envvar:`OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for
the client's private key to use in mTLS communication in PEM format.
"""

OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE = (
    "OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE"
)
"""
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE

The :envvar:`OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for
the client's private key to use in mTLS communication in PEM format for traces.
"""

OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE = (
    "OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE"
)
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for
the client's private key to use in mTLS communication in PEM format for metrics.
"""

OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE = (
    "OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE"
)
"""
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE

The :envvar:`OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for
the client's private key to use in mTLS communication in PEM format for logs.
"""

OTEL_EXPORTER_OTLP_TRACES_HEADERS = "OTEL_EXPORTER_OTLP_TRACES_HEADERS"
"""
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_HEADERS

The :envvar:`OTEL_EXPORTER_OTLP_TRACES_HEADERS` contains the key-value pairs to be used as headers for spans
associated with gRPC or HTTP requests.
"""

OTEL_EXPORTER_OTLP_METRICS_HEADERS = "OTEL_EXPORTER_OTLP_METRICS_HEADERS"
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_HEADERS

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_HEADERS` contains the key-value pairs to be used as headers for metrics
associated with gRPC or HTTP requests.
"""

OTEL_EXPORTER_OTLP_LOGS_HEADERS = "OTEL_EXPORTER_OTLP_LOGS_HEADERS"
"""
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_HEADERS

The :envvar:`OTEL_EXPORTER_OTLP_LOGS_HEADERS` contains the key-value pairs to be used as headers for logs
associated with gRPC or HTTP requests.
"""

OTEL_EXPORTER_OTLP_TRACES_COMPRESSION = "OTEL_EXPORTER_OTLP_TRACES_COMPRESSION"
"""
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_COMPRESSION

Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the span
exporter. If both are present, this takes higher precedence.
"""

OTEL_EXPORTER_OTLP_METRICS_COMPRESSION = (
    "OTEL_EXPORTER_OTLP_METRICS_COMPRESSION"
)
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_COMPRESSION

Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the metric
exporter. If both are present, this takes higher precedence.
"""

OTEL_EXPORTER_OTLP_LOGS_COMPRESSION = "OTEL_EXPORTER_OTLP_LOGS_COMPRESSION"
"""
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_COMPRESSION

Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the log
exporter. If both are present, this takes higher precedence.
"""

OTEL_EXPORTER_OTLP_TRACES_TIMEOUT = "OTEL_EXPORTER_OTLP_TRACES_TIMEOUT"
"""
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_TIMEOUT

The :envvar:`OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` is the maximum time the OTLP exporter will
wait for each batch export for spans.
"""

OTEL_EXPORTER_OTLP_METRICS_TIMEOUT = "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT"
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_TIMEOUT

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` is the maximum time the OTLP exporter will
wait for each batch export for metrics.
"""

OTEL_EXPORTER_OTLP_METRICS_INSECURE = "OTEL_EXPORTER_OTLP_METRICS_INSECURE"
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_INSECURE

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_INSECURE` represents whether to enable client transport security
for gRPC requests for metrics. A scheme of https takes precedence over this configuration setting.
Default: False
"""

OTEL_EXPORTER_OTLP_LOGS_INSECURE = "OTEL_EXPORTER_OTLP_LOGS_INSECURE"
"""
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_INSECURE

The :envvar:`OTEL_EXPORTER_OTLP_LOGS_INSECURE` represents whether to enable client transport security
for gRPC requests for logs. A scheme of https takes precedence over this configuration setting.
Default: False
"""

OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE = "OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE"
"""
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE

The :envvar:`OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE` stores the path to the certificate file for
TLS credentials of the gRPC client for logs. Should only be used for a secure connection for logs.
"""

OTEL_EXPORTER_OTLP_LOGS_TIMEOUT = "OTEL_EXPORTER_OTLP_LOGS_TIMEOUT"
"""
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_TIMEOUT

The :envvar:`OTEL_EXPORTER_OTLP_LOGS_TIMEOUT` is the maximum time the OTLP exporter will
wait for each batch export for logs.
"""

OTEL_EXPORTER_JAEGER_CERTIFICATE = "OTEL_EXPORTER_JAEGER_CERTIFICATE"
"""
.. envvar:: OTEL_EXPORTER_JAEGER_CERTIFICATE

The :envvar:`OTEL_EXPORTER_JAEGER_CERTIFICATE` stores the path to the certificate file for
TLS credentials of the gRPC client for Jaeger. Should only be used for a secure connection with Jaeger.
"""

OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES = (
    "OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES"
)
"""
.. envvar:: OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES

The :envvar:`OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES` is a boolean flag to determine whether
to split a large span batch to respect the UDP packet size limit.
"""

OTEL_SERVICE_NAME = "OTEL_SERVICE_NAME"
"""
.. envvar:: OTEL_SERVICE_NAME

Convenience environment variable for setting the service name resource attribute.
The following two environment variables have the same effect:

.. code-block:: console

    OTEL_SERVICE_NAME=my-python-service

    OTEL_RESOURCE_ATTRIBUTES=service.name=my-python-service


If both are set, :envvar:`OTEL_SERVICE_NAME` takes precedence.
"""


_OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED = (
    "OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED"
)
"""
.. envvar:: OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED

The :envvar:`OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED` environment variable allows users to
enable/disable auto-instrumentation for the Python logging module.
Default: False

Note: Logs SDK and its related settings are experimental.
"""


OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE = (
    "OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE"
)
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment
variable allows users to set the default aggregation temporality policy to use
on the basis of instrument kind. The valid (case-insensitive) values are:

``CUMULATIVE``: Use ``CUMULATIVE`` aggregation temporality for all instrument kinds.
``DELTA``: Use ``DELTA`` aggregation temporality for ``Counter``, ``Asynchronous Counter`` and ``Histogram``.
Use ``CUMULATIVE`` aggregation temporality for ``UpDownCounter`` and ``Asynchronous UpDownCounter``.
``LOWMEMORY``: Use ``DELTA`` aggregation temporality for ``Counter`` and ``Histogram``.
Use ``CUMULATIVE`` aggregation temporality for ``UpDownCounter``, ``AsynchronousCounter`` and ``Asynchronous UpDownCounter``.
"""

OTEL_EXPORTER_JAEGER_GRPC_INSECURE = "OTEL_EXPORTER_JAEGER_GRPC_INSECURE"
"""
.. envvar:: OTEL_EXPORTER_JAEGER_GRPC_INSECURE

The :envvar:`OTEL_EXPORTER_JAEGER_GRPC_INSECURE` is a boolean flag that should be set to True if the collector has no encryption or authentication.
"""

OTEL_METRIC_EXPORT_INTERVAL = "OTEL_METRIC_EXPORT_INTERVAL"
"""
.. envvar:: OTEL_METRIC_EXPORT_INTERVAL

The :envvar:`OTEL_METRIC_EXPORT_INTERVAL` is the time interval (in milliseconds) between the start of two export attempts.
"""

OTEL_METRIC_EXPORT_TIMEOUT = "OTEL_METRIC_EXPORT_TIMEOUT"
"""
.. envvar:: OTEL_METRIC_EXPORT_TIMEOUT

The :envvar:`OTEL_METRIC_EXPORT_TIMEOUT` is the maximum allowed time (in milliseconds) to export data.
"""

OTEL_METRICS_EXEMPLAR_FILTER = "OTEL_METRICS_EXEMPLAR_FILTER"
"""
.. envvar:: OTEL_METRICS_EXEMPLAR_FILTER

The :envvar:`OTEL_METRICS_EXEMPLAR_FILTER` is the filter for which measurements can become Exemplars.
"""

OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION = (
    "OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION"
)
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` is the default aggregation to use for histogram instruments.
"""

OTEL_EXPERIMENTAL_RESOURCE_DETECTORS = "OTEL_EXPERIMENTAL_RESOURCE_DETECTORS"
"""
.. envvar:: OTEL_EXPERIMENTAL_RESOURCE_DETECTORS

The :envvar:`OTEL_EXPERIMENTAL_RESOURCE_DETECTORS` is a comma-separated string
of names of resource detectors. These names must be the same as the names of
entry points registered under the ``opentelemetry_resource_detector`` entry
point group. This is an experimental feature and the name of this variable and
its behavior can change in a non-backwards compatible way.
"""

OTEL_EXPORTER_PROMETHEUS_HOST = "OTEL_EXPORTER_PROMETHEUS_HOST"
"""
.. envvar:: OTEL_EXPORTER_PROMETHEUS_HOST

The :envvar:`OTEL_EXPORTER_PROMETHEUS_HOST` environment variable configures the host used by
the Prometheus exporter.
Default: "localhost"

This is an experimental environment variable and the name of this variable and its behavior can
change in a non-backwards compatible way.
"""

OTEL_EXPORTER_PROMETHEUS_PORT = "OTEL_EXPORTER_PROMETHEUS_PORT"
"""
.. envvar:: OTEL_EXPORTER_PROMETHEUS_PORT

The :envvar:`OTEL_EXPORTER_PROMETHEUS_PORT` environment variable configures the port used by
the Prometheus exporter.
Default: 9464

This is an experimental environment variable and the name of this variable and its behavior can
change in a non-backwards compatible way.
"""
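A minimal sketch (not part of this commit) of the lookup pattern the batch processors use for the integer-valued variables above: read the variable, fall back to the documented default when it is absent or not a valid integer. The ``_int_from_env`` helper name is hypothetical, mirroring ``_default_max_queue_size`` shown earlier.

.. code-block:: python

    import logging
    from os import environ

    _logger = logging.getLogger(__name__)

    def _int_from_env(name: str, default: int) -> int:
        # Invalid values are logged and replaced by the default, matching
        # the behavior of the _default_* static methods above.
        try:
            return int(environ.get(name, default))
        except ValueError:
            _logger.exception("Invalid value for %s, using default %s", name, default)
            return default

    max_queue_size = _int_from_env("OTEL_BLRP_MAX_QUEUE_SIZE", 2048)  # documented default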
Binary file not shown.
@@ -0,0 +1,142 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
Global Error Handler

This module provides a global error handler and an interface that allows
error handlers to be registered with the global error handler via entry points.
A default error handler is also provided.

To use this feature, users can create an error handler that is registered
using the ``opentelemetry_error_handler`` entry point. A class is to be
registered in this entry point; this class must inherit from the
``opentelemetry.sdk.error_handler.ErrorHandler`` class and implement the
corresponding ``_handle`` method. This method will receive the exception object
that is to be handled. The error handler class should also inherit from the
exception classes it wants to handle. For example, this would be an error
handler that handles ``ZeroDivisionError``:

.. code:: python

    from opentelemetry.sdk.error_handler import ErrorHandler
    from logging import getLogger

    logger = getLogger(__name__)


    class ErrorHandler0(ErrorHandler, ZeroDivisionError):

        def _handle(self, error: Exception, *args, **kwargs):

            logger.exception("ErrorHandler0 handling a ZeroDivisionError")

To use the global error handler, just instantiate it as a context manager where
you want exceptions to be handled:


.. code:: python

    from opentelemetry.sdk.error_handler import GlobalErrorHandler

    with GlobalErrorHandler():
        1 / 0

If the class of the exception raised in the scope of the ``GlobalErrorHandler``
object is not a parent of any registered error handler, then the default error
handler will handle the exception. This default error handler will only log the
exception to standard logging; the exception won't be raised any further.
"""

from abc import ABC, abstractmethod
from logging import getLogger

from opentelemetry.util._importlib_metadata import entry_points

logger = getLogger(__name__)


class ErrorHandler(ABC):
    @abstractmethod
    def _handle(self, error: Exception, *args, **kwargs):
        """
        Handle an exception
        """


class _DefaultErrorHandler(ErrorHandler):
    """
    Default error handler

    This error handler just logs the exception using standard logging.
    """

    # pylint: disable=useless-return
    def _handle(self, error: Exception, *args, **kwargs):
        logger.exception("Error handled by default error handler: ")
        return None


class GlobalErrorHandler:
    """
    Global error handler

    This is a singleton class that can be instantiated anywhere to get the
    global error handler. This object provides a handle method that receives
    an exception object that will be handled by the registered error handlers.
    """

    _instance = None

    def __new__(cls) -> "GlobalErrorHandler":
        if cls._instance is None:
            cls._instance = super().__new__(cls)

        return cls._instance

    def __enter__(self):
        pass

    # pylint: disable=no-self-use
    def __exit__(self, exc_type, exc_value, traceback):
        if exc_value is None:
            return None

        plugin_handled = False

        error_handler_entry_points = entry_points(
            group="opentelemetry_error_handler"
        )

        for error_handler_entry_point in error_handler_entry_points:
            error_handler_class = error_handler_entry_point.load()

            if issubclass(error_handler_class, exc_value.__class__):
                try:
                    error_handler_class()._handle(exc_value)
                    plugin_handled = True

                # pylint: disable=broad-exception-caught
                except Exception as error_handling_error:
                    logger.exception(
                        "%s error while handling error %s by error handler %s",
                        error_handling_error.__class__.__name__,
                        exc_value.__class__.__name__,
                        error_handler_class.__name__,
                    )

        if not plugin_handled:
            _DefaultErrorHandler()._handle(exc_value)

        return True
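A short behavioral sketch (not part of this commit): because ``__exit__`` above always returns ``True``, any exception raised inside the context is suppressed after being dispatched; with no entry-point handlers registered, the default handler logs it and execution continues.

.. code-block:: python

    from opentelemetry.sdk.error_handler import GlobalErrorHandler

    with GlobalErrorHandler():
        {}["missing"]  # KeyError is logged by the default handler, not raised

    print("still running")  # reached: the context manager swallowed the error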
Binary file not shown.
@@ -0,0 +1,57 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from opentelemetry.sdk.metrics._internal import Meter, MeterProvider
from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
from opentelemetry.sdk.metrics._internal.exemplar import (
    AlignedHistogramBucketExemplarReservoir,
    AlwaysOffExemplarFilter,
    AlwaysOnExemplarFilter,
    Exemplar,
    ExemplarFilter,
    ExemplarReservoir,
    SimpleFixedSizeExemplarReservoir,
    TraceBasedExemplarFilter,
)
from opentelemetry.sdk.metrics._internal.instrument import (
    Counter,
    Histogram,
    ObservableCounter,
    ObservableGauge,
    ObservableUpDownCounter,
    UpDownCounter,
)
from opentelemetry.sdk.metrics._internal.instrument import Gauge as _Gauge

__all__ = [
    "AlignedHistogramBucketExemplarReservoir",
    "AlwaysOnExemplarFilter",
    "AlwaysOffExemplarFilter",
    "Exemplar",
    "ExemplarFilter",
    "ExemplarReservoir",
    "Meter",
    "MeterProvider",
    "MetricsTimeoutError",
    "Counter",
    "Histogram",
    "_Gauge",
    "ObservableCounter",
    "ObservableGauge",
    "ObservableUpDownCounter",
    "SimpleFixedSizeExemplarReservoir",
    "UpDownCounter",
    "TraceBasedExemplarFilter",
]
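A minimal usage sketch (not part of this commit) using the names re-exported above; in a real setup a metric reader and exporter would normally be attached so measurements leave the process.

.. code-block:: python

    from opentelemetry.sdk.metrics import MeterProvider

    # With no readers configured, measurements are recorded but never exported.
    provider = MeterProvider()
    meter = provider.get_meter("example.scope")
    counter = meter.create_counter("requests", unit="1", description="request count")
    counter.add(1, {"route": "/"})
    provider.shutdown()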
Binary file not shown.
@@ -0,0 +1,582 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import weakref
|
||||
from atexit import register, unregister
|
||||
from logging import getLogger
|
||||
from os import environ
|
||||
from threading import Lock
|
||||
from time import time_ns
|
||||
from typing import Optional, Sequence
|
||||
|
||||
# This kind of import is needed to avoid Sphinx errors.
|
||||
import opentelemetry.sdk.metrics
|
||||
from opentelemetry.metrics import Counter as APICounter
|
||||
from opentelemetry.metrics import Histogram as APIHistogram
|
||||
from opentelemetry.metrics import Meter as APIMeter
|
||||
from opentelemetry.metrics import MeterProvider as APIMeterProvider
|
||||
from opentelemetry.metrics import NoOpMeter
|
||||
from opentelemetry.metrics import ObservableCounter as APIObservableCounter
|
||||
from opentelemetry.metrics import ObservableGauge as APIObservableGauge
|
||||
from opentelemetry.metrics import (
|
||||
ObservableUpDownCounter as APIObservableUpDownCounter,
|
||||
)
|
||||
from opentelemetry.metrics import UpDownCounter as APIUpDownCounter
|
||||
from opentelemetry.metrics import _Gauge as APIGauge
|
||||
from opentelemetry.sdk.environment_variables import (
|
||||
OTEL_METRICS_EXEMPLAR_FILTER,
|
||||
OTEL_SDK_DISABLED,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
|
||||
from opentelemetry.sdk.metrics._internal.exemplar import (
|
||||
AlwaysOffExemplarFilter,
|
||||
AlwaysOnExemplarFilter,
|
||||
ExemplarFilter,
|
||||
TraceBasedExemplarFilter,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.instrument import (
|
||||
_Counter,
|
||||
_Gauge,
|
||||
_Histogram,
|
||||
_ObservableCounter,
|
||||
_ObservableGauge,
|
||||
_ObservableUpDownCounter,
|
||||
_UpDownCounter,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.measurement_consumer import (
|
||||
MeasurementConsumer,
|
||||
SynchronousMeasurementConsumer,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.sdk_configuration import (
|
||||
SdkConfiguration,
|
||||
)
|
||||
from opentelemetry.sdk.resources import Resource
|
||||
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
|
||||
from opentelemetry.util._once import Once
|
||||
from opentelemetry.util.types import (
|
||||
Attributes,
|
||||
)
|
||||
|
||||
_logger = getLogger(__name__)
|
||||
|
||||
|
||||
class Meter(APIMeter):
|
||||
"""See `opentelemetry.metrics.Meter`."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
instrumentation_scope: InstrumentationScope,
|
||||
measurement_consumer: MeasurementConsumer,
|
||||
):
|
||||
super().__init__(
|
||||
name=instrumentation_scope.name,
|
||||
version=instrumentation_scope.version,
|
||||
schema_url=instrumentation_scope.schema_url,
|
||||
)
|
||||
self._instrumentation_scope = instrumentation_scope
|
||||
self._measurement_consumer = measurement_consumer
|
||||
self._instrument_id_instrument = {}
|
||||
self._instrument_id_instrument_lock = Lock()
|
||||
|
||||
def create_counter(self, name, unit="", description="") -> APICounter:
|
||||
status = self._register_instrument(name, _Counter, unit, description)
|
||||
|
||||
if status.conflict:
|
||||
# FIXME #2558 go through all views here and check if this
|
||||
# instrument registration conflict can be fixed. If it can be, do
|
||||
# not log the following warning.
|
||||
self._log_instrument_registration_conflict(
|
||||
name,
|
||||
APICounter.__name__,
|
||||
unit,
|
||||
description,
|
||||
status,
|
||||
)
|
||||
if status.already_registered:
|
||||
with self._instrument_id_instrument_lock:
|
||||
return self._instrument_id_instrument[status.instrument_id]
|
||||
|
||||
instrument = _Counter(
|
||||
name,
|
||||
self._instrumentation_scope,
|
||||
self._measurement_consumer,
|
||||
unit,
|
||||
description,
|
||||
)
|
||||
|
||||
with self._instrument_id_instrument_lock:
|
||||
self._instrument_id_instrument[status.instrument_id] = instrument
|
||||
return instrument
|
||||
|
||||
def create_up_down_counter(
|
||||
self, name, unit="", description=""
|
||||
) -> APIUpDownCounter:
|
||||
status = self._register_instrument(
|
||||
name, _UpDownCounter, unit, description
|
||||
)
|
||||
|
||||
if status.conflict:
|
||||
# FIXME #2558 go through all views here and check if this
|
||||
# instrument registration conflict can be fixed. If it can be, do
|
||||
# not log the following warning.
|
||||
self._log_instrument_registration_conflict(
|
||||
name,
|
||||
APIUpDownCounter.__name__,
|
||||
unit,
|
||||
description,
|
||||
status,
|
||||
)
|
||||
if status.already_registered:
|
||||
with self._instrument_id_instrument_lock:
|
||||
return self._instrument_id_instrument[status.instrument_id]
|
||||
|
||||
instrument = _UpDownCounter(
|
||||
name,
|
||||
self._instrumentation_scope,
|
||||
self._measurement_consumer,
|
||||
unit,
|
||||
description,
|
||||
)
|
||||
|
||||
with self._instrument_id_instrument_lock:
|
||||
self._instrument_id_instrument[status.instrument_id] = instrument
|
||||
return instrument
|
||||
|
||||
def create_observable_counter(
|
||||
self,
|
||||
name,
|
||||
callbacks=None,
|
||||
unit="",
|
||||
description="",
|
||||
) -> APIObservableCounter:
|
||||
status = self._register_instrument(
|
||||
name, _ObservableCounter, unit, description
|
||||
)
|
||||
|
||||
if status.conflict:
|
||||
# FIXME #2558 go through all views here and check if this
|
||||
# instrument registration conflict can be fixed. If it can be, do
|
||||
# not log the following warning.
|
||||
self._log_instrument_registration_conflict(
|
||||
name,
|
||||
APIObservableCounter.__name__,
|
||||
unit,
|
||||
description,
|
||||
status,
|
||||
)
|
||||
if status.already_registered:
|
||||
with self._instrument_id_instrument_lock:
|
||||
return self._instrument_id_instrument[status.instrument_id]
|
||||
|
||||
instrument = _ObservableCounter(
|
||||
name,
|
||||
self._instrumentation_scope,
|
||||
self._measurement_consumer,
|
||||
callbacks,
|
||||
unit,
|
||||
description,
|
||||
)
|
||||
|
||||
self._measurement_consumer.register_asynchronous_instrument(instrument)
|
||||
|
||||
with self._instrument_id_instrument_lock:
|
||||
self._instrument_id_instrument[status.instrument_id] = instrument
|
||||
return instrument
|
||||
|
||||
def create_histogram(
|
||||
self,
|
||||
name: str,
|
||||
unit: str = "",
|
||||
description: str = "",
|
||||
*,
|
||||
explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None,
|
||||
) -> APIHistogram:
|
||||
if explicit_bucket_boundaries_advisory is not None:
|
||||
invalid_advisory = False
|
||||
if isinstance(explicit_bucket_boundaries_advisory, Sequence):
|
||||
try:
|
||||
invalid_advisory = not (
|
||||
all(
|
||||
isinstance(e, (float, int))
|
||||
for e in explicit_bucket_boundaries_advisory
|
||||
)
|
||||
)
|
||||
except (KeyError, TypeError):
|
||||
invalid_advisory = True
|
||||
else:
|
||||
invalid_advisory = True
|
||||
|
||||
if invalid_advisory:
|
||||
explicit_bucket_boundaries_advisory = None
|
||||
_logger.warning(
|
||||
"explicit_bucket_boundaries_advisory must be a sequence of numbers"
|
||||
)
|
||||
|
||||
status = self._register_instrument(
|
||||
name,
|
||||
_Histogram,
|
||||
unit,
|
||||
description,
|
||||
            explicit_bucket_boundaries_advisory,
        )

        if status.conflict:
            # FIXME #2558 go through all views here and check if this
            # instrument registration conflict can be fixed. If it can be, do
            # not log the following warning.
            self._log_instrument_registration_conflict(
                name,
                APIHistogram.__name__,
                unit,
                description,
                status,
            )
        if status.already_registered:
            with self._instrument_id_instrument_lock:
                return self._instrument_id_instrument[status.instrument_id]

        instrument = _Histogram(
            name,
            self._instrumentation_scope,
            self._measurement_consumer,
            unit,
            description,
            explicit_bucket_boundaries_advisory,
        )
        with self._instrument_id_instrument_lock:
            self._instrument_id_instrument[status.instrument_id] = instrument
            return instrument

    def create_gauge(self, name, unit="", description="") -> APIGauge:
        status = self._register_instrument(name, _Gauge, unit, description)

        if status.conflict:
            # FIXME #2558 go through all views here and check if this
            # instrument registration conflict can be fixed. If it can be, do
            # not log the following warning.
            self._log_instrument_registration_conflict(
                name,
                APIGauge.__name__,
                unit,
                description,
                status,
            )
        if status.already_registered:
            with self._instrument_id_instrument_lock:
                return self._instrument_id_instrument[status.instrument_id]

        instrument = _Gauge(
            name,
            self._instrumentation_scope,
            self._measurement_consumer,
            unit,
            description,
        )

        with self._instrument_id_instrument_lock:
            self._instrument_id_instrument[status.instrument_id] = instrument
            return instrument

    def create_observable_gauge(
        self, name, callbacks=None, unit="", description=""
    ) -> APIObservableGauge:
        status = self._register_instrument(
            name, _ObservableGauge, unit, description
        )

        if status.conflict:
            # FIXME #2558 go through all views here and check if this
            # instrument registration conflict can be fixed. If it can be, do
            # not log the following warning.
            self._log_instrument_registration_conflict(
                name,
                APIObservableGauge.__name__,
                unit,
                description,
                status,
            )
        if status.already_registered:
            with self._instrument_id_instrument_lock:
                return self._instrument_id_instrument[status.instrument_id]

        instrument = _ObservableGauge(
            name,
            self._instrumentation_scope,
            self._measurement_consumer,
            callbacks,
            unit,
            description,
        )

        self._measurement_consumer.register_asynchronous_instrument(instrument)

        with self._instrument_id_instrument_lock:
            self._instrument_id_instrument[status.instrument_id] = instrument
            return instrument

    def create_observable_up_down_counter(
        self, name, callbacks=None, unit="", description=""
    ) -> APIObservableUpDownCounter:
        status = self._register_instrument(
            name, _ObservableUpDownCounter, unit, description
        )

        if status.conflict:
            # FIXME #2558 go through all views here and check if this
            # instrument registration conflict can be fixed. If it can be, do
            # not log the following warning.
            self._log_instrument_registration_conflict(
                name,
                APIObservableUpDownCounter.__name__,
                unit,
                description,
                status,
            )
        if status.already_registered:
            with self._instrument_id_instrument_lock:
                return self._instrument_id_instrument[status.instrument_id]

        instrument = _ObservableUpDownCounter(
            name,
            self._instrumentation_scope,
            self._measurement_consumer,
            callbacks,
            unit,
            description,
        )

        self._measurement_consumer.register_asynchronous_instrument(instrument)

        with self._instrument_id_instrument_lock:
            self._instrument_id_instrument[status.instrument_id] = instrument
            return instrument

def _get_exemplar_filter(exemplar_filter: str) -> ExemplarFilter:
    if exemplar_filter == "trace_based":
        return TraceBasedExemplarFilter()
    if exemplar_filter == "always_on":
        return AlwaysOnExemplarFilter()
    if exemplar_filter == "always_off":
        return AlwaysOffExemplarFilter()
    msg = f"Unknown exemplar filter '{exemplar_filter}'."
    raise ValueError(msg)
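
# A minimal usage sketch (illustrative comment, not part of the module): the
# string normally comes from the OTEL_METRICS_EXEMPLAR_FILTER environment
# variable, which is read in MeterProvider.__init__ below.
#
#     _get_exemplar_filter("always_off")  # returns AlwaysOffExemplarFilter()
#     _get_exemplar_filter("bogus")       # raises ValueError
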
class MeterProvider(APIMeterProvider):
    r"""See `opentelemetry.metrics.MeterProvider`.

    Args:
        metric_readers: Register metric readers to collect metrics from the SDK
            on demand. Each :class:`opentelemetry.sdk.metrics.export.MetricReader` is
            completely independent and will collect separate streams of
            metrics. TODO: reference ``PeriodicExportingMetricReader`` usage with push
            exporters here.
        resource: The resource representing what the metrics emitted from the SDK pertain to.
        shutdown_on_exit: If true, registers an `atexit` handler to call
            `MeterProvider.shutdown`
        views: The views to configure the metric output of the SDK

    By default, instruments which do not match any :class:`opentelemetry.sdk.metrics.view.View` (or if no :class:`opentelemetry.sdk.metrics.view.View`\ s
    are provided) will report metrics with the default aggregation for the
    instrument's kind. To disable instruments by default, configure a match-all
    :class:`opentelemetry.sdk.metrics.view.View` with `DropAggregation` and then create :class:`opentelemetry.sdk.metrics.view.View`\ s to re-enable
    individual instruments:

    .. code-block:: python
        :caption: Disable default views

        MeterProvider(
            views=[
                View(instrument_name="*", aggregation=DropAggregation()),
                View(instrument_name="mycounter"),
            ],
            # ...
        )
    """

    _all_metric_readers_lock = Lock()
    _all_metric_readers = weakref.WeakSet()

    def __init__(
        self,
        metric_readers: Sequence[
            "opentelemetry.sdk.metrics.export.MetricReader"
        ] = (),
        resource: Optional[Resource] = None,
        exemplar_filter: Optional[ExemplarFilter] = None,
        shutdown_on_exit: bool = True,
        views: Sequence["opentelemetry.sdk.metrics.view.View"] = (),
    ):
        self._lock = Lock()
        self._meter_lock = Lock()
        self._atexit_handler = None
        if resource is None:
            resource = Resource.create({})
        self._sdk_config = SdkConfiguration(
            exemplar_filter=(
                exemplar_filter
                or _get_exemplar_filter(
                    environ.get(OTEL_METRICS_EXEMPLAR_FILTER, "trace_based")
                )
            ),
            resource=resource,
            metric_readers=metric_readers,
            views=views,
        )
        self._measurement_consumer = SynchronousMeasurementConsumer(
            sdk_config=self._sdk_config
        )
        disabled = environ.get(OTEL_SDK_DISABLED, "")
        self._disabled = disabled.lower().strip() == "true"

        if shutdown_on_exit:
            self._atexit_handler = register(self.shutdown)

        self._meters = {}
        self._shutdown_once = Once()
        self._shutdown = False

        for metric_reader in self._sdk_config.metric_readers:
            with self._all_metric_readers_lock:
                if metric_reader in self._all_metric_readers:
                    # pylint: disable=broad-exception-raised
                    raise Exception(
                        f"MetricReader {metric_reader} has already been "
                        "registered in another MeterProvider instance"
                    )

                self._all_metric_readers.add(metric_reader)

            metric_reader._set_collect_callback(
                self._measurement_consumer.collect
            )

    def force_flush(self, timeout_millis: float = 10_000) -> bool:
        deadline_ns = time_ns() + timeout_millis * 10**6

        metric_reader_error = {}

        for metric_reader in self._sdk_config.metric_readers:
            current_ts = time_ns()
            try:
                if current_ts >= deadline_ns:
                    raise MetricsTimeoutError(
                        "Timed out while flushing metric readers"
                    )
                metric_reader.force_flush(
                    timeout_millis=(deadline_ns - current_ts) / 10**6
                )

            # pylint: disable=broad-exception-caught
            except Exception as error:
                metric_reader_error[metric_reader] = error

        if metric_reader_error:
            metric_reader_error_string = "\n".join(
                [
                    f"{metric_reader.__class__.__name__}: {repr(error)}"
                    for metric_reader, error in metric_reader_error.items()
                ]
            )

            # pylint: disable=broad-exception-raised
            raise Exception(
                "MeterProvider.force_flush failed because the following "
                "metric readers failed during collect:\n"
                f"{metric_reader_error_string}"
            )
        return True

    def shutdown(self, timeout_millis: float = 30_000):
        deadline_ns = time_ns() + timeout_millis * 10**6

        def _shutdown():
            self._shutdown = True

        did_shutdown = self._shutdown_once.do_once(_shutdown)

        if not did_shutdown:
            _logger.warning("shutdown can only be called once")
            return

        metric_reader_error = {}

        for metric_reader in self._sdk_config.metric_readers:
            current_ts = time_ns()
            try:
                if current_ts >= deadline_ns:
                    # pylint: disable=broad-exception-raised
                    raise Exception(
                        "Didn't get to execute, deadline already exceeded"
                    )
                metric_reader.shutdown(
                    timeout_millis=(deadline_ns - current_ts) / 10**6
                )

            # pylint: disable=broad-exception-caught
            except Exception as error:
                metric_reader_error[metric_reader] = error

        if self._atexit_handler is not None:
            unregister(self._atexit_handler)
            self._atexit_handler = None

        if metric_reader_error:
            metric_reader_error_string = "\n".join(
                [
                    f"{metric_reader.__class__.__name__}: {repr(error)}"
                    for metric_reader, error in metric_reader_error.items()
                ]
            )

            # pylint: disable=broad-exception-raised
            raise Exception(
                "MeterProvider.shutdown failed because the following "
                "metric readers failed during shutdown:\n"
                f"{metric_reader_error_string}"
            )

    def get_meter(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> Meter:
        if self._disabled:
            return NoOpMeter(name, version=version, schema_url=schema_url)

        if self._shutdown:
            _logger.warning(
                "A shutdown `MeterProvider` cannot provide a `Meter`"
            )
            return NoOpMeter(name, version=version, schema_url=schema_url)

        if not name:
            _logger.warning("Meter name cannot be None or empty.")
            return NoOpMeter(name, version=version, schema_url=schema_url)

        info = InstrumentationScope(name, version, schema_url, attributes)
        with self._meter_lock:
            if not self._meters.get(info):
                # FIXME #2558 pass SDKConfig object to meter so that the meter
                # has access to views.
                self._meters[info] = Meter(
                    info,
                    self._measurement_consumer,
                )
            return self._meters[info]
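
# A minimal end-to-end sketch (illustrative comment; the reader and exporter
# shown are assumptions about the surrounding package, not defined here):
#
#     reader = PeriodicExportingMetricReader(ConsoleMetricExporter())
#     provider = MeterProvider(metric_readers=[reader])
#     meter = provider.get_meter("my.library")
#     counter = meter.create_counter("requests")
#     counter.add(1)
#     provider.shutdown()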
@@ -0,0 +1,153 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


from logging import getLogger
from threading import Lock
from time import time_ns
from typing import Dict, List, Optional, Sequence

from opentelemetry.metrics import Instrument
from opentelemetry.sdk.metrics._internal.aggregation import (
    Aggregation,
    DefaultAggregation,
    _Aggregation,
    _SumAggregation,
)
from opentelemetry.sdk.metrics._internal.export import AggregationTemporality
from opentelemetry.sdk.metrics._internal.measurement import Measurement
from opentelemetry.sdk.metrics._internal.point import DataPointT
from opentelemetry.sdk.metrics._internal.view import View

_logger = getLogger(__name__)


class _ViewInstrumentMatch:
    def __init__(
        self,
        view: View,
        instrument: Instrument,
        instrument_class_aggregation: Dict[type, Aggregation],
    ):
        self._view = view
        self._instrument = instrument
        self._attributes_aggregation: Dict[frozenset, _Aggregation] = {}
        self._lock = Lock()
        self._instrument_class_aggregation = instrument_class_aggregation
        self._name = self._view._name or self._instrument.name
        self._description = (
            self._view._description or self._instrument.description
        )
        if not isinstance(self._view._aggregation, DefaultAggregation):
            self._aggregation = self._view._aggregation._create_aggregation(
                self._instrument,
                None,
                self._view._exemplar_reservoir_factory,
                0,
            )
        else:
            self._aggregation = self._instrument_class_aggregation[
                self._instrument.__class__
            ]._create_aggregation(
                self._instrument,
                None,
                self._view._exemplar_reservoir_factory,
                0,
            )

    def conflicts(self, other: "_ViewInstrumentMatch") -> bool:
        # pylint: disable=protected-access

        result = (
            self._name == other._name
            and self._instrument.unit == other._instrument.unit
            # The aggregation class is being used here instead of data point
            # type since they are functionally equivalent.
            and self._aggregation.__class__ == other._aggregation.__class__
        )
        if isinstance(self._aggregation, _SumAggregation):
            result = (
                result
                and self._aggregation._instrument_is_monotonic
                == other._aggregation._instrument_is_monotonic
                and self._aggregation._instrument_aggregation_temporality
                == other._aggregation._instrument_aggregation_temporality
            )

        return result

    # pylint: disable=protected-access
    def consume_measurement(
        self, measurement: Measurement, should_sample_exemplar: bool = True
    ) -> None:
        if self._view._attribute_keys is not None:
            attributes = {}

            for key, value in (measurement.attributes or {}).items():
                if key in self._view._attribute_keys:
                    attributes[key] = value
        elif measurement.attributes is not None:
            attributes = measurement.attributes
        else:
            attributes = {}

        aggr_key = frozenset(attributes.items())

        if aggr_key not in self._attributes_aggregation:
            with self._lock:
                if aggr_key not in self._attributes_aggregation:
                    if not isinstance(
                        self._view._aggregation, DefaultAggregation
                    ):
                        aggregation = (
                            self._view._aggregation._create_aggregation(
                                self._instrument,
                                attributes,
                                self._view._exemplar_reservoir_factory,
                                time_ns(),
                            )
                        )
                    else:
                        aggregation = self._instrument_class_aggregation[
                            self._instrument.__class__
                        ]._create_aggregation(
                            self._instrument,
                            attributes,
                            self._view._exemplar_reservoir_factory,
                            time_ns(),
                        )
                    self._attributes_aggregation[aggr_key] = aggregation

        self._attributes_aggregation[aggr_key].aggregate(
            measurement, should_sample_exemplar
        )

    def collect(
        self,
        collection_aggregation_temporality: AggregationTemporality,
        collection_start_nanos: int,
    ) -> Optional[Sequence[DataPointT]]:
        data_points: List[DataPointT] = []
        with self._lock:
            for aggregation in self._attributes_aggregation.values():
                data_point = aggregation.collect(
                    collection_aggregation_temporality, collection_start_nanos
                )
                if data_point is not None:
                    data_points.append(data_point)

        # Return None instead of an empty list because the caller does not
        # consume a sequence, and for consistency with the other collect
        # methods that also return None.
        return data_points or None
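
# A short illustration (hypothetical attribute values) of the filtering in
# consume_measurement above: with a View whose attribute_keys is {"route"},
# measurements recorded with {"route": "/home", "user": "u1"} and
# {"route": "/home", "user": "u2"} both reduce to the same aggr_key,
# frozenset({("route", "/home")}), and are aggregated into a single stream.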
File diff suppressed because it is too large
@@ -0,0 +1,17 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class MetricsTimeoutError(Exception):
    """Raised when a metrics function times out"""
@@ -0,0 +1,39 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .exemplar import Exemplar
from .exemplar_filter import (
    AlwaysOffExemplarFilter,
    AlwaysOnExemplarFilter,
    ExemplarFilter,
    TraceBasedExemplarFilter,
)
from .exemplar_reservoir import (
    AlignedHistogramBucketExemplarReservoir,
    ExemplarReservoir,
    ExemplarReservoirBuilder,
    SimpleFixedSizeExemplarReservoir,
)

__all__ = [
    "Exemplar",
    "ExemplarFilter",
    "AlwaysOffExemplarFilter",
    "AlwaysOnExemplarFilter",
    "TraceBasedExemplarFilter",
    "AlignedHistogramBucketExemplarReservoir",
    "ExemplarReservoir",
    "ExemplarReservoirBuilder",
    "SimpleFixedSizeExemplarReservoir",
]
@@ -0,0 +1,50 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import dataclasses
from typing import Optional, Union

from opentelemetry.util.types import Attributes


@dataclasses.dataclass(frozen=True)
class Exemplar:
    """A representation of an exemplar, which is a sample input measurement.

    Exemplars also hold information about the environment when the measurement
    was recorded, for example the span and trace ID of the active span when the
    exemplar was recorded.

    Attributes
        trace_id: (optional) The trace associated with a recording
        span_id: (optional) The span associated with a recording
        time_unix_nano: The time of the observation
        value: The recorded value
        filtered_attributes: A set of filtered attributes which provide additional insight into the Context when the observation was made.

    References:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#exemplars
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar
    """

    # TODO Fix doc - if using valid Google `Attributes:` key, the attributes are duplicated
    # one will come from napoleon extension and the other from autodoc extension. This
    # will raise a sphinx error of duplicated object description
    # See https://github.com/sphinx-doc/sphinx/issues/8664

    filtered_attributes: Attributes
    value: Union[int, float]
    time_unix_nano: int
    span_id: Optional[int] = None
    trace_id: Optional[int] = None
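
# A minimal construction sketch (field values are illustrative only):
#
#     Exemplar(
#         filtered_attributes={"user.id": "abc"},
#         value=1.5,
#         time_unix_nano=1_700_000_000_000_000_000,
#     )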
@@ -0,0 +1,134 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod
from typing import Union

from opentelemetry import trace
from opentelemetry.context import Context
from opentelemetry.trace.span import INVALID_SPAN
from opentelemetry.util.types import Attributes


class ExemplarFilter(ABC):
    """``ExemplarFilter`` determines which measurements are eligible for becoming an
    ``Exemplar``.

    Exemplar filters are used to filter measurements before attempting to store them
    in a reservoir.

    Reference:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplarfilter
    """

    @abstractmethod
    def should_sample(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> bool:
        """Returns whether or not a reservoir should attempt to filter a measurement.

        Args:
            value: The value of the measurement
            time_unix_nano: A timestamp that best represents when the measurement was taken
            attributes: The complete set of measurement attributes
            context: The Context of the measurement
        """
        raise NotImplementedError(
            "ExemplarFilter.should_sample is not implemented"
        )


class AlwaysOnExemplarFilter(ExemplarFilter):
    """An ExemplarFilter which makes all measurements eligible for being an Exemplar.

    Reference:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwayson
    """

    def should_sample(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> bool:
        """Returns whether or not a reservoir should attempt to filter a measurement.

        Args:
            value: The value of the measurement
            time_unix_nano: A timestamp that best represents when the measurement was taken
            attributes: The complete set of measurement attributes
            context: The Context of the measurement
        """
        return True


class AlwaysOffExemplarFilter(ExemplarFilter):
    """An ExemplarFilter which makes no measurements eligible for being an Exemplar.

    Using this ExemplarFilter is equivalent to disabling the exemplar feature.

    Reference:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alwaysoff
    """

    def should_sample(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> bool:
        """Returns whether or not a reservoir should attempt to filter a measurement.

        Args:
            value: The value of the measurement
            time_unix_nano: A timestamp that best represents when the measurement was taken
            attributes: The complete set of measurement attributes
            context: The Context of the measurement
        """
        return False


class TraceBasedExemplarFilter(ExemplarFilter):
    """An ExemplarFilter which makes a measurement eligible for being an Exemplar
    only if it is recorded in the context of a sampled parent span.

    Reference:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#tracebased
    """

    def should_sample(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> bool:
        """Returns whether or not a reservoir should attempt to filter a measurement.

        Args:
            value: The value of the measurement
            time_unix_nano: A timestamp that best represents when the measurement was taken
            attributes: The complete set of measurement attributes
            context: The Context of the measurement
        """
        span = trace.get_current_span(context)
        if span == INVALID_SPAN:
            return False
        return span.get_span_context().trace_flags.sampled
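
# A behavioural sketch (illustrative; an empty Context carries no active span):
#
#     from opentelemetry.context import Context
#
#     ctx = Context()
#     AlwaysOnExemplarFilter().should_sample(1, 0, {}, ctx)    # True
#     AlwaysOffExemplarFilter().should_sample(1, 0, {}, ctx)   # False
#     TraceBasedExemplarFilter().should_sample(1, 0, {}, ctx)  # False, no span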
@@ -0,0 +1,332 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod
from collections import defaultdict
from random import randrange
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Mapping,
    Optional,
    Sequence,
    Union,
)

from opentelemetry import trace
from opentelemetry.context import Context
from opentelemetry.trace.span import INVALID_SPAN
from opentelemetry.util.types import Attributes

from .exemplar import Exemplar


class ExemplarReservoir(ABC):
    """ExemplarReservoir provides a method to offer measurements to the reservoir
    and another to collect accumulated Exemplars.

    Note:
        The constructor MUST accept ``**kwargs`` that may be set from aggregation
        parameters.

    Reference:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplarreservoir
    """

    @abstractmethod
    def offer(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> None:
        """Offers a measurement to be sampled.

        Args:
            value: Measured value
            time_unix_nano: Measurement instant
            attributes: Measurement attributes
            context: Measurement context
        """
        raise NotImplementedError("ExemplarReservoir.offer is not implemented")

    @abstractmethod
    def collect(self, point_attributes: Attributes) -> List[Exemplar]:
        """Returns accumulated Exemplars and also resets the reservoir for the next
        sampling period

        Args:
            point_attributes: The attributes associated with metric point.

        Returns:
            a list of ``opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar`` s. Returned
            exemplars contain the attributes that were filtered out by the aggregator,
            but recorded alongside the original measurement.
        """
        raise NotImplementedError(
            "ExemplarReservoir.collect is not implemented"
        )

class ExemplarBucket:
    def __init__(self) -> None:
        self.__value: Union[int, float] = 0
        self.__attributes: Attributes = None
        self.__time_unix_nano: int = 0
        self.__span_id: Optional[int] = None
        self.__trace_id: Optional[int] = None
        self.__offered: bool = False

    def offer(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> None:
        """Offers a measurement to be sampled.

        Args:
            value: Measured value
            time_unix_nano: Measurement instant
            attributes: Measurement attributes
            context: Measurement context
        """
        self.__value = value
        self.__time_unix_nano = time_unix_nano
        self.__attributes = attributes
        span = trace.get_current_span(context)
        if span != INVALID_SPAN:
            span_context = span.get_span_context()
            self.__span_id = span_context.span_id
            self.__trace_id = span_context.trace_id

        self.__offered = True

    def collect(self, point_attributes: Attributes) -> Optional[Exemplar]:
        """May return an Exemplar and resets the bucket for the next sampling period."""
        if not self.__offered:
            return None

        # filters out attributes from the measurement that are already included in the metric data point
        # See the specification for more details:
        # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exemplar
        filtered_attributes = (
            {
                k: v
                for k, v in self.__attributes.items()
                if k not in point_attributes
            }
            if self.__attributes
            else None
        )

        exemplar = Exemplar(
            filtered_attributes,
            self.__value,
            self.__time_unix_nano,
            self.__span_id,
            self.__trace_id,
        )
        self.__reset()
        return exemplar

    def __reset(self) -> None:
        """Reset the bucket state after a collection cycle."""
        self.__value = 0
        self.__attributes = {}
        self.__time_unix_nano = 0
        self.__span_id = None
        self.__trace_id = None
        self.__offered = False


class BucketIndexError(ValueError):
    """An exception raised when the bucket index cannot be found."""

class FixedSizeExemplarReservoirABC(ExemplarReservoir):
    """Abstract class for a reservoir with fixed size."""

    def __init__(self, size: int, **kwargs) -> None:
        super().__init__(**kwargs)
        self._size: int = size
        self._reservoir_storage: Mapping[int, ExemplarBucket] = defaultdict(
            ExemplarBucket
        )

    def collect(self, point_attributes: Attributes) -> List[Exemplar]:
        """Returns accumulated Exemplars and also resets the reservoir for the next
        sampling period

        Args:
            point_attributes: The attributes associated with metric point.

        Returns:
            a list of ``opentelemetry.sdk.metrics._internal.exemplar.exemplar.Exemplar`` s. Returned
            exemplars contain the attributes that were filtered out by the aggregator,
            but recorded alongside the original measurement.
        """
        exemplars = [
            e
            for e in (
                bucket.collect(point_attributes)
                for _, bucket in sorted(self._reservoir_storage.items())
            )
            if e is not None
        ]
        self._reset()
        return exemplars

    def offer(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> None:
        """Offers a measurement to be sampled.

        Args:
            value: Measured value
            time_unix_nano: Measurement instant
            attributes: Measurement attributes
            context: Measurement context
        """
        try:
            index = self._find_bucket_index(
                value, time_unix_nano, attributes, context
            )

            self._reservoir_storage[index].offer(
                value, time_unix_nano, attributes, context
            )
        except BucketIndexError:
            # Ignore invalid bucket index
            pass

    @abstractmethod
    def _find_bucket_index(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> int:
        """Determines the bucket index for the given measurement.

        It should be implemented by subclasses based on specific strategies.

        Args:
            value: Measured value
            time_unix_nano: Measurement instant
            attributes: Measurement attributes
            context: Measurement context

        Returns:
            The bucket index

        Raises:
            BucketIndexError: If no bucket index can be found.
        """

    def _reset(self) -> None:
        """Reset the reservoir by resetting any stateful logic after a collection cycle."""

class SimpleFixedSizeExemplarReservoir(FixedSizeExemplarReservoirABC):
    """This reservoir uses a uniformly-weighted sampling algorithm based on the number
    of samples the reservoir has seen so far to determine if the offered measurements
    should be sampled.

    Reference:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#simplefixedsizeexemplarreservoir
    """

    def __init__(self, size: int = 1, **kwargs) -> None:
        super().__init__(size, **kwargs)
        self._measurements_seen: int = 0

    def _reset(self) -> None:
        super()._reset()
        self._measurements_seen = 0

    def _find_bucket_index(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> int:
        self._measurements_seen += 1
        if self._measurements_seen < self._size:
            return self._measurements_seen - 1

        index = randrange(0, self._measurements_seen)
        if index < self._size:
            return index

        raise BucketIndexError("Unable to find the bucket index.")

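
# Sampling sketch for the class above (illustrative): for a reservoir of size
# k, measurement number n (with n >= k) is kept only when randrange(0, n)
# falls below k, i.e. with probability k / n, which yields a uniform sample
# over all measurements seen in the collection period.
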
class AlignedHistogramBucketExemplarReservoir(FixedSizeExemplarReservoirABC):
    """This Exemplar reservoir takes a configuration parameter that is the
    configuration of a Histogram. This implementation keeps the last seen measurement
    that falls within a histogram bucket.

    Reference:
        https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#alignedhistogrambucketexemplarreservoir
    """

    def __init__(self, boundaries: Sequence[float], **kwargs) -> None:
        super().__init__(len(boundaries) + 1, **kwargs)
        self._boundaries: Sequence[float] = boundaries

    def offer(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> None:
        """Offers a measurement to be sampled."""
        index = self._find_bucket_index(
            value, time_unix_nano, attributes, context
        )
        self._reservoir_storage[index].offer(
            value, time_unix_nano, attributes, context
        )

    def _find_bucket_index(
        self,
        value: Union[int, float],
        time_unix_nano: int,
        attributes: Attributes,
        context: Context,
    ) -> int:
        for index, boundary in enumerate(self._boundaries):
            if value <= boundary:
                return index
        return len(self._boundaries)


ExemplarReservoirBuilder = Callable[[Dict[str, Any]], ExemplarReservoir]
ExemplarReservoirBuilder.__doc__ = """ExemplarReservoir builder.

It may receive the Aggregation parameters it is bound to; e.g.
the _ExplicitBucketHistogramAggregation will provide the boundaries.
"""
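
# Bucket-index sketch for AlignedHistogramBucketExemplarReservoir above
# (illustrative): with boundaries [0, 5, 10], a value of 7 maps to index 2
# (the (5, 10] bucket) and a value of 12 maps to index 3, the overflow
# bucket at len(boundaries).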
@@ -0,0 +1,190 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from math import ceil, log2


class Buckets:
    # No method of this class is protected by locks because instances of this
    # class are only used in methods that are protected by locks themselves.

    def __init__(self):
        self._counts = [0]

        # The term index refers to the number of the exponential histogram bucket
        # used to determine its boundaries. The lower boundary of a bucket is
        # determined by base ** index and the upper boundary of a bucket is
        # determined by base ** (index + 1). index values are signed to account
        # for values less than or equal to 1.

        # self._index_* will all have values equal to a certain index that is
        # determined by the corresponding mapping _map_to_index function and
        # the value of the index depends on the value passed to _map_to_index.

        # Index of the 0th position in self._counts: self._counts[0] is the
        # count in the bucket with index self.__index_base.
        self.__index_base = 0

        # self.__index_start is the smallest index value represented in
        # self._counts.
        self.__index_start = 0

        # self.__index_end is the largest index value represented in
        # self._counts.
        self.__index_end = 0

    @property
    def index_start(self) -> int:
        return self.__index_start

    @index_start.setter
    def index_start(self, value: int) -> None:
        self.__index_start = value

    @property
    def index_end(self) -> int:
        return self.__index_end

    @index_end.setter
    def index_end(self, value: int) -> None:
        self.__index_end = value

    @property
    def index_base(self) -> int:
        return self.__index_base

    @index_base.setter
    def index_base(self, value: int) -> None:
        self.__index_base = value

    @property
    def counts(self):
        return self._counts

    def get_offset_counts(self):
        bias = self.__index_base - self.__index_start
        return self._counts[-bias:] + self._counts[:-bias]

    def grow(self, needed: int, max_size: int) -> None:
        size = len(self._counts)
        bias = self.__index_base - self.__index_start
        old_positive_limit = size - bias

        # 2 ** ceil(log2(needed)) finds the smallest power of two that is
        # greater than or equal to needed:
        # 2 ** ceil(log2(1)) == 1
        # 2 ** ceil(log2(2)) == 2
        # 2 ** ceil(log2(3)) == 4
        # 2 ** ceil(log2(4)) == 4
        # 2 ** ceil(log2(5)) == 8
        # 2 ** ceil(log2(6)) == 8
        # 2 ** ceil(log2(7)) == 8
        # 2 ** ceil(log2(8)) == 8
        new_size = min(2 ** ceil(log2(needed)), max_size)

        new_positive_limit = new_size - bias

        tmp = [0] * new_size
        tmp[new_positive_limit:] = self._counts[old_positive_limit:]
        tmp[0:old_positive_limit] = self._counts[0:old_positive_limit]
        self._counts = tmp

    @property
    def offset(self) -> int:
        return self.__index_start

    def __len__(self) -> int:
        if len(self._counts) == 0:
            return 0

        if self.__index_end == self.__index_start and self[0] == 0:
            return 0

        return self.__index_end - self.__index_start + 1

    def __getitem__(self, key: int) -> int:
        bias = self.__index_base - self.__index_start

        if key < bias:
            key += len(self._counts)

        key -= bias

        return self._counts[key]

    def downscale(self, amount: int) -> None:
        """
        Rotates, then collapses 2 ** amount to 1 buckets.
        """

        bias = self.__index_base - self.__index_start

        if bias != 0:
            self.__index_base = self.__index_start

            # [0, 1, 2, 3, 4] Original backing array

            self._counts = self._counts[::-1]
            # [4, 3, 2, 1, 0]

            self._counts = (
                self._counts[:bias][::-1] + self._counts[bias:][::-1]
            )
            # [3, 4, 0, 1, 2] This is a rotation of the backing array.

        size = 1 + self.__index_end - self.__index_start
        each = 1 << amount
        inpos = 0
        outpos = 0

        pos = self.__index_start

        while pos <= self.__index_end:
            mod = pos % each
            if mod < 0:
                mod += each

            index = mod

            while index < each and inpos < size:
                if outpos != inpos:
                    self._counts[outpos] += self._counts[inpos]
                    self._counts[inpos] = 0

                inpos += 1
                pos += 1
                index += 1

            outpos += 1

        self.__index_start >>= amount
        self.__index_end >>= amount
        self.__index_base = self.__index_start

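    # Worked example for downscale (illustrative): with counts [1, 1, 1, 1]
    # for indices 2..5, downscale(1) collapses 2 ** 1 == 2 source buckets
    # into one, leaving counts [2, 2] for indices 1..2 (2 >> 1 == 1 and
    # 5 >> 1 == 2).
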
    def increment_bucket(self, bucket_index: int, increment: int = 1) -> None:
        self._counts[bucket_index] += increment

    def copy_empty(self) -> "Buckets":
        copy = Buckets()

        # pylint: disable=no-member
        # pylint: disable=protected-access
        # pylint: disable=attribute-defined-outside-init
        # pylint: disable=invalid-name
        copy._Buckets__index_base = self._Buckets__index_base
        copy._Buckets__index_start = self._Buckets__index_start
        copy._Buckets__index_end = self._Buckets__index_end
        copy._counts = [0 for _ in self._counts]

        return copy
@@ -0,0 +1,98 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from abc import ABC, abstractmethod


class Mapping(ABC):
    """
    Parent class for `LogarithmMapping` and `ExponentialMapping`.
    """

    # pylint: disable=no-member
    def __new__(cls, scale: int):
        with cls._mappings_lock:
            # cls._mappings and cls._mappings_lock are implemented in each of
            # the child classes as a dictionary and a lock, respectively. They
            # are not instantiated here because that would lead to both child
            # classes having the same instance of cls._mappings and
            # cls._mappings_lock.
            if scale not in cls._mappings:
                cls._mappings[scale] = super().__new__(cls)
                cls._mappings[scale]._init(scale)

        return cls._mappings[scale]

    @abstractmethod
    def _init(self, scale: int) -> None:
        # pylint: disable=attribute-defined-outside-init

        if scale > self._get_max_scale():
            # pylint: disable=broad-exception-raised
            raise Exception(f"scale is larger than {self._max_scale}")

        if scale < self._get_min_scale():
            # pylint: disable=broad-exception-raised
            raise Exception(f"scale is smaller than {self._min_scale}")

        # The size of the exponential histogram buckets is determined by a
        # parameter known as scale; larger values of scale will produce
        # smaller buckets. Bucket boundaries of the exponential histogram are
        # located at integer powers of the base, where:
        #
        # base = 2 ** (2 ** (-scale))
        # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/data-model.md#all-scales-use-the-logarithm-function
        self._scale = scale

    @abstractmethod
    def _get_min_scale(self) -> int:
        """
        Return the smallest possible value for the mapping scale
        """

    @abstractmethod
    def _get_max_scale(self) -> int:
        """
        Return the largest possible value for the mapping scale
        """

    @abstractmethod
    def map_to_index(self, value: float) -> int:
        """
        Maps positive floating point values to indexes corresponding to
        `Mapping.scale`. Implementations are not expected to handle zeros,
        +inf, NaN, or negative values.
        """

    @abstractmethod
    def get_lower_boundary(self, index: int) -> float:
        """
        Returns the lower boundary of a given bucket index. The index is
        expected to map onto a range that is at least partially inside the
        range of normal floating point values. If the corresponding
        bucket's upper boundary is less than or equal to 2 ** -1022,
        :class:`~opentelemetry.sdk.metrics.MappingUnderflowError`
        will be raised. If the corresponding bucket's lower boundary is greater
        than ``sys.float_info.max``,
        :class:`~opentelemetry.sdk.metrics.MappingOverflowError`
        will be raised.
        """

    @property
    def scale(self) -> int:
        """
        Returns the parameter that controls the resolution of this mapping.
        See: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/datamodel.md#exponential-scale
        """
        return self._scale
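
# Resolution sketch (illustrative): base = 2 ** (2 ** -scale), so scale 0
# gives base 2 (one bucket per power of two), scale 1 gives base 2 ** 0.5
# (two buckets per power of two) and scale -1 gives base 4 (one bucket per
# two powers of two).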
@@ -0,0 +1,26 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


class MappingUnderflowError(Exception):
    """
    Raised when computing the lower boundary of an index that maps into a
    denormal floating point value.
    """


class MappingOverflowError(Exception):
    """
    Raised when computing the lower boundary of an index that maps into +inf.
    """
@@ -0,0 +1,141 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from math import ldexp
from threading import Lock

from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping import (
    Mapping,
)
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.errors import (
    MappingOverflowError,
    MappingUnderflowError,
)
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.ieee_754 import (
    MANTISSA_WIDTH,
    MAX_NORMAL_EXPONENT,
    MIN_NORMAL_EXPONENT,
    MIN_NORMAL_VALUE,
    get_ieee_754_exponent,
    get_ieee_754_mantissa,
)


class ExponentMapping(Mapping):
    # Reference implementation here:
    # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/exponent/exponent.go

    _mappings = {}
    _mappings_lock = Lock()

    _min_scale = -10
    _max_scale = 0

    def _get_min_scale(self):
        # _min_scale defines the point at which the exponential mapping
        # function becomes useless for 64-bit floats. With scale -10, ignoring
        # subnormal values, bucket indices range from -1 to 1.
        return -10

    def _get_max_scale(self):
        # _max_scale is the largest scale supported by exponential mapping. Use
        # a logarithm mapping for larger scales.
        return 0

    def _init(self, scale: int):
        # pylint: disable=attribute-defined-outside-init

        super()._init(scale)

        # self._min_normal_lower_boundary_index is the largest index such that
        # base ** index < MIN_NORMAL_VALUE and
        # base ** (index + 1) >= MIN_NORMAL_VALUE. An exponential histogram
        # bucket with this index covers the range
        # (base ** index, base ** (index + 1)], including MIN_NORMAL_VALUE.
        # This is the smallest valid index that contains at least one normal
        # value.
        index = MIN_NORMAL_EXPONENT >> -self._scale

        if -self._scale < 2:
            # For scales -1 and 0, the minimum normal value 2 ** -1022 is a
            # power-of-two multiple, meaning base ** index == MIN_NORMAL_VALUE.
            # Subtracting 1 so that base ** (index + 1) == MIN_NORMAL_VALUE.
            index -= 1

        self._min_normal_lower_boundary_index = index

        # self._max_normal_lower_boundary_index is the index such that
        # base**index equals the greatest representable lower boundary. An
        # exponential histogram bucket with this index covers the range
        # ((2 ** 1024) / base, 2 ** 1024], which includes opentelemetry.sdk.
        # metrics._internal.exponential_histogram.ieee_754.MAX_NORMAL_VALUE.
        # This bucket is incomplete, since the upper boundary cannot be
        # represented. One greater than this index corresponds with the bucket
        # containing values > 2 ** 1024.
        self._max_normal_lower_boundary_index = (
            MAX_NORMAL_EXPONENT >> -self._scale
        )

    def map_to_index(self, value: float) -> int:
        if value < MIN_NORMAL_VALUE:
            return self._min_normal_lower_boundary_index

        exponent = get_ieee_754_exponent(value)

        # Positive integers are represented in binary as having an infinite
        # amount of leading zeroes, for example 2 is represented as ...00010.

        # A negative integer -x is represented in binary as the complement of
        # (x - 1). For example, -4 is represented as the complement of 4 - 1
        # == 3. 3 is represented as ...00011. Its complement is ...11100, the
        # binary representation of -4.

        # get_ieee_754_mantissa(value) gets the positive integer made up
        # from the rightmost MANTISSA_WIDTH bits (the mantissa) of the IEEE
        # 754 representation of value. If value is an exact power of 2, all
        # these MANTISSA_WIDTH bits would be all zeroes, and when 1 is
        # subtracted the resulting value is -1. The binary representation of
        # -1 is ...111, so when these bits are right shifted MANTISSA_WIDTH
        # places, the resulting value for correction is -1. If value is not an
        # exact power of 2, at least one of the rightmost MANTISSA_WIDTH
        # bits would be 1 (even for values whose decimal part is 0, like 5.0,
        # since the IEEE 754 representation of such a number is also the
        # product of a power of 2 (defined in the exponent part of the IEEE
        # 754 representation) and the value defined in the mantissa). Having
        # at least one of the rightmost MANTISSA_WIDTH bits set to 1 means
        # that get_ieee_754_mantissa(value) will always be greater than or
        # equal to 1, and when 1 is subtracted, the result will be greater
        # than or equal to 0, whose representation in binary will be of at
        # most MANTISSA_WIDTH ones that have an infinite amount of leading
        # zeroes. When those MANTISSA_WIDTH bits are shifted to the right
        # MANTISSA_WIDTH places, the resulting value will be 0.

        # In summary, correction will be -1 if value is a power of 2, 0 if not.

        # FIXME Document why we can assume value will not be 0, inf, or NaN.
        correction = (get_ieee_754_mantissa(value) - 1) >> MANTISSA_WIDTH

        return (exponent + correction) >> -self._scale

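    # Worked example (illustrative), at scale 0 where base == 2: 4.0 is an
    # exact power of two (exponent 2, zero mantissa bits), so correction is
    # -1 and map_to_index(4.0) == 1, the (2, 4] bucket; 5.0 has a non-zero
    # mantissa, so correction is 0 and map_to_index(5.0) == 2, the (4, 8]
    # bucket.
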
    def get_lower_boundary(self, index: int) -> float:
        if index < self._min_normal_lower_boundary_index:
            raise MappingUnderflowError()

        if index > self._max_normal_lower_boundary_index:
            raise MappingOverflowError()

        return ldexp(1, index << -self._scale)

    @property
    def scale(self) -> int:
        return self._scale
@@ -0,0 +1,175 @@
# IEEE 754 Explained

IEEE 754 is a standard that defines a way to represent certain mathematical
objects using binary numbers.

## Binary Number Fields

The binary numbers used in IEEE 754 can have different lengths; the length that
is interesting for the purposes of this project is 64 bits. These binary
numbers are made up of 3 contiguous fields of bits, from left to right:

1. 1 sign bit
2. 11 exponent bits
3. 52 mantissa bits

Depending on the values these fields have, the represented mathematical object
can be one of:

* Floating point number
* Zero
* NaN
* Infinity

## Floating Point Numbers

IEEE 754 represents a floating point number $f$ using an exponential
notation with 4 components: $sign$, $mantissa$, $base$ and $exponent$:

$$f = sign \times mantissa \times base ^ {exponent}$$

There are two possible representations of floating point numbers:
_normal_ and _denormal_, which have different valid values for
their $mantissa$ and $exponent$ fields.

### Binary Representation

$sign$, $mantissa$, and $exponent$ are represented in binary; the
representation of each component has certain details, explained next.

$base$ is always $2$ and it is not represented in binary.

#### Sign

$sign$ can have 2 values:

1. $1$ if the `sign` bit is `0`.
2. $-1$ if the `sign` bit is `1`.

#### Mantissa

##### Normal Floating Point Numbers

$mantissa$ is a positive fractional number whose integer part is $1$, for example
$1.2345 \dots$. The `mantissa` bits represent only the fractional part and the
$mantissa$ value can be calculated as:

$$mantissa = 1 + \sum_{i=1}^{52} b_{i} \times 2^{-i} = 1 + \frac{b_{1}}{2^{1}} + \frac{b_{2}}{2^{2}} + \dots + \frac{b_{51}}{2^{51}} + \frac{b_{52}}{2^{52}}$$

Where $b_{i}$ is:

1. $0$ if the bit at the position `i - 1` is `0`.
2. $1$ if the bit at the position `i - 1` is `1`.

##### Denormal Floating Point Numbers

$mantissa$ is a positive fractional number whose integer part is $0$, for example
$0.12345 \dots$. The `mantissa` bits represent only the fractional part and the
$mantissa$ value can be calculated as:

$$mantissa = \sum_{i=1}^{52} b_{i} \times 2^{-i} = \frac{b_{1}}{2^{1}} + \frac{b_{2}}{2^{2}} + \dots + \frac{b_{51}}{2^{51}} + \frac{b_{52}}{2^{52}}$$

Where $b_{i}$ is:

1. $0$ if the bit at the position `i - 1` is `0`.
2. $1$ if the bit at the position `i - 1` is `1`.

#### Exponent

##### Normal Floating Point Numbers

Only the following bit sequences are allowed: `00000000001` to `11111111110`.
That is, there must be at least one `0` and one `1` in the exponent bits.

The actual value of the $exponent$ can be calculated as:

$$exponent = v - bias$$

where $v$ is the value of the binary number in the exponent bits and $bias$ is $1023$.
Considering the restrictions above, the respective minimum and maximum values for the
exponent are:

1. `00000000001` = $1$, $1 - 1023 = -1022$
2. `11111111110` = $2046$, $2046 - 1023 = 1023$

So, $exponent$ is an integer in the range $\left[-1022, 1023\right]$.

##### Denormal Floating Point Numbers

$exponent$ is always $-1022$. Nevertheless, it is always represented as `00000000000`.

### Normal and Denormal Floating Point Numbers

The smallest absolute value a normal floating point number can have is calculated
like this:

$$1 \times 1.0\dots0 \times 2^{-1022} = 2.2250738585072014 \times 10^{-308}$$

Since normal floating point numbers always have a $1$ as the integer part of the
$mantissa$, smaller values can be achieved by using the smallest possible exponent
( $-1022$ ) and a $0$ in the integer part of the $mantissa$, but significant digits are lost.

The smallest absolute value a denormal floating point number can have is calculated
like this:

$$1 \times 2^{-52} \times 2^{-1022} = 5 \times 10^{-324}$$
## Zero
|
||||
|
||||
Zero is represented like this:
|
||||
|
||||
* Sign bit: `X`
|
||||
* Exponent bits: `00000000000`
|
||||
* Mantissa bits: `0000000000000000000000000000000000000000000000000000`
|
||||
|
||||
where `X` means `0` or `1`.
|
||||
|
||||
## NaN
|
||||
|
||||
There are 2 kinds of NaNs that are represented:
|
||||
|
||||
1. QNaNs (Quiet NaNs): represent the result of indeterminate operations.
|
||||
2. SNaNs (Signalling NaNs): represent the result of invalid operations.
|
||||
|
||||
### QNaNs
|
||||
|
||||
QNaNs are represented like this:
|
||||
|
||||
* Sign bit: `X`
|
||||
* Exponent bits: `11111111111`
|
||||
* Mantissa bits: `1XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`
|
||||
|
||||
where `X` means `0` or `1`.
|
||||
|
||||
### SNaNs
|
||||
|
||||
SNaNs are represented like this:
|
||||
|
||||
* Sign bit: `X`
|
||||
* Exponent bits: `11111111111`
|
||||
* Mantissa bits: `0XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX1`
|
||||
|
||||
where `X` means `0` or `1`.
|
||||
|
||||
## Infinite
|
||||
|
||||
### Positive Infinite
|
||||
|
||||
Positive infinite is represented like this:
|
||||
|
||||
* Sign bit: `0`
|
||||
* Exponent bits: `11111111111`
|
||||
* Mantissa bits: `0000000000000000000000000000000000000000000000000000`
|
||||
|
||||
where `X` means `0` or `1`.
|
||||
|
||||
### Negative Infinite
|
||||
|
||||
Negative infinite is represented like this:
|
||||
|
||||
* Sign bit: `1`
|
||||
* Exponent bits: `11111111111`
|
||||
* Mantissa bits: `0000000000000000000000000000000000000000000000000000`
|
||||
|
||||
where `X` means `0` or `1`.
|
||||
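As a quick, hedged illustration of the layout described above (not part of the SDK; the helper name `decode_ieee_754` is hypothetical), the three bit fields of a Python `float` can be inspected by reinterpreting its 8 bytes as a 64-bit unsigned integer:

```python
from struct import pack, unpack


def decode_ieee_754(value: float):
    """Return (sign, biased_exponent, mantissa_bits) of a binary64 float."""
    # Reinterpret the 8 bytes of the float as a 64-bit unsigned integer.
    bits = unpack("<Q", pack("<d", value))[0]
    sign = bits >> 63                  # 1 sign bit
    exponent = (bits >> 52) & 0x7FF    # 11 exponent bits, still biased by 1023
    mantissa = bits & ((1 << 52) - 1)  # 52 fractional mantissa bits
    return sign, exponent, mantissa


# 0.5 is 1.0...0 (binary) * 2 ** -1, so its biased exponent is -1 + 1023 == 1022.
assert decode_ieee_754(0.5) == (0, 1022, 0)
# -2.0 sets the sign bit; its biased exponent is 1 + 1023 == 1024.
assert decode_ieee_754(-2.0) == (1, 1024, 0)
```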
@@ -0,0 +1,117 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from ctypes import c_double, c_uint64
from sys import float_info

# IEEE 754 64-bit floating point numbers use 11 bits for the exponent and 52
# bits for the mantissa.
MANTISSA_WIDTH = 52
EXPONENT_WIDTH = 11

# This mask is equivalent to 52 "1" bits (there are 13 hexadecimal 4-bit "f"s
# in the mantissa mask, 13 * 4 == 52) or 0xfffffffffffff in hexadecimal.
MANTISSA_MASK = (1 << MANTISSA_WIDTH) - 1

# There are 11 bits for the exponent, but the exponent values 0 (11 "0"
# bits) and 2047 (11 "1" bits) have special meanings so the exponent range is
# from 1 to 2046. To calculate the exponent value, 1023 (the bias) is
# subtracted from the exponent, so the exponent value range is from -1022 to
# +1023.
EXPONENT_BIAS = (2 ** (EXPONENT_WIDTH - 1)) - 1

# All the exponent mask bits are set to 1 for the 11 exponent bits.
EXPONENT_MASK = ((1 << EXPONENT_WIDTH) - 1) << MANTISSA_WIDTH

# The sign mask has the first bit set to 1 and the rest to 0.
SIGN_MASK = 1 << (EXPONENT_WIDTH + MANTISSA_WIDTH)

# For normal floating point numbers, the exponent can have a value in the
# range [-1022, 1023].
MIN_NORMAL_EXPONENT = -EXPONENT_BIAS + 1
MAX_NORMAL_EXPONENT = EXPONENT_BIAS

# The smallest possible normal value is 2.2250738585072014e-308.
# This value is the result of using the smallest possible number in the
# mantissa, 1.0000000000000000000000000000000000000000000000000000 (52 "0"s in
# the fractional part) and a single "1" in the exponent.
# Finally 1 * (2 ** -1022) == 2.2250738585072014e-308.
MIN_NORMAL_VALUE = float_info.min

# Greatest possible normal value (1.7976931348623157e+308)
# The binary representation of a float in scientific notation uses (for the
# mantissa) one bit for the integer part (which is implicit) and 52 bits for
# the fractional part. Consider a float binary 1.111. It is equal to 1 + 1/2 +
# 1/4 + 1/8. The greatest possible value in the 52-bit binary mantissa would
# then be 1.1111111111111111111111111111111111111111111111111111 (52 "1"s in
# the fractional part) whose decimal value is 1.9999999999999998. Finally,
# 1.9999999999999998 * (2 ** 1023) == 1.7976931348623157e+308.
MAX_NORMAL_VALUE = float_info.max


def get_ieee_754_exponent(value: float) -> int:
    """
    Gets the exponent of the IEEE 754 representation of a float.
    """

    return (
        (
            # This step gives the integer that corresponds to the IEEE 754
            # representation of a float. Consider -MAX_NORMAL_VALUE as an
            # example. We choose this value because its binary representation
            # makes the subsequent operations easy to understand.
            #
            # c_uint64.from_buffer(c_double(-MAX_NORMAL_VALUE)).value == 18442240474082181119
            # bin(18442240474082181119) == '0b1111111111101111111111111111111111111111111111111111111111111111'
            #
            # The first bit of the previous binary number is the sign bit: 1 (1 means negative, 0 means positive)
            # The next 11 bits are the exponent bits: 11111111110
            # The next 52 bits are the mantissa bits: 1111111111111111111111111111111111111111111111111111
            #
            # This step isolates the exponent bits, turning every bit outside
            # of the exponent field (sign and mantissa bits) to 0.
            c_uint64.from_buffer(c_double(value)).value & EXPONENT_MASK
            # For the example this means:
            # 18442240474082181119 & EXPONENT_MASK == 9214364837600034816
            # bin(9214364837600034816) == '0b111111111100000000000000000000000000000000000000000000000000000'
            # Notice that the previous binary representation does not include
            # leading zeroes, so the sign bit is not included since it is a
            # zero.
        )
        # This step moves the exponent bits to the right, removing the
        # mantissa bits that were set to 0 by the previous step. This
        # leaves the IEEE 754 exponent value, ready for the next step.
        >> MANTISSA_WIDTH
        # For the example this means:
        # 9214364837600034816 >> MANTISSA_WIDTH == 2046
        # bin(2046) == '0b11111111110'
        # As shown above, these are the original 11 bits that correspond to
        # the exponent.
        # This step subtracts the exponent bias from the IEEE 754 value,
        # leaving the actual exponent value.
    ) - EXPONENT_BIAS
    # For the example this means:
    # 2046 - EXPONENT_BIAS == 1023
    # As mentioned in a comment above, 1023 (MAX_NORMAL_EXPONENT) is the
    # largest value the exponent can have.


def get_ieee_754_mantissa(value: float) -> int:
    """
    Gets the mantissa bits of the IEEE 754 representation of a float.
    """
    return (
        c_uint64.from_buffer(c_double(value)).value
        # This step isolates the mantissa bits. There is no need to do any
        # bit shifting as the mantissa bits are already the rightmost field
        # in an IEEE 754 representation.
        & MANTISSA_MASK
    )
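A brief, hedged usage sketch of the two helpers above; the expected values follow directly from the bit-field math in the comments:

```python
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.ieee_754 import (
    get_ieee_754_exponent,
    get_ieee_754_mantissa,
)

# 0.5 is 1.0 (binary) * 2 ** -1: the unbiased exponent is -1 and the
# fractional mantissa bits are all zero.
assert get_ieee_754_exponent(0.5) == -1
assert get_ieee_754_mantissa(0.5) == 0

# 1.5 is 1.1 (binary) * 2 ** 0: exponent 0, only the first mantissa bit set.
assert get_ieee_754_exponent(1.5) == 0
assert get_ieee_754_mantissa(1.5) == 1 << 51
```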
@@ -0,0 +1,138 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from math import exp, floor, ldexp, log
from threading import Lock

from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping import (
    Mapping,
)
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.errors import (
    MappingOverflowError,
    MappingUnderflowError,
)
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.ieee_754 import (
    MAX_NORMAL_EXPONENT,
    MIN_NORMAL_EXPONENT,
    MIN_NORMAL_VALUE,
    get_ieee_754_exponent,
    get_ieee_754_mantissa,
)


class LogarithmMapping(Mapping):
    # Reference implementation here:
    # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go

    _mappings = {}
    _mappings_lock = Lock()

    _min_scale = 1
    _max_scale = 20

    def _get_min_scale(self):
        # _min_scale ensures that ExponentMapping is used for zero and
        # negative scale values.
        return self._min_scale

    def _get_max_scale(self):
        # FIXME The Go implementation uses a value of 20 here, find out the
        # right value for this implementation, more information here:
        # https://github.com/lightstep/otel-launcher-go/blob/c9ca8483be067a39ab306b09060446e7fda65f35/lightstep/sdk/metric/aggregator/histogram/structure/README.md#mapping-function
        # https://github.com/open-telemetry/opentelemetry-go/blob/0e6f9c29c10d6078e8131418e1d1d166c7195d61/sdk/metric/aggregator/exponential/mapping/logarithm/logarithm.go#L32-L45
        return self._max_scale

    def _init(self, scale: int):
        # pylint: disable=attribute-defined-outside-init

        super()._init(scale)

        # self._scale_factor is defined as a multiplier because
        # multiplication is faster than division. It is used as:
        #     index = log(value) * self._scale_factor
        # Where:
        #     index = log(value) / log(base)
        #     index = log(value) / log(2 ** (2 ** -scale))
        #     index = log(value) / ((2 ** -scale) * log(2))
        #     index = log(value) * ((1 / log(2)) * (2 ** scale))
        # So:
        #     self._scale_factor = (1 / log(2)) * (2 ** scale)
        #                        = ldexp(1 / log(2), scale)
        # This implementation was copied from a Java prototype. See:
        # https://github.com/newrelic-experimental/newrelic-sketch-java/blob/1ce245713603d61ba3a4510f6df930a5479cd3f6/src/main/java/com/newrelic/nrsketch/indexer/LogIndexer.java
        # for the equations used here.
        self._scale_factor = ldexp(1 / log(2), scale)

        # self._min_normal_lower_boundary_index is the index such that
        # base ** index == MIN_NORMAL_VALUE. An exponential histogram bucket
        # with this index covers the range
        # (MIN_NORMAL_VALUE, MIN_NORMAL_VALUE * base]. One less than this
        # index corresponds with the bucket containing values
        # <= MIN_NORMAL_VALUE.
        self._min_normal_lower_boundary_index = (
            MIN_NORMAL_EXPONENT << self._scale
        )

        # self._max_normal_lower_boundary_index is the index such that
        # base ** index equals the greatest representable lower boundary. An
        # exponential histogram bucket with this index covers the range
        # ((2 ** 1024) / base, 2 ** 1024], which includes opentelemetry.sdk.
        # metrics._internal.exponential_histogram.ieee_754.MAX_NORMAL_VALUE.
        # This bucket is incomplete, since the upper boundary cannot be
        # represented. One greater than this index corresponds with the
        # bucket containing values > 2 ** 1024.
        self._max_normal_lower_boundary_index = (
            (MAX_NORMAL_EXPONENT + 1) << self._scale
        ) - 1

    def map_to_index(self, value: float) -> int:
        """
        Maps positive floating point values to indexes corresponding to scale.
        """

        # value is subnormal
        if value <= MIN_NORMAL_VALUE:
            return self._min_normal_lower_boundary_index - 1

        # value is an exact power of two.
        if get_ieee_754_mantissa(value) == 0:
            exponent = get_ieee_754_exponent(value)
            return (exponent << self._scale) - 1

        return min(
            floor(log(value) * self._scale_factor),
            self._max_normal_lower_boundary_index,
        )

    def get_lower_boundary(self, index: int) -> float:
        if index >= self._max_normal_lower_boundary_index:
            if index == self._max_normal_lower_boundary_index:
                return 2 * exp(
                    (index - (1 << self._scale)) / self._scale_factor
                )
            raise MappingOverflowError()

        if index <= self._min_normal_lower_boundary_index:
            if index == self._min_normal_lower_boundary_index:
                return MIN_NORMAL_VALUE
            if index == self._min_normal_lower_boundary_index - 1:
                return (
                    exp((index + (1 << self._scale)) / self._scale_factor) / 2
                )
            raise MappingUnderflowError()

        return exp(index / self._scale_factor)

    @property
    def scale(self) -> int:
        return self._scale
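A hedged sketch of the mapping math above. The module path is assumed (the diff does not show file names), and direct construction as `LogarithmMapping(scale)` assumes the `Mapping` base class, which is not shown here, permits it:

```python
from opentelemetry.sdk.metrics._internal.exponential_histogram.mapping.logarithm_mapping import (
    LogarithmMapping,
)

# At scale 3 the bucket base is 2 ** (2 ** -3) ~= 1.0905, so each power of
# two is split into 2 ** 3 == 8 buckets.
mapping = LogarithmMapping(3)

# 1.0 is an exact power of two with exponent 0: (0 << 3) - 1 == -1.
assert mapping.map_to_index(1.0) == -1

# 2.0 is an exact power of two with exponent 1: (1 << 3) - 1 == 7.
assert mapping.map_to_index(2.0) == 7

# The lower boundary of bucket 8 is base ** 8 == 2.0 (up to rounding).
print(mapping.get_lower_boundary(8))  # ~2.0
```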
@@ -0,0 +1,576 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import math
|
||||
import os
|
||||
import weakref
|
||||
from abc import ABC, abstractmethod
|
||||
from enum import Enum
|
||||
from logging import getLogger
|
||||
from os import environ, linesep
|
||||
from sys import stdout
|
||||
from threading import Event, Lock, RLock, Thread
|
||||
from time import time_ns
|
||||
from typing import IO, Callable, Iterable, Optional
|
||||
|
||||
from typing_extensions import final
|
||||
|
||||
# This kind of import is needed to avoid Sphinx errors.
|
||||
import opentelemetry.sdk.metrics._internal
|
||||
from opentelemetry.context import (
|
||||
_SUPPRESS_INSTRUMENTATION_KEY,
|
||||
attach,
|
||||
detach,
|
||||
set_value,
|
||||
)
|
||||
from opentelemetry.sdk.environment_variables import (
|
||||
OTEL_METRIC_EXPORT_INTERVAL,
|
||||
OTEL_METRIC_EXPORT_TIMEOUT,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.aggregation import (
|
||||
AggregationTemporality,
|
||||
DefaultAggregation,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
|
||||
from opentelemetry.sdk.metrics._internal.instrument import (
|
||||
Counter,
|
||||
Gauge,
|
||||
Histogram,
|
||||
ObservableCounter,
|
||||
ObservableGauge,
|
||||
ObservableUpDownCounter,
|
||||
UpDownCounter,
|
||||
_Counter,
|
||||
_Gauge,
|
||||
_Histogram,
|
||||
_ObservableCounter,
|
||||
_ObservableGauge,
|
||||
_ObservableUpDownCounter,
|
||||
_UpDownCounter,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.point import MetricsData
|
||||
from opentelemetry.util._once import Once
|
||||
|
||||
_logger = getLogger(__name__)
|
||||
|
||||
|
||||
class MetricExportResult(Enum):
|
||||
"""Result of exporting a metric
|
||||
|
||||
Can be any of the following values:"""
|
||||
|
||||
SUCCESS = 0
|
||||
FAILURE = 1
|
||||
|
||||
|
||||
class MetricExporter(ABC):
|
||||
"""Interface for exporting metrics.
|
||||
|
||||
Interface to be implemented by services that want to export metrics received
|
||||
in their own format.
|
||||
|
||||
Args:
|
||||
preferred_temporality: Used by `opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader` to
|
||||
configure exporter level preferred temporality. See `opentelemetry.sdk.metrics.export.MetricReader` for
|
||||
more details on what preferred temporality is.
|
||||
preferred_aggregation: Used by `opentelemetry.sdk.metrics.export.PeriodicExportingMetricReader` to
|
||||
configure exporter level preferred aggregation. See `opentelemetry.sdk.metrics.export.MetricReader` for
|
||||
more details on what preferred aggregation is.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
preferred_temporality: dict[type, AggregationTemporality]
|
||||
| None = None,
|
||||
preferred_aggregation: dict[
|
||||
type, "opentelemetry.sdk.metrics.view.Aggregation"
|
||||
]
|
||||
| None = None,
|
||||
) -> None:
|
||||
self._preferred_temporality = preferred_temporality
|
||||
self._preferred_aggregation = preferred_aggregation
|
||||
|
||||
@abstractmethod
|
||||
def export(
|
||||
self,
|
||||
metrics_data: MetricsData,
|
||||
timeout_millis: float = 10_000,
|
||||
**kwargs,
|
||||
) -> MetricExportResult:
|
||||
"""Exports a batch of telemetry data.
|
||||
|
||||
Args:
|
||||
metrics: The list of `opentelemetry.sdk.metrics.export.Metric` objects to be exported
|
||||
|
||||
Returns:
|
||||
The result of the export
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def force_flush(self, timeout_millis: float = 10_000) -> bool:
|
||||
"""
|
||||
Ensure that export of any metrics currently received by the exporter
|
||||
are completed as soon as possible.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
|
||||
"""Shuts down the exporter.
|
||||
|
||||
Called when the SDK is shut down.
|
||||
"""
|
||||
|
||||
|
||||
class ConsoleMetricExporter(MetricExporter):
|
||||
"""Implementation of :class:`MetricExporter` that prints metrics to the
|
||||
console.
|
||||
|
||||
This class can be used for diagnostic purposes. It prints the exported
|
||||
metrics to the console STDOUT.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
out: IO = stdout,
|
||||
formatter: Callable[
|
||||
["opentelemetry.sdk.metrics.export.MetricsData"], str
|
||||
] = lambda metrics_data: metrics_data.to_json() + linesep,
|
||||
preferred_temporality: dict[type, AggregationTemporality]
|
||||
| None = None,
|
||||
preferred_aggregation: dict[
|
||||
type, "opentelemetry.sdk.metrics.view.Aggregation"
|
||||
]
|
||||
| None = None,
|
||||
):
|
||||
super().__init__(
|
||||
preferred_temporality=preferred_temporality,
|
||||
preferred_aggregation=preferred_aggregation,
|
||||
)
|
||||
self.out = out
|
||||
self.formatter = formatter
|
||||
|
||||
def export(
|
||||
self,
|
||||
metrics_data: MetricsData,
|
||||
timeout_millis: float = 10_000,
|
||||
**kwargs,
|
||||
) -> MetricExportResult:
|
||||
self.out.write(self.formatter(metrics_data))
|
||||
self.out.flush()
|
||||
return MetricExportResult.SUCCESS
|
||||
|
||||
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
|
||||
pass
|
||||
|
||||
def force_flush(self, timeout_millis: float = 10_000) -> bool:
|
||||
return True
|
||||
|
||||
|
||||
class MetricReader(ABC):
|
||||
# pylint: disable=too-many-branches,broad-exception-raised
|
||||
"""
|
||||
Base class for all metric readers
|
||||
|
||||
Args:
|
||||
preferred_temporality: A mapping between instrument classes and
|
||||
aggregation temporality. By default uses CUMULATIVE for all instrument
|
||||
classes. This mapping will be used to define the default aggregation
|
||||
temporality of every instrument class. If the user wants to make a
|
||||
change in the default aggregation temporality of an instrument class,
|
||||
it is enough to pass here a dictionary whose keys are the instrument
|
||||
classes and the values are the corresponding desired aggregation
|
||||
temporalities of the classes that the user wants to change, not all of
|
||||
them. The classes not included in the passed dictionary will retain
|
||||
their association to their default aggregation temporalities.
|
||||
preferred_aggregation: A mapping between instrument classes and
|
||||
aggregation instances. By default maps all instrument classes to an
|
||||
instance of `DefaultAggregation`. This mapping will be used to
|
||||
define the default aggregation of every instrument class. If the
|
||||
user wants to make a change in the default aggregation of an
|
||||
instrument class, it is enough to pass here a dictionary whose keys
|
||||
are the instrument classes and the values are the corresponding
|
||||
desired aggregation for the instrument classes that the user wants
|
||||
to change, not necessarily all of them. The classes not included in
|
||||
the passed dictionary will retain their association to their
|
||||
default aggregations. The aggregation defined here will be
|
||||
overridden by an aggregation defined by a view that is not
|
||||
`DefaultAggregation`.
|
||||
|
||||
    .. document the protected _receive_metrics, which is intended to be overridden by subclasses
|
||||
.. automethod:: _receive_metrics
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
preferred_temporality: dict[type, AggregationTemporality]
|
||||
| None = None,
|
||||
preferred_aggregation: dict[
|
||||
type, "opentelemetry.sdk.metrics.view.Aggregation"
|
||||
]
|
||||
| None = None,
|
||||
) -> None:
|
||||
self._collect: Callable[
|
||||
[
|
||||
"opentelemetry.sdk.metrics.export.MetricReader",
|
||||
AggregationTemporality,
|
||||
],
|
||||
Iterable["opentelemetry.sdk.metrics.export.Metric"],
|
||||
] = None
|
||||
|
||||
self._instrument_class_temporality = {
|
||||
_Counter: AggregationTemporality.CUMULATIVE,
|
||||
_UpDownCounter: AggregationTemporality.CUMULATIVE,
|
||||
_Histogram: AggregationTemporality.CUMULATIVE,
|
||||
_Gauge: AggregationTemporality.CUMULATIVE,
|
||||
_ObservableCounter: AggregationTemporality.CUMULATIVE,
|
||||
_ObservableUpDownCounter: AggregationTemporality.CUMULATIVE,
|
||||
_ObservableGauge: AggregationTemporality.CUMULATIVE,
|
||||
}
|
||||
|
||||
if preferred_temporality is not None:
|
||||
for temporality in preferred_temporality.values():
|
||||
if temporality not in (
|
||||
AggregationTemporality.CUMULATIVE,
|
||||
AggregationTemporality.DELTA,
|
||||
):
|
||||
raise Exception(
|
||||
f"Invalid temporality value found {temporality}"
|
||||
)
|
||||
|
||||
if preferred_temporality is not None:
|
||||
for typ, temporality in preferred_temporality.items():
|
||||
if typ is Counter:
|
||||
self._instrument_class_temporality[_Counter] = temporality
|
||||
elif typ is UpDownCounter:
|
||||
self._instrument_class_temporality[_UpDownCounter] = (
|
||||
temporality
|
||||
)
|
||||
elif typ is Histogram:
|
||||
self._instrument_class_temporality[_Histogram] = (
|
||||
temporality
|
||||
)
|
||||
elif typ is Gauge:
|
||||
self._instrument_class_temporality[_Gauge] = temporality
|
||||
elif typ is ObservableCounter:
|
||||
self._instrument_class_temporality[_ObservableCounter] = (
|
||||
temporality
|
||||
)
|
||||
elif typ is ObservableUpDownCounter:
|
||||
self._instrument_class_temporality[
|
||||
_ObservableUpDownCounter
|
||||
] = temporality
|
||||
elif typ is ObservableGauge:
|
||||
self._instrument_class_temporality[_ObservableGauge] = (
|
||||
temporality
|
||||
)
|
||||
else:
|
||||
raise Exception(f"Invalid instrument class found {typ}")
|
||||
|
||||
self._preferred_temporality = preferred_temporality
|
||||
self._instrument_class_aggregation = {
|
||||
_Counter: DefaultAggregation(),
|
||||
_UpDownCounter: DefaultAggregation(),
|
||||
_Histogram: DefaultAggregation(),
|
||||
_Gauge: DefaultAggregation(),
|
||||
_ObservableCounter: DefaultAggregation(),
|
||||
_ObservableUpDownCounter: DefaultAggregation(),
|
||||
_ObservableGauge: DefaultAggregation(),
|
||||
}
|
||||
|
||||
if preferred_aggregation is not None:
|
||||
for typ, aggregation in preferred_aggregation.items():
|
||||
if typ is Counter:
|
||||
self._instrument_class_aggregation[_Counter] = aggregation
|
||||
elif typ is UpDownCounter:
|
||||
self._instrument_class_aggregation[_UpDownCounter] = (
|
||||
aggregation
|
||||
)
|
||||
elif typ is Histogram:
|
||||
self._instrument_class_aggregation[_Histogram] = (
|
||||
aggregation
|
||||
)
|
||||
elif typ is Gauge:
|
||||
self._instrument_class_aggregation[_Gauge] = aggregation
|
||||
elif typ is ObservableCounter:
|
||||
self._instrument_class_aggregation[_ObservableCounter] = (
|
||||
aggregation
|
||||
)
|
||||
elif typ is ObservableUpDownCounter:
|
||||
self._instrument_class_aggregation[
|
||||
_ObservableUpDownCounter
|
||||
] = aggregation
|
||||
elif typ is ObservableGauge:
|
||||
self._instrument_class_aggregation[_ObservableGauge] = (
|
||||
aggregation
|
||||
)
|
||||
else:
|
||||
raise Exception(f"Invalid instrument class found {typ}")
|
||||
|
||||
@final
|
||||
def collect(self, timeout_millis: float = 10_000) -> None:
|
||||
"""Collects the metrics from the internal SDK state and
|
||||
invokes the `_receive_metrics` with the collection.
|
||||
|
||||
Args:
|
||||
timeout_millis: Amount of time in milliseconds before this function
|
||||
raises a timeout error.
|
||||
|
||||
        If any of the underlying ``collect`` methods called by this method
        fails for any reason (including timeout), an exception will be raised
        detailing the individual errors that caused this function to fail.
|
||||
"""
|
||||
if self._collect is None:
|
||||
_logger.warning(
|
||||
"Cannot call collect on a MetricReader until it is registered on a MeterProvider"
|
||||
)
|
||||
return
|
||||
|
||||
metrics = self._collect(self, timeout_millis=timeout_millis)
|
||||
|
||||
if metrics is not None:
|
||||
self._receive_metrics(
|
||||
metrics,
|
||||
timeout_millis=timeout_millis,
|
||||
)
|
||||
|
||||
@final
|
||||
def _set_collect_callback(
|
||||
self,
|
||||
func: Callable[
|
||||
[
|
||||
"opentelemetry.sdk.metrics.export.MetricReader",
|
||||
AggregationTemporality,
|
||||
],
|
||||
Iterable["opentelemetry.sdk.metrics.export.Metric"],
|
||||
],
|
||||
) -> None:
|
||||
"""This function is internal to the SDK. It should not be called or overridden by users"""
|
||||
self._collect = func
|
||||
|
||||
@abstractmethod
|
||||
def _receive_metrics(
|
||||
self,
|
||||
metrics_data: "opentelemetry.sdk.metrics.export.MetricsData",
|
||||
timeout_millis: float = 10_000,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
"""Called by `MetricReader.collect` when it receives a batch of metrics"""
|
||||
|
||||
def force_flush(self, timeout_millis: float = 10_000) -> bool:
|
||||
self.collect(timeout_millis=timeout_millis)
|
||||
return True
|
||||
|
||||
@abstractmethod
|
||||
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
|
||||
"""Shuts down the MetricReader. This method provides a way
|
||||
for the MetricReader to do any cleanup required. A metric reader can
|
||||
only be shutdown once, any subsequent calls are ignored and return
|
||||
failure status.
|
||||
|
||||
When a `MetricReader` is registered on a
|
||||
:class:`~opentelemetry.sdk.metrics.MeterProvider`,
|
||||
:meth:`~opentelemetry.sdk.metrics.MeterProvider.shutdown` will invoke this
|
||||
automatically.
|
||||
"""
|
||||
|
||||
|
||||
class InMemoryMetricReader(MetricReader):
|
||||
"""Implementation of `MetricReader` that returns its metrics from :func:`get_metrics_data`.
|
||||
|
||||
This is useful for e.g. unit tests.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
preferred_temporality: dict[type, AggregationTemporality]
|
||||
| None = None,
|
||||
preferred_aggregation: dict[
|
||||
type, "opentelemetry.sdk.metrics.view.Aggregation"
|
||||
]
|
||||
| None = None,
|
||||
) -> None:
|
||||
super().__init__(
|
||||
preferred_temporality=preferred_temporality,
|
||||
preferred_aggregation=preferred_aggregation,
|
||||
)
|
||||
self._lock = RLock()
|
||||
self._metrics_data: "opentelemetry.sdk.metrics.export.MetricsData" = (
|
||||
None
|
||||
)
|
||||
|
||||
def get_metrics_data(
|
||||
self,
|
||||
) -> Optional["opentelemetry.sdk.metrics.export.MetricsData"]:
|
||||
"""Reads and returns current metrics from the SDK"""
|
||||
with self._lock:
|
||||
self.collect()
|
||||
metrics_data = self._metrics_data
|
||||
self._metrics_data = None
|
||||
return metrics_data
|
||||
|
||||
def _receive_metrics(
|
||||
self,
|
||||
metrics_data: "opentelemetry.sdk.metrics.export.MetricsData",
|
||||
timeout_millis: float = 10_000,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
with self._lock:
|
||||
self._metrics_data = metrics_data
|
||||
|
||||
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
|
||||
pass
|
||||
|
||||
|
||||
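A hedged sketch of the unit-test pattern the `InMemoryMetricReader` docstring above alludes to, assuming the SDK's public `MeterProvider(metric_readers=...)` wiring:

```python
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import InMemoryMetricReader

reader = InMemoryMetricReader()
provider = MeterProvider(metric_readers=[reader])

counter = provider.get_meter("test").create_counter("requests")
counter.add(3)

# get_metrics_data() triggers a collection and drains the stored batch.
metrics_data = reader.get_metrics_data()
metric = metrics_data.resource_metrics[0].scope_metrics[0].metrics[0]
assert metric.data.data_points[0].value == 3
```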
class PeriodicExportingMetricReader(MetricReader):
|
||||
"""`PeriodicExportingMetricReader` is an implementation of `MetricReader`
|
||||
that collects metrics based on a user-configurable time interval, and passes the
|
||||
metrics to the configured exporter. If the time interval is set to `math.inf`, the
|
||||
reader will not invoke periodic collection.
|
||||
|
||||
The configured exporter's :py:meth:`~MetricExporter.export` method will not be called
|
||||
concurrently.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
exporter: MetricExporter,
|
||||
export_interval_millis: Optional[float] = None,
|
||||
export_timeout_millis: Optional[float] = None,
|
||||
) -> None:
|
||||
# PeriodicExportingMetricReader defers to exporter for configuration
|
||||
super().__init__(
|
||||
preferred_temporality=exporter._preferred_temporality,
|
||||
preferred_aggregation=exporter._preferred_aggregation,
|
||||
)
|
||||
|
||||
# This lock is held whenever calling self._exporter.export() to prevent concurrent
|
||||
# execution of MetricExporter.export()
|
||||
# https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/metrics/sdk.md#exportbatch
|
||||
self._export_lock = Lock()
|
||||
|
||||
self._exporter = exporter
|
||||
if export_interval_millis is None:
|
||||
try:
|
||||
export_interval_millis = float(
|
||||
environ.get(OTEL_METRIC_EXPORT_INTERVAL, 60000)
|
||||
)
|
||||
except ValueError:
|
||||
_logger.warning(
|
||||
"Found invalid value for export interval, using default"
|
||||
)
|
||||
export_interval_millis = 60000
|
||||
if export_timeout_millis is None:
|
||||
try:
|
||||
export_timeout_millis = float(
|
||||
environ.get(OTEL_METRIC_EXPORT_TIMEOUT, 30000)
|
||||
)
|
||||
except ValueError:
|
||||
_logger.warning(
|
||||
"Found invalid value for export timeout, using default"
|
||||
)
|
||||
export_timeout_millis = 30000
|
||||
self._export_interval_millis = export_interval_millis
|
||||
self._export_timeout_millis = export_timeout_millis
|
||||
self._shutdown = False
|
||||
self._shutdown_event = Event()
|
||||
self._shutdown_once = Once()
|
||||
self._daemon_thread = None
|
||||
if (
|
||||
self._export_interval_millis > 0
|
||||
and self._export_interval_millis < math.inf
|
||||
):
|
||||
self._daemon_thread = Thread(
|
||||
name="OtelPeriodicExportingMetricReader",
|
||||
target=self._ticker,
|
||||
daemon=True,
|
||||
)
|
||||
self._daemon_thread.start()
|
||||
if hasattr(os, "register_at_fork"):
|
||||
weak_at_fork = weakref.WeakMethod(self._at_fork_reinit)
|
||||
|
||||
os.register_at_fork(
|
||||
after_in_child=lambda: weak_at_fork()() # pylint: disable=unnecessary-lambda
|
||||
)
|
||||
elif self._export_interval_millis <= 0:
|
||||
            raise ValueError(
                f"interval value {self._export_interval_millis} is invalid "
                "and needs to be larger than zero."
            )
|
||||
|
||||
def _at_fork_reinit(self):
|
||||
self._daemon_thread = Thread(
|
||||
name="OtelPeriodicExportingMetricReader",
|
||||
target=self._ticker,
|
||||
daemon=True,
|
||||
)
|
||||
self._daemon_thread.start()
|
||||
|
||||
def _ticker(self) -> None:
|
||||
interval_secs = self._export_interval_millis / 1e3
|
||||
while not self._shutdown_event.wait(interval_secs):
|
||||
try:
|
||||
self.collect(timeout_millis=self._export_timeout_millis)
|
||||
except MetricsTimeoutError:
|
||||
_logger.warning(
|
||||
"Metric collection timed out. Will try again after %s seconds",
|
||||
interval_secs,
|
||||
exc_info=True,
|
||||
)
|
||||
# one last collection below before shutting down completely
|
||||
try:
|
||||
self.collect(timeout_millis=self._export_interval_millis)
|
||||
except MetricsTimeoutError:
|
||||
_logger.warning(
|
||||
"Metric collection timed out.",
|
||||
exc_info=True,
|
||||
)
|
||||
|
||||
def _receive_metrics(
|
||||
self,
|
||||
metrics_data: MetricsData,
|
||||
timeout_millis: float = 10_000,
|
||||
**kwargs,
|
||||
) -> None:
|
||||
token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
|
||||
# pylint: disable=broad-exception-caught,invalid-name
|
||||
try:
|
||||
with self._export_lock:
|
||||
self._exporter.export(
|
||||
metrics_data, timeout_millis=timeout_millis
|
||||
)
|
||||
except Exception:
|
||||
_logger.exception("Exception while exporting metrics")
|
||||
detach(token)
|
||||
|
||||
def shutdown(self, timeout_millis: float = 30_000, **kwargs) -> None:
|
||||
deadline_ns = time_ns() + timeout_millis * 10**6
|
||||
|
||||
def _shutdown():
|
||||
self._shutdown = True
|
||||
|
||||
did_set = self._shutdown_once.do_once(_shutdown)
|
||||
if not did_set:
|
||||
_logger.warning("Can't shutdown multiple times")
|
||||
return
|
||||
|
||||
self._shutdown_event.set()
|
||||
if self._daemon_thread:
|
||||
self._daemon_thread.join(timeout=(deadline_ns - time_ns()) / 10**9)
|
||||
        self._exporter.shutdown(timeout_millis=(deadline_ns - time_ns()) / 10**6)
|
||||
|
||||
def force_flush(self, timeout_millis: float = 10_000) -> bool:
|
||||
super().force_flush(timeout_millis=timeout_millis)
|
||||
self._exporter.force_flush(timeout_millis=timeout_millis)
|
||||
return True
|
||||
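To tie the classes above together, here is a minimal, hedged wiring sketch (not part of this diff) of `PeriodicExportingMetricReader` with `ConsoleMetricExporter`, assuming the SDK's public `MeterProvider` API:

```python
from opentelemetry.metrics import set_meter_provider
from opentelemetry.sdk.metrics import MeterProvider
from opentelemetry.sdk.metrics.export import (
    ConsoleMetricExporter,
    PeriodicExportingMetricReader,
)

# Collect and export to stdout every 5 seconds instead of the 60 s default.
reader = PeriodicExportingMetricReader(
    ConsoleMetricExporter(), export_interval_millis=5_000
)
provider = MeterProvider(metric_readers=[reader])
set_meter_provider(provider)

counter = provider.get_meter("example").create_counter("work_done")
counter.add(1)

# Flushes pending metrics and stops the reader's ticker thread.
provider.shutdown()
```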
Binary file not shown.
@@ -0,0 +1,334 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# pylint: disable=too-many-ancestors, unused-import
|
||||
from __future__ import annotations
|
||||
|
||||
from logging import getLogger
|
||||
from time import time_ns
|
||||
from typing import Generator, Iterable, List, Sequence, Union
|
||||
|
||||
# This kind of import is needed to avoid Sphinx errors.
|
||||
import opentelemetry.sdk.metrics
|
||||
from opentelemetry.context import Context, get_current
|
||||
from opentelemetry.metrics import CallbackT
|
||||
from opentelemetry.metrics import Counter as APICounter
|
||||
from opentelemetry.metrics import Histogram as APIHistogram
|
||||
from opentelemetry.metrics import ObservableCounter as APIObservableCounter
|
||||
from opentelemetry.metrics import ObservableGauge as APIObservableGauge
|
||||
from opentelemetry.metrics import (
|
||||
ObservableUpDownCounter as APIObservableUpDownCounter,
|
||||
)
|
||||
from opentelemetry.metrics import UpDownCounter as APIUpDownCounter
|
||||
from opentelemetry.metrics import _Gauge as APIGauge
|
||||
from opentelemetry.metrics._internal.instrument import (
|
||||
CallbackOptions,
|
||||
_MetricsHistogramAdvisory,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.measurement import Measurement
|
||||
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
|
||||
|
||||
_logger = getLogger(__name__)
|
||||
|
||||
|
||||
_ERROR_MESSAGE = (
|
||||
"Expected ASCII string of maximum length 63 characters but got {}"
|
||||
)
|
||||
|
||||
|
||||
class _Synchronous:
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
instrumentation_scope: InstrumentationScope,
|
||||
measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer",
|
||||
unit: str = "",
|
||||
description: str = "",
|
||||
):
|
||||
# pylint: disable=no-member
|
||||
result = self._check_name_unit_description(name, unit, description)
|
||||
|
||||
if result["name"] is None:
|
||||
# pylint: disable=broad-exception-raised
|
||||
raise Exception(_ERROR_MESSAGE.format(name))
|
||||
|
||||
if result["unit"] is None:
|
||||
# pylint: disable=broad-exception-raised
|
||||
raise Exception(_ERROR_MESSAGE.format(unit))
|
||||
|
||||
name = result["name"]
|
||||
unit = result["unit"]
|
||||
description = result["description"]
|
||||
|
||||
self.name = name.lower()
|
||||
self.unit = unit
|
||||
self.description = description
|
||||
self.instrumentation_scope = instrumentation_scope
|
||||
self._measurement_consumer = measurement_consumer
|
||||
super().__init__(name, unit=unit, description=description)
|
||||
|
||||
|
||||
class _Asynchronous:
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
instrumentation_scope: InstrumentationScope,
|
||||
measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer",
|
||||
callbacks: Iterable[CallbackT] | None = None,
|
||||
unit: str = "",
|
||||
description: str = "",
|
||||
):
|
||||
# pylint: disable=no-member
|
||||
result = self._check_name_unit_description(name, unit, description)
|
||||
|
||||
if result["name"] is None:
|
||||
# pylint: disable=broad-exception-raised
|
||||
raise Exception(_ERROR_MESSAGE.format(name))
|
||||
|
||||
if result["unit"] is None:
|
||||
# pylint: disable=broad-exception-raised
|
||||
raise Exception(_ERROR_MESSAGE.format(unit))
|
||||
|
||||
name = result["name"]
|
||||
unit = result["unit"]
|
||||
description = result["description"]
|
||||
|
||||
self.name = name.lower()
|
||||
self.unit = unit
|
||||
self.description = description
|
||||
self.instrumentation_scope = instrumentation_scope
|
||||
self._measurement_consumer = measurement_consumer
|
||||
super().__init__(name, callbacks, unit=unit, description=description)
|
||||
|
||||
self._callbacks: List[CallbackT] = []
|
||||
|
||||
if callbacks is not None:
|
||||
for callback in callbacks:
|
||||
if isinstance(callback, Generator):
|
||||
                    # advance the generator to its first yield
|
||||
next(callback)
|
||||
|
||||
def inner(
|
||||
options: CallbackOptions,
|
||||
callback=callback,
|
||||
) -> Iterable[Measurement]:
|
||||
try:
|
||||
return callback.send(options)
|
||||
except StopIteration:
|
||||
return []
|
||||
|
||||
self._callbacks.append(inner)
|
||||
else:
|
||||
self._callbacks.append(callback)
|
||||
|
||||
def callback(
|
||||
self, callback_options: CallbackOptions
|
||||
) -> Iterable[Measurement]:
|
||||
for callback in self._callbacks:
|
||||
try:
|
||||
for api_measurement in callback(callback_options):
|
||||
yield Measurement(
|
||||
api_measurement.value,
|
||||
time_unix_nano=time_ns(),
|
||||
instrument=self,
|
||||
context=api_measurement.context or get_current(),
|
||||
attributes=api_measurement.attributes,
|
||||
)
|
||||
except Exception: # pylint: disable=broad-exception-caught
|
||||
_logger.exception(
|
||||
"Callback failed for instrument %s.", self.name
|
||||
)
|
||||
|
||||
|
||||
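The generator branch above is worth illustrating: a callback may be a plain callable invoked with `CallbackOptions`, or a generator that is advanced to its first yield at registration time and then driven with `send(options)` on every collection. A hedged sketch of both styles against the public API (instrument names are illustrative):

```python
from opentelemetry.metrics import CallbackOptions, Observation, get_meter

meter = get_meter("example")


def cpu_callback(options: CallbackOptions):
    # Callable style: invoked with options on each collection; yields
    # Observation objects.
    yield Observation(0.8, {"cpu": "0"})


def memory_callback():
    # Generator style: next() advances to the first bare yield, then each
    # collection resumes it with send(options).
    options = yield
    while True:
        options = yield [Observation(512, {"unit": "MiB"})]


meter.create_observable_gauge("cpu.utilization", callbacks=[cpu_callback])
meter.create_observable_gauge("memory.usage", callbacks=[memory_callback()])
```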
class Counter(_Synchronous, APICounter):
|
||||
def __new__(cls, *args, **kwargs):
|
||||
if cls is Counter:
|
||||
raise TypeError("Counter must be instantiated via a meter.")
|
||||
return super().__new__(cls)
|
||||
|
||||
def add(
|
||||
self,
|
||||
amount: Union[int, float],
|
||||
attributes: dict[str, str] | None = None,
|
||||
context: Context | None = None,
|
||||
):
|
||||
if amount < 0:
|
||||
_logger.warning(
|
||||
"Add amount must be non-negative on Counter %s.", self.name
|
||||
)
|
||||
return
|
||||
time_unix_nano = time_ns()
|
||||
self._measurement_consumer.consume_measurement(
|
||||
Measurement(
|
||||
amount,
|
||||
time_unix_nano,
|
||||
self,
|
||||
context or get_current(),
|
||||
attributes,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class UpDownCounter(_Synchronous, APIUpDownCounter):
|
||||
def __new__(cls, *args, **kwargs):
|
||||
if cls is UpDownCounter:
|
||||
raise TypeError("UpDownCounter must be instantiated via a meter.")
|
||||
return super().__new__(cls)
|
||||
|
||||
def add(
|
||||
self,
|
||||
amount: Union[int, float],
|
||||
attributes: dict[str, str] | None = None,
|
||||
context: Context | None = None,
|
||||
):
|
||||
time_unix_nano = time_ns()
|
||||
self._measurement_consumer.consume_measurement(
|
||||
Measurement(
|
||||
amount,
|
||||
time_unix_nano,
|
||||
self,
|
||||
context or get_current(),
|
||||
attributes,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class ObservableCounter(_Asynchronous, APIObservableCounter):
|
||||
def __new__(cls, *args, **kwargs):
|
||||
if cls is ObservableCounter:
|
||||
raise TypeError(
|
||||
"ObservableCounter must be instantiated via a meter."
|
||||
)
|
||||
return super().__new__(cls)
|
||||
|
||||
|
||||
class ObservableUpDownCounter(_Asynchronous, APIObservableUpDownCounter):
|
||||
def __new__(cls, *args, **kwargs):
|
||||
if cls is ObservableUpDownCounter:
|
||||
raise TypeError(
|
||||
"ObservableUpDownCounter must be instantiated via a meter."
|
||||
)
|
||||
return super().__new__(cls)
|
||||
|
||||
|
||||
class Histogram(_Synchronous, APIHistogram):
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
instrumentation_scope: InstrumentationScope,
|
||||
measurement_consumer: "opentelemetry.sdk.metrics.MeasurementConsumer",
|
||||
unit: str = "",
|
||||
description: str = "",
|
||||
explicit_bucket_boundaries_advisory: Sequence[float] | None = None,
|
||||
):
|
||||
super().__init__(
|
||||
name,
|
||||
unit=unit,
|
||||
description=description,
|
||||
instrumentation_scope=instrumentation_scope,
|
||||
measurement_consumer=measurement_consumer,
|
||||
)
|
||||
self._advisory = _MetricsHistogramAdvisory(
|
||||
explicit_bucket_boundaries=explicit_bucket_boundaries_advisory
|
||||
)
|
||||
|
||||
def __new__(cls, *args, **kwargs):
|
||||
if cls is Histogram:
|
||||
raise TypeError("Histogram must be instantiated via a meter.")
|
||||
return super().__new__(cls)
|
||||
|
||||
def record(
|
||||
self,
|
||||
amount: Union[int, float],
|
||||
attributes: dict[str, str] | None = None,
|
||||
context: Context | None = None,
|
||||
):
|
||||
if amount < 0:
|
||||
_logger.warning(
|
||||
"Record amount must be non-negative on Histogram %s.",
|
||||
self.name,
|
||||
)
|
||||
return
|
||||
time_unix_nano = time_ns()
|
||||
self._measurement_consumer.consume_measurement(
|
||||
Measurement(
|
||||
amount,
|
||||
time_unix_nano,
|
||||
self,
|
||||
context or get_current(),
|
||||
attributes,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class Gauge(_Synchronous, APIGauge):
|
||||
def __new__(cls, *args, **kwargs):
|
||||
if cls is Gauge:
|
||||
raise TypeError("Gauge must be instantiated via a meter.")
|
||||
return super().__new__(cls)
|
||||
|
||||
def set(
|
||||
self,
|
||||
amount: Union[int, float],
|
||||
attributes: dict[str, str] | None = None,
|
||||
context: Context | None = None,
|
||||
):
|
||||
time_unix_nano = time_ns()
|
||||
self._measurement_consumer.consume_measurement(
|
||||
Measurement(
|
||||
amount,
|
||||
time_unix_nano,
|
||||
self,
|
||||
context or get_current(),
|
||||
attributes,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
class ObservableGauge(_Asynchronous, APIObservableGauge):
|
||||
def __new__(cls, *args, **kwargs):
|
||||
if cls is ObservableGauge:
|
||||
raise TypeError(
|
||||
"ObservableGauge must be instantiated via a meter."
|
||||
)
|
||||
return super().__new__(cls)
|
||||
|
||||
|
||||
# The classes below exist to prevent direct instantiation of the public ones
|
||||
class _Counter(Counter):
|
||||
pass
|
||||
|
||||
|
||||
class _UpDownCounter(UpDownCounter):
|
||||
pass
|
||||
|
||||
|
||||
class _ObservableCounter(ObservableCounter):
|
||||
pass
|
||||
|
||||
|
||||
class _ObservableUpDownCounter(ObservableUpDownCounter):
|
||||
pass
|
||||
|
||||
|
||||
class _Histogram(Histogram):
|
||||
pass
|
||||
|
||||
|
||||
class _Gauge(Gauge):
|
||||
pass
|
||||
|
||||
|
||||
class _ObservableGauge(ObservableGauge):
|
||||
pass
|
||||
@@ -0,0 +1,45 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Union
|
||||
|
||||
from opentelemetry.context import Context
|
||||
from opentelemetry.metrics import Instrument
|
||||
from opentelemetry.util.types import Attributes
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class Measurement:
|
||||
"""
|
||||
Represents a data point reported via the metrics API to the SDK.
|
||||
|
||||
Attributes
|
||||
value: Measured value
|
||||
time_unix_nano: The time the API call was made to record the Measurement
|
||||
instrument: The instrument that produced this `Measurement`.
|
||||
context: The active Context of the Measurement at API call time.
|
||||
attributes: Measurement attributes
|
||||
"""
|
||||
|
||||
    # TODO Fix doc - if using the valid Google `Attributes:` key, the
    # attributes are duplicated: one will come from the napoleon extension and
    # the other from the autodoc extension. This will raise a Sphinx error of
    # duplicated object description.
    # See https://github.com/sphinx-doc/sphinx/issues/8664
|
||||
|
||||
value: Union[int, float]
|
||||
time_unix_nano: int
|
||||
instrument: Instrument
|
||||
context: Context
|
||||
attributes: Attributes = None
|
||||
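A hedged sketch of constructing a `Measurement` by hand, as the SDK instruments above do internally (the values are illustrative, and `instrument=None` is only a stand-in to keep the sketch self-contained; a real SDK instrument would be passed):

```python
from time import time_ns

from opentelemetry.context import get_current
from opentelemetry.sdk.metrics._internal.measurement import Measurement

measurement = Measurement(
    value=42,
    time_unix_nano=time_ns(),
    instrument=None,  # hypothetical stand-in for a real SDK instrument
    context=get_current(),
    attributes={"endpoint": "/api"},
)
```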
@@ -0,0 +1,145 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# pylint: disable=unused-import
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from threading import Lock
|
||||
from time import time_ns
|
||||
from typing import Iterable, List, Mapping, Optional
|
||||
|
||||
# This kind of import is needed to avoid Sphinx errors.
|
||||
import opentelemetry.sdk.metrics
|
||||
import opentelemetry.sdk.metrics._internal.instrument
|
||||
import opentelemetry.sdk.metrics._internal.sdk_configuration
|
||||
from opentelemetry.metrics._internal.instrument import CallbackOptions
|
||||
from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
|
||||
from opentelemetry.sdk.metrics._internal.measurement import Measurement
|
||||
from opentelemetry.sdk.metrics._internal.metric_reader_storage import (
|
||||
MetricReaderStorage,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.point import Metric
|
||||
|
||||
|
||||
class MeasurementConsumer(ABC):
|
||||
@abstractmethod
|
||||
def consume_measurement(self, measurement: Measurement) -> None:
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def register_asynchronous_instrument(
|
||||
self,
|
||||
        instrument: (
            "opentelemetry.sdk.metrics._internal.instrument._Asynchronous"
        ),
|
||||
):
|
||||
pass
|
||||
|
||||
@abstractmethod
|
||||
def collect(
|
||||
self,
|
||||
metric_reader: "opentelemetry.sdk.metrics.MetricReader",
|
||||
timeout_millis: float = 10_000,
|
||||
) -> Optional[Iterable[Metric]]:
|
||||
pass
|
||||
|
||||
|
||||
class SynchronousMeasurementConsumer(MeasurementConsumer):
|
||||
def __init__(
|
||||
self,
|
||||
sdk_config: "opentelemetry.sdk.metrics._internal.SdkConfiguration",
|
||||
) -> None:
|
||||
self._lock = Lock()
|
||||
self._sdk_config = sdk_config
|
||||
# should never be mutated
|
||||
self._reader_storages: Mapping[
|
||||
"opentelemetry.sdk.metrics.MetricReader", MetricReaderStorage
|
||||
] = {
|
||||
reader: MetricReaderStorage(
|
||||
sdk_config,
|
||||
reader._instrument_class_temporality,
|
||||
reader._instrument_class_aggregation,
|
||||
)
|
||||
for reader in sdk_config.metric_readers
|
||||
}
|
||||
self._async_instruments: List[
|
||||
"opentelemetry.sdk.metrics._internal.instrument._Asynchronous"
|
||||
] = []
|
||||
|
||||
def consume_measurement(self, measurement: Measurement) -> None:
|
||||
should_sample_exemplar = (
|
||||
self._sdk_config.exemplar_filter.should_sample(
|
||||
measurement.value,
|
||||
measurement.time_unix_nano,
|
||||
measurement.attributes,
|
||||
measurement.context,
|
||||
)
|
||||
)
|
||||
for reader_storage in self._reader_storages.values():
|
||||
reader_storage.consume_measurement(
|
||||
measurement, should_sample_exemplar
|
||||
)
|
||||
|
||||
def register_asynchronous_instrument(
|
||||
self,
|
||||
instrument: (
|
||||
"opentelemetry.sdk.metrics._internal.instrument._Asynchronous"
|
||||
),
|
||||
) -> None:
|
||||
with self._lock:
|
||||
self._async_instruments.append(instrument)
|
||||
|
||||
def collect(
|
||||
self,
|
||||
metric_reader: "opentelemetry.sdk.metrics.MetricReader",
|
||||
timeout_millis: float = 10_000,
|
||||
) -> Optional[Iterable[Metric]]:
|
||||
with self._lock:
|
||||
metric_reader_storage = self._reader_storages[metric_reader]
|
||||
# for now, just use the defaults
|
||||
callback_options = CallbackOptions()
|
||||
deadline_ns = time_ns() + (timeout_millis * 1e6)
|
||||
|
||||
default_timeout_ns = 10000 * 1e6
|
||||
|
||||
for async_instrument in self._async_instruments:
|
||||
remaining_time = deadline_ns - time_ns()
|
||||
|
||||
if remaining_time < default_timeout_ns:
|
||||
callback_options = CallbackOptions(
|
||||
timeout_millis=remaining_time / 1e6
|
||||
)
|
||||
|
||||
measurements = async_instrument.callback(callback_options)
|
||||
if time_ns() >= deadline_ns:
|
||||
raise MetricsTimeoutError(
|
||||
"Timed out while executing callback"
|
||||
)
|
||||
|
||||
for measurement in measurements:
|
||||
should_sample_exemplar = (
|
||||
self._sdk_config.exemplar_filter.should_sample(
|
||||
measurement.value,
|
||||
measurement.time_unix_nano,
|
||||
measurement.attributes,
|
||||
measurement.context,
|
||||
)
|
||||
)
|
||||
metric_reader_storage.consume_measurement(
|
||||
measurement, should_sample_exemplar
|
||||
)
|
||||
|
||||
result = self._reader_storages[metric_reader].collect()
|
||||
|
||||
return result
|
||||
@@ -0,0 +1,315 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from logging import getLogger
|
||||
from threading import RLock
|
||||
from time import time_ns
|
||||
from typing import Dict, List, Optional
|
||||
|
||||
from opentelemetry.metrics import (
|
||||
Asynchronous,
|
||||
Counter,
|
||||
Instrument,
|
||||
ObservableCounter,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal._view_instrument_match import (
|
||||
_ViewInstrumentMatch,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.aggregation import (
|
||||
Aggregation,
|
||||
ExplicitBucketHistogramAggregation,
|
||||
_DropAggregation,
|
||||
_ExplicitBucketHistogramAggregation,
|
||||
_ExponentialBucketHistogramAggregation,
|
||||
_LastValueAggregation,
|
||||
_SumAggregation,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.export import AggregationTemporality
|
||||
from opentelemetry.sdk.metrics._internal.measurement import Measurement
|
||||
from opentelemetry.sdk.metrics._internal.point import (
|
||||
ExponentialHistogram,
|
||||
Gauge,
|
||||
Histogram,
|
||||
Metric,
|
||||
MetricsData,
|
||||
ResourceMetrics,
|
||||
ScopeMetrics,
|
||||
Sum,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.sdk_configuration import (
|
||||
SdkConfiguration,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.view import View
|
||||
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
|
||||
|
||||
_logger = getLogger(__name__)
|
||||
|
||||
_DEFAULT_VIEW = View(instrument_name="")
|
||||
|
||||
|
||||
class MetricReaderStorage:
|
||||
"""The SDK's storage for a given reader"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
sdk_config: SdkConfiguration,
|
||||
instrument_class_temporality: Dict[type, AggregationTemporality],
|
||||
instrument_class_aggregation: Dict[type, Aggregation],
|
||||
) -> None:
|
||||
self._lock = RLock()
|
||||
self._sdk_config = sdk_config
|
||||
self._instrument_view_instrument_matches: Dict[
|
||||
Instrument, List[_ViewInstrumentMatch]
|
||||
] = {}
|
||||
self._instrument_class_temporality = instrument_class_temporality
|
||||
self._instrument_class_aggregation = instrument_class_aggregation
|
||||
|
||||
def _get_or_init_view_instrument_match(
|
||||
self, instrument: Instrument
|
||||
) -> List[_ViewInstrumentMatch]:
|
||||
# Optimistically get the relevant views for the given instrument. Once set for a given
|
||||
# instrument, the mapping will never change
|
||||
|
||||
if instrument in self._instrument_view_instrument_matches:
|
||||
return self._instrument_view_instrument_matches[instrument]
|
||||
|
||||
with self._lock:
|
||||
# double check if it was set before we held the lock
|
||||
if instrument in self._instrument_view_instrument_matches:
|
||||
return self._instrument_view_instrument_matches[instrument]
|
||||
|
||||
# not present, hold the lock and add a new mapping
|
||||
view_instrument_matches = []
|
||||
|
||||
self._handle_view_instrument_match(
|
||||
instrument, view_instrument_matches
|
||||
)
|
||||
|
||||
# if no view targeted the instrument, use the default
|
||||
if not view_instrument_matches:
|
||||
view_instrument_matches.append(
|
||||
_ViewInstrumentMatch(
|
||||
view=_DEFAULT_VIEW,
|
||||
instrument=instrument,
|
||||
instrument_class_aggregation=(
|
||||
self._instrument_class_aggregation
|
||||
),
|
||||
)
|
||||
)
|
||||
self._instrument_view_instrument_matches[instrument] = (
|
||||
view_instrument_matches
|
||||
)
|
||||
|
||||
return view_instrument_matches
|
||||
|
||||
def consume_measurement(
|
||||
self, measurement: Measurement, should_sample_exemplar: bool = True
|
||||
) -> None:
|
||||
for view_instrument_match in self._get_or_init_view_instrument_match(
|
||||
measurement.instrument
|
||||
):
|
||||
view_instrument_match.consume_measurement(
|
||||
measurement, should_sample_exemplar
|
||||
)
|
||||
|
||||
def collect(self) -> Optional[MetricsData]:
|
||||
# Use a list instead of yielding to prevent a slow reader from holding
|
||||
# SDK locks
|
||||
|
||||
# While holding the lock, new _ViewInstrumentMatch can't be added from
|
||||
        # another thread (so we are sure we collect all existing views).
|
||||
# However, instruments can still send measurements that will make it
|
||||
# into the individual aggregations; collection will acquire those locks
|
||||
# iteratively to keep locking as fine-grained as possible. One side
|
||||
# effect is that end times can be slightly skewed among the metric
|
||||
# streams produced by the SDK, but we still align the output timestamps
|
||||
# for a single instrument.
|
||||
|
||||
collection_start_nanos = time_ns()
|
||||
|
||||
with self._lock:
|
||||
instrumentation_scope_scope_metrics: Dict[
|
||||
InstrumentationScope, ScopeMetrics
|
||||
] = {}
|
||||
|
||||
for (
|
||||
instrument,
|
||||
view_instrument_matches,
|
||||
) in self._instrument_view_instrument_matches.items():
|
||||
aggregation_temporality = self._instrument_class_temporality[
|
||||
instrument.__class__
|
||||
]
|
||||
|
||||
metrics: List[Metric] = []
|
||||
|
||||
for view_instrument_match in view_instrument_matches:
|
||||
data_points = view_instrument_match.collect(
|
||||
aggregation_temporality, collection_start_nanos
|
||||
)
|
||||
|
||||
if data_points is None:
|
||||
continue
|
||||
|
||||
if isinstance(
|
||||
# pylint: disable=protected-access
|
||||
view_instrument_match._aggregation,
|
||||
_SumAggregation,
|
||||
):
|
||||
data = Sum(
|
||||
aggregation_temporality=aggregation_temporality,
|
||||
data_points=data_points,
|
||||
is_monotonic=isinstance(
|
||||
instrument, (Counter, ObservableCounter)
|
||||
),
|
||||
)
|
||||
elif isinstance(
|
||||
# pylint: disable=protected-access
|
||||
view_instrument_match._aggregation,
|
||||
_LastValueAggregation,
|
||||
):
|
||||
data = Gauge(data_points=data_points)
|
||||
elif isinstance(
|
||||
# pylint: disable=protected-access
|
||||
view_instrument_match._aggregation,
|
||||
_ExplicitBucketHistogramAggregation,
|
||||
):
|
||||
data = Histogram(
|
||||
data_points=data_points,
|
||||
aggregation_temporality=aggregation_temporality,
|
||||
)
|
||||
elif isinstance(
|
||||
# pylint: disable=protected-access
|
||||
view_instrument_match._aggregation,
|
||||
_DropAggregation,
|
||||
):
|
||||
continue
|
||||
|
||||
elif isinstance(
|
||||
# pylint: disable=protected-access
|
||||
view_instrument_match._aggregation,
|
||||
_ExponentialBucketHistogramAggregation,
|
||||
):
|
||||
data = ExponentialHistogram(
|
||||
data_points=data_points,
|
||||
aggregation_temporality=aggregation_temporality,
|
||||
)
|
||||
|
||||
metrics.append(
|
||||
Metric(
|
||||
# pylint: disable=protected-access
|
||||
# pylint: disable=possibly-used-before-assignment
|
||||
name=view_instrument_match._name,
|
||||
description=view_instrument_match._description,
|
||||
unit=view_instrument_match._instrument.unit,
|
||||
data=data,
|
||||
)
|
||||
)
|
||||
|
||||
if metrics:
|
||||
if instrument.instrumentation_scope not in (
|
||||
instrumentation_scope_scope_metrics
|
||||
):
|
||||
instrumentation_scope_scope_metrics[
|
||||
instrument.instrumentation_scope
|
||||
] = ScopeMetrics(
|
||||
scope=instrument.instrumentation_scope,
|
||||
metrics=metrics,
|
||||
schema_url=instrument.instrumentation_scope.schema_url,
|
||||
)
|
||||
else:
|
||||
instrumentation_scope_scope_metrics[
|
||||
instrument.instrumentation_scope
|
||||
].metrics.extend(metrics)
|
||||
|
||||
if instrumentation_scope_scope_metrics:
|
||||
return MetricsData(
|
||||
resource_metrics=[
|
||||
ResourceMetrics(
|
||||
resource=self._sdk_config.resource,
|
||||
scope_metrics=list(
|
||||
instrumentation_scope_scope_metrics.values()
|
||||
),
|
||||
schema_url=self._sdk_config.resource.schema_url,
|
||||
)
|
||||
]
|
||||
)
|
||||
|
||||
return None
|
||||
|
||||
def _handle_view_instrument_match(
|
||||
self,
|
||||
instrument: Instrument,
|
||||
view_instrument_matches: List["_ViewInstrumentMatch"],
|
||||
) -> None:
|
||||
for view in self._sdk_config.views:
|
||||
# pylint: disable=protected-access
|
||||
if not view._match(instrument):
|
||||
continue
|
||||
|
||||
if not self._check_view_instrument_compatibility(view, instrument):
|
||||
continue
|
||||
|
||||
new_view_instrument_match = _ViewInstrumentMatch(
|
||||
view=view,
|
||||
instrument=instrument,
|
||||
instrument_class_aggregation=(
|
||||
self._instrument_class_aggregation
|
||||
),
|
||||
)
|
||||
|
||||
for (
|
||||
existing_view_instrument_matches
|
||||
) in self._instrument_view_instrument_matches.values():
|
||||
for (
|
||||
existing_view_instrument_match
|
||||
) in existing_view_instrument_matches:
|
||||
if existing_view_instrument_match.conflicts(
|
||||
new_view_instrument_match
|
||||
):
|
||||
_logger.warning(
|
||||
"Views %s and %s will cause conflicting "
|
||||
"metrics identities",
|
||||
existing_view_instrument_match._view,
|
||||
new_view_instrument_match._view,
|
||||
)
|
||||
|
||||
view_instrument_matches.append(new_view_instrument_match)
|
||||
|
||||
@staticmethod
|
||||
def _check_view_instrument_compatibility(
|
||||
view: View, instrument: Instrument
|
||||
) -> bool:
|
||||
"""
|
||||
Checks if a view and an instrument are compatible.
|
||||
|
||||
Returns `True` if they are compatible and a `_ViewInstrumentMatch`
|
||||
object should be created, `False` otherwise.
|
||||
"""
|
||||
|
||||
result = True
|
||||
|
||||
# pylint: disable=protected-access
|
||||
if isinstance(instrument, Asynchronous) and isinstance(
|
||||
view._aggregation, ExplicitBucketHistogramAggregation
|
||||
):
|
||||
_logger.warning(
|
||||
"View %s and instrument %s will produce "
|
||||
"semantic errors when matched, the view "
|
||||
"has not been applied.",
|
||||
view,
|
||||
instrument,
|
||||
)
|
||||
result = False
|
||||
|
||||
return result
|
||||
@@ -0,0 +1,277 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# pylint: disable=unused-import
|
||||
|
||||
from dataclasses import asdict, dataclass, field
|
||||
from json import dumps, loads
|
||||
from typing import Optional, Sequence, Union
|
||||
|
||||
# This kind of import is needed to avoid Sphinx errors.
|
||||
import opentelemetry.sdk.metrics._internal
|
||||
from opentelemetry.sdk.metrics._internal.exemplar import Exemplar
|
||||
from opentelemetry.sdk.resources import Resource
|
||||
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
|
||||
from opentelemetry.util.types import Attributes
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class NumberDataPoint:
|
||||
"""Single data point in a timeseries that describes the time-varying scalar
|
||||
value of a metric.
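
    A serialization sketch via ``to_json`` (field values are illustrative):

    .. code-block:: python

        point = NumberDataPoint(
            attributes={"path": "/"},
            start_time_unix_nano=0,
            time_unix_nano=1,
            value=42,
        )
        # asdict() walks every field, so the empty exemplars list
        # serializes as [].
        print(point.to_json(indent=None))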
|
||||
"""
|
||||
|
||||
attributes: Attributes
|
||||
start_time_unix_nano: int
|
||||
time_unix_nano: int
|
||||
value: Union[int, float]
|
||||
exemplars: Sequence[Exemplar] = field(default_factory=list)
|
||||
|
||||
def to_json(self, indent: Optional[int] = 4) -> str:
|
||||
return dumps(asdict(self), indent=indent)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class HistogramDataPoint:
|
||||
Single data point in a timeseries that describes the time-varying histogram
|
||||
of all reported measurements of a metric.
|
||||
"""
|
||||
|
||||
attributes: Attributes
|
||||
start_time_unix_nano: int
|
||||
time_unix_nano: int
|
||||
count: int
|
||||
sum: Union[int, float]
|
||||
bucket_counts: Sequence[int]
|
||||
explicit_bounds: Sequence[float]
|
||||
min: float
|
||||
max: float
|
||||
exemplars: Sequence[Exemplar] = field(default_factory=list)
|
||||
|
||||
def to_json(self, indent: Optional[int] = 4) -> str:
|
||||
return dumps(asdict(self), indent=indent)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class Buckets:
|
||||
offset: int
|
||||
bucket_counts: Sequence[int]
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class ExponentialHistogramDataPoint:
|
||||
"""Single data point in a timeseries whose boundaries are defined by an
|
||||
exponential function. This timeseries describes the time-varying scalar
|
||||
value of a metric.
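
    Per the OpenTelemetry data model, the bucket boundaries are derived from
    ``scale``: with ``base = 2 ** (2 ** -scale)``, the bucket at index ``i``
    covers the interval ``(base**i, base**(i+1)]``, and ``offset`` is the
    bucket index of the first entry in ``bucket_counts``.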
|
||||
"""
|
||||
|
||||
attributes: Attributes
|
||||
start_time_unix_nano: int
|
||||
time_unix_nano: int
|
||||
count: int
|
||||
sum: Union[int, float]
|
||||
scale: int
|
||||
zero_count: int
|
||||
positive: Buckets
|
||||
negative: Buckets
|
||||
flags: int
|
||||
min: float
|
||||
max: float
|
||||
exemplars: Sequence[Exemplar] = field(default_factory=list)
|
||||
|
||||
def to_json(self, indent: Optional[int] = 4) -> str:
|
||||
return dumps(asdict(self), indent=indent)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class ExponentialHistogram:
|
||||
"""Represents the type of a metric that is calculated by aggregating as an
|
||||
ExponentialHistogram of all reported measurements over a time interval.
|
||||
"""
|
||||
|
||||
data_points: Sequence[ExponentialHistogramDataPoint]
|
||||
aggregation_temporality: (
|
||||
"opentelemetry.sdk.metrics.export.AggregationTemporality"
|
||||
)
|
||||
|
||||
def to_json(self, indent: Optional[int] = 4) -> str:
|
||||
return dumps(
|
||||
{
|
||||
"data_points": [
|
||||
loads(data_point.to_json(indent=indent))
|
||||
for data_point in self.data_points
|
||||
],
|
||||
"aggregation_temporality": self.aggregation_temporality,
|
||||
},
|
||||
indent=indent,
|
||||
)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class Sum:
|
||||
"""Represents the type of a scalar metric that is calculated as a sum of
|
||||
all reported measurements over a time interval."""
|
||||
|
||||
data_points: Sequence[NumberDataPoint]
|
||||
aggregation_temporality: (
|
||||
"opentelemetry.sdk.metrics.export.AggregationTemporality"
|
||||
)
|
||||
is_monotonic: bool
|
||||
|
||||
def to_json(self, indent: Optional[int] = 4) -> str:
|
||||
return dumps(
|
||||
{
|
||||
"data_points": [
|
||||
loads(data_point.to_json(indent=indent))
|
||||
for data_point in self.data_points
|
||||
],
|
||||
"aggregation_temporality": self.aggregation_temporality,
|
||||
"is_monotonic": self.is_monotonic,
|
||||
},
|
||||
indent=indent,
|
||||
)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class Gauge:
|
||||
"""Represents the type of a scalar metric that always exports the current
|
||||
value for every data point. It should be used for an unknown
|
||||
aggregation."""
|
||||
|
||||
data_points: Sequence[NumberDataPoint]
|
||||
|
||||
def to_json(self, indent: Optional[int] = 4) -> str:
|
||||
return dumps(
|
||||
{
|
||||
"data_points": [
|
||||
loads(data_point.to_json(indent=indent))
|
||||
for data_point in self.data_points
|
||||
],
|
||||
},
|
||||
indent=indent,
|
||||
)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class Histogram:
|
||||
"""Represents the type of a metric that is calculated by aggregating as a
|
||||
histogram of all reported measurements over a time interval."""
|
||||
|
||||
data_points: Sequence[HistogramDataPoint]
|
||||
aggregation_temporality: (
|
||||
"opentelemetry.sdk.metrics.export.AggregationTemporality"
|
||||
)
|
||||
|
||||
def to_json(self, indent: Optional[int] = 4) -> str:
|
||||
return dumps(
|
||||
{
|
||||
"data_points": [
|
||||
loads(data_point.to_json(indent=indent))
|
||||
for data_point in self.data_points
|
||||
],
|
||||
"aggregation_temporality": self.aggregation_temporality,
|
||||
},
|
||||
indent=indent,
|
||||
)
|
||||
|
||||
|
||||
# pylint: disable=invalid-name
|
||||
DataT = Union[Sum, Gauge, Histogram, ExponentialHistogram]
|
||||
DataPointT = Union[
|
||||
NumberDataPoint, HistogramDataPoint, ExponentialHistogramDataPoint
|
||||
]
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class Metric:
|
||||
"""Represents a metric point in the OpenTelemetry data model to be
|
||||
exported."""
|
||||
|
||||
name: str
|
||||
description: Optional[str]
|
||||
unit: Optional[str]
|
||||
data: DataT
|
||||
|
||||
def to_json(self, indent: Optional[int] = 4) -> str:
|
||||
return dumps(
|
||||
{
|
||||
"name": self.name,
|
||||
"description": self.description or "",
|
||||
"unit": self.unit or "",
|
||||
"data": loads(self.data.to_json(indent=indent)),
|
||||
},
|
||||
indent=indent,
|
||||
)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class ScopeMetrics:
|
||||
"""A collection of Metrics produced by a scope"""
|
||||
|
||||
scope: InstrumentationScope
|
||||
metrics: Sequence[Metric]
|
||||
schema_url: str
|
||||
|
||||
def to_json(self, indent: Optional[int] = 4) -> str:
|
||||
return dumps(
|
||||
{
|
||||
"scope": loads(self.scope.to_json(indent=indent)),
|
||||
"metrics": [
|
||||
loads(metric.to_json(indent=indent))
|
||||
for metric in self.metrics
|
||||
],
|
||||
"schema_url": self.schema_url,
|
||||
},
|
||||
indent=indent,
|
||||
)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class ResourceMetrics:
|
||||
"""A collection of ScopeMetrics from a Resource"""
|
||||
|
||||
resource: Resource
|
||||
scope_metrics: Sequence[ScopeMetrics]
|
||||
schema_url: str
|
||||
|
||||
def to_json(self, indent: Optional[int] = 4) -> str:
|
||||
return dumps(
|
||||
{
|
||||
"resource": loads(self.resource.to_json(indent=indent)),
|
||||
"scope_metrics": [
|
||||
loads(scope_metrics.to_json(indent=indent))
|
||||
for scope_metrics in self.scope_metrics
|
||||
],
|
||||
"schema_url": self.schema_url,
|
||||
},
|
||||
indent=indent,
|
||||
)
|
||||
|
||||
|
||||
@dataclass(frozen=True)
|
||||
class MetricsData:
|
||||
"""An array of ResourceMetrics"""
|
||||
|
||||
resource_metrics: Sequence[ResourceMetrics]
|
||||
|
||||
def to_json(self, indent: Optional[int] = 4) -> str:
|
||||
return dumps(
|
||||
{
|
||||
"resource_metrics": [
|
||||
loads(resource_metrics.to_json(indent=indent))
|
||||
for resource_metrics in self.resource_metrics
|
||||
]
|
||||
},
|
||||
indent=indent,
|
||||
)
|
||||
@@ -0,0 +1,30 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# pylint: disable=unused-import
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Sequence
|
||||
|
||||
# This kind of import is needed to avoid Sphinx errors.
|
||||
import opentelemetry.sdk.metrics
|
||||
import opentelemetry.sdk.resources
|
||||
|
||||
|
||||
@dataclass
|
||||
class SdkConfiguration:
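"""Shared configuration for the metrics SDK: the exemplar filter, the
resource, the registered metric readers, and the configured views."""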
|
||||
exemplar_filter: "opentelemetry.sdk.metrics.ExemplarFilter"
|
||||
resource: "opentelemetry.sdk.resources.Resource"
|
||||
metric_readers: Sequence["opentelemetry.sdk.metrics.MetricReader"]
|
||||
views: Sequence["opentelemetry.sdk.metrics.View"]
|
||||
@@ -0,0 +1,195 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
from fnmatch import fnmatch
|
||||
from logging import getLogger
|
||||
from typing import Callable, Optional, Set, Type
|
||||
|
||||
from opentelemetry.metrics import Instrument
|
||||
from opentelemetry.sdk.metrics._internal.aggregation import (
|
||||
Aggregation,
|
||||
DefaultAggregation,
|
||||
_Aggregation,
|
||||
_ExplicitBucketHistogramAggregation,
|
||||
_ExponentialBucketHistogramAggregation,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.exemplar import (
|
||||
AlignedHistogramBucketExemplarReservoir,
|
||||
ExemplarReservoirBuilder,
|
||||
SimpleFixedSizeExemplarReservoir,
|
||||
)
|
||||
|
||||
_logger = getLogger(__name__)
|
||||
|
||||
|
||||
def _default_reservoir_factory(
|
||||
aggregation_type: Type[_Aggregation],
|
||||
) -> ExemplarReservoirBuilder:
|
||||
"""Default reservoir factory per aggregation."""
|
||||
if issubclass(aggregation_type, _ExplicitBucketHistogramAggregation):
|
||||
return AlignedHistogramBucketExemplarReservoir
|
||||
if issubclass(aggregation_type, _ExponentialBucketHistogramAggregation):
|
||||
return SimpleFixedSizeExemplarReservoir
|
||||
return SimpleFixedSizeExemplarReservoir
|
||||
|
||||
|
||||
class View:
|
||||
"""
|
||||
A `View`'s configuration parameters can be used for the following
|
||||
purposes:
|
||||
|
||||
1. Match instruments: When an instrument matches a view, measurements
|
||||
received by that instrument will be processed.
|
||||
2. Customize metric streams: A metric stream is identified by a match
|
||||
between a view and an instrument and a set of attributes. The metric
|
||||
stream can be customized by certain attributes of the corresponding view.
|
||||
|
||||
The attributes documented next serve one of the previous two purposes.
|
||||
|
||||
Args:
|
||||
instrument_type: This is an instrument matching attribute: the class the
|
||||
instrument must be to match the view.
|
||||
|
||||
instrument_name: This is an instrument matching attribute: the name the
|
||||
instrument must have to match the view. Wildcard characters are supported.
|
||||
Wildcard characters should not be used with this attribute if the view also
|
||||
has a ``name`` defined.
|
||||
|
||||
meter_name: This is an instrument matching attribute: the name the
|
||||
instrument meter must have to match the view.
|
||||
|
||||
meter_version: This is an instrument matching attribute: the version
|
||||
the instrument meter must have to match the view.
|
||||
|
||||
meter_schema_url: This is an instrument matching attribute: the schema
|
||||
URL the instrument meter must have to match the view.
|
||||
|
||||
name: This is a metric stream customizing attribute: the name of the
|
||||
metric stream. If `None`, the name of the instrument will be used.
|
||||
|
||||
description: This is a metric stream customizing attribute: the
|
||||
description of the metric stream. If `None`, the description of the instrument will
|
||||
be used.
|
||||
|
||||
attribute_keys: This is a metric stream customizing attribute: this is
|
||||
a set of attribute keys. If not `None` then only the measurement attributes that
|
||||
are in ``attribute_keys`` will be used to identify the metric stream.
|
||||
|
||||
aggregation: This is a metric stream customizing attribute: the
|
||||
aggregation instance to use when data is aggregated for the
|
||||
corresponding metrics stream. If `None` an instance of
|
||||
`DefaultAggregation` will be used.
|
||||
|
||||
exemplar_reservoir_factory: This is a metric stream customizing attribute:
|
||||
the exemplar reservoir factory to use for the corresponding metric stream.
|
||||
|
||||
instrument_unit: This is an instrument matching attribute: the unit the
|
||||
instrument must have to match the view.
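
    A minimal usage sketch (the instrument and attribute names are
    illustrative):

    .. code-block:: python

        from opentelemetry.sdk.metrics import Counter, MeterProvider
        from opentelemetry.sdk.metrics.view import View

        # Keep only the "route" attribute on streams produced by Counters
        # whose name starts with "http.".
        view = View(
            instrument_type=Counter,
            instrument_name="http.*",
            attribute_keys={"route"},
        )
        provider = MeterProvider(views=[view])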
|
||||
|
||||
This class is not intended to be subclassed by the user.
|
||||
"""
|
||||
|
||||
_default_aggregation = DefaultAggregation()
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
instrument_type: Optional[Type[Instrument]] = None,
|
||||
instrument_name: Optional[str] = None,
|
||||
meter_name: Optional[str] = None,
|
||||
meter_version: Optional[str] = None,
|
||||
meter_schema_url: Optional[str] = None,
|
||||
name: Optional[str] = None,
|
||||
description: Optional[str] = None,
|
||||
attribute_keys: Optional[Set[str]] = None,
|
||||
aggregation: Optional[Aggregation] = None,
|
||||
exemplar_reservoir_factory: Optional[
|
||||
Callable[[Type[_Aggregation]], ExemplarReservoirBuilder]
|
||||
] = None,
|
||||
instrument_unit: Optional[str] = None,
|
||||
):
|
||||
if (
|
||||
instrument_type
|
||||
is instrument_name
|
||||
is instrument_unit
|
||||
is meter_name
|
||||
is meter_version
|
||||
is meter_schema_url
|
||||
is None
|
||||
):
|
||||
# pylint: disable=broad-exception-raised
|
||||
raise Exception(
|
||||
"Some instrument selection "
|
||||
f"criteria must be provided for View {name}"
|
||||
)
|
||||
|
||||
if (
|
||||
name is not None
|
||||
and instrument_name is not None
|
||||
and ("*" in instrument_name or "?" in instrument_name)
|
||||
):
|
||||
# pylint: disable=broad-exception-raised
|
||||
raise Exception(
|
||||
f"View {name} declared with wildcard "
|
||||
"characters in instrument_name"
|
||||
)
|
||||
|
||||
# _name, _description, _aggregation, _exemplar_reservoir_factory and
|
||||
# _attribute_keys will be accessed when instantiating a _ViewInstrumentMatch.
|
||||
self._name = name
|
||||
self._instrument_type = instrument_type
|
||||
self._instrument_name = instrument_name
|
||||
self._instrument_unit = instrument_unit
|
||||
self._meter_name = meter_name
|
||||
self._meter_version = meter_version
|
||||
self._meter_schema_url = meter_schema_url
|
||||
|
||||
self._description = description
|
||||
self._attribute_keys = attribute_keys
|
||||
self._aggregation = aggregation or self._default_aggregation
|
||||
self._exemplar_reservoir_factory = (
|
||||
exemplar_reservoir_factory or _default_reservoir_factory
|
||||
)
|
||||
|
||||
# pylint: disable=too-many-return-statements
|
||||
# pylint: disable=too-many-branches
|
||||
def _match(self, instrument: Instrument) -> bool:
|
||||
if self._instrument_type is not None:
|
||||
if not isinstance(instrument, self._instrument_type):
|
||||
return False
|
||||
|
||||
if self._instrument_name is not None:
|
||||
if not fnmatch(instrument.name, self._instrument_name):
|
||||
return False
|
||||
|
||||
if self._instrument_unit is not None:
|
||||
if not fnmatch(instrument.unit, self._instrument_unit):
|
||||
return False
|
||||
|
||||
if self._meter_name is not None:
|
||||
if instrument.instrumentation_scope.name != self._meter_name:
|
||||
return False
|
||||
|
||||
if self._meter_version is not None:
|
||||
if instrument.instrumentation_scope.version != self._meter_version:
|
||||
return False
|
||||
|
||||
if self._meter_schema_url is not None:
|
||||
if (
|
||||
instrument.instrumentation_scope.schema_url
|
||||
!= self._meter_schema_url
|
||||
):
|
||||
return False
|
||||
|
||||
return True
|
||||
@@ -0,0 +1,66 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
from opentelemetry.sdk.metrics._internal.export import (
|
||||
AggregationTemporality,
|
||||
ConsoleMetricExporter,
|
||||
InMemoryMetricReader,
|
||||
MetricExporter,
|
||||
MetricExportResult,
|
||||
MetricReader,
|
||||
PeriodicExportingMetricReader,
|
||||
)
|
||||
|
||||
# The point module is not in the export directory to avoid a circular import.
|
||||
from opentelemetry.sdk.metrics._internal.point import ( # noqa: F401
|
||||
Buckets,
|
||||
DataPointT,
|
||||
DataT,
|
||||
ExponentialHistogram,
|
||||
ExponentialHistogramDataPoint,
|
||||
Gauge,
|
||||
Histogram,
|
||||
HistogramDataPoint,
|
||||
Metric,
|
||||
MetricsData,
|
||||
NumberDataPoint,
|
||||
ResourceMetrics,
|
||||
ScopeMetrics,
|
||||
Sum,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"AggregationTemporality",
|
||||
"Buckets",
|
||||
"ConsoleMetricExporter",
|
||||
"InMemoryMetricReader",
|
||||
"MetricExporter",
|
||||
"MetricExportResult",
|
||||
"MetricReader",
|
||||
"PeriodicExportingMetricReader",
|
||||
"DataPointT",
|
||||
"DataT",
|
||||
"ExponentialHistogram",
|
||||
"ExponentialHistogramDataPoint",
|
||||
"Gauge",
|
||||
"Histogram",
|
||||
"HistogramDataPoint",
|
||||
"Metric",
|
||||
"MetricsData",
|
||||
"NumberDataPoint",
|
||||
"ResourceMetrics",
|
||||
"ScopeMetrics",
|
||||
"Sum",
|
||||
]
|
||||
@@ -0,0 +1,35 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from opentelemetry.sdk.metrics._internal.aggregation import (
|
||||
Aggregation,
|
||||
DefaultAggregation,
|
||||
DropAggregation,
|
||||
ExplicitBucketHistogramAggregation,
|
||||
ExponentialBucketHistogramAggregation,
|
||||
LastValueAggregation,
|
||||
SumAggregation,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.view import View
|
||||
|
||||
__all__ = [
|
||||
"Aggregation",
|
||||
"DefaultAggregation",
|
||||
"DropAggregation",
|
||||
"ExplicitBucketHistogramAggregation",
|
||||
"ExponentialBucketHistogramAggregation",
|
||||
"LastValueAggregation",
|
||||
"SumAggregation",
|
||||
"View",
|
||||
]
|
||||
@@ -0,0 +1,541 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
This package implements `OpenTelemetry Resources
|
||||
<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#resource-sdk>`_:
|
||||
|
||||
*A Resource is an immutable representation of the entity producing
|
||||
telemetry. For example, a process producing telemetry that is running in
|
||||
a container on Kubernetes has a Pod name, it is in a namespace and
|
||||
possibly is part of a Deployment which also has a name. All three of
|
||||
these attributes can be included in the Resource.*
|
||||
|
||||
Resource objects are created with `Resource.create`, which accepts attributes
|
||||
(key-values). Resources should NOT be created via the constructor, except by
|
||||
`ResourceDetector` instances, which cannot use `Resource.create` without
|
||||
causing an infinite loop. Working with `Resource` objects should only be done
|
||||
via the Resource API methods. Resource attributes can also be passed at
|
||||
process invocation in the :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment
|
||||
variable. You should register your resource with the
|
||||
`opentelemetry.sdk.trace.TracerProvider` by passing it into the constructor. The `Resource` passed to a provider is available
|
||||
to the exporter, which can send on this information as it sees fit.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
trace.set_tracer_provider(
|
||||
TracerProvider(
|
||||
resource=Resource.create({
|
||||
"service.name": "shoppingcart",
|
||||
"service.instance.id": "instance-12",
|
||||
}),
|
||||
),
|
||||
)
|
||||
print(trace.get_tracer_provider().resource.attributes)
|
||||
|
||||
{'telemetry.sdk.language': 'python',
|
||||
'telemetry.sdk.name': 'opentelemetry',
|
||||
'telemetry.sdk.version': '0.13.dev0',
|
||||
'service.name': 'shoppingcart',
|
||||
'service.instance.id': 'instance-12'}
|
||||
|
||||
Note that the OpenTelemetry project documents certain `"standard attributes"
|
||||
<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/semantic_conventions/README.md>`_
|
||||
that have prescribed semantic meanings, for example ``service.name`` in the
|
||||
above example.
|
||||
"""
|
||||
|
||||
import abc
|
||||
import concurrent.futures
|
||||
import logging
|
||||
import os
|
||||
import platform
|
||||
import socket
|
||||
import sys
|
||||
import typing
|
||||
from json import dumps
|
||||
from os import environ
|
||||
from types import ModuleType
|
||||
from typing import List, MutableMapping, Optional, cast
|
||||
from urllib import parse
|
||||
|
||||
from opentelemetry.attributes import BoundedAttributes
|
||||
from opentelemetry.sdk.environment_variables import (
|
||||
OTEL_EXPERIMENTAL_RESOURCE_DETECTORS,
|
||||
OTEL_RESOURCE_ATTRIBUTES,
|
||||
OTEL_SERVICE_NAME,
|
||||
)
|
||||
from opentelemetry.semconv.resource import ResourceAttributes
|
||||
from opentelemetry.util._importlib_metadata import entry_points, version
|
||||
from opentelemetry.util.types import AttributeValue
|
||||
|
||||
psutil: Optional[ModuleType] = None
|
||||
|
||||
try:
|
||||
import psutil as psutil_module
|
||||
|
||||
psutil = psutil_module
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
LabelValue = AttributeValue
|
||||
Attributes = typing.Mapping[str, LabelValue]
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
CLOUD_PROVIDER = ResourceAttributes.CLOUD_PROVIDER
|
||||
CLOUD_ACCOUNT_ID = ResourceAttributes.CLOUD_ACCOUNT_ID
|
||||
CLOUD_REGION = ResourceAttributes.CLOUD_REGION
|
||||
CLOUD_AVAILABILITY_ZONE = ResourceAttributes.CLOUD_AVAILABILITY_ZONE
|
||||
CONTAINER_NAME = ResourceAttributes.CONTAINER_NAME
|
||||
CONTAINER_ID = ResourceAttributes.CONTAINER_ID
|
||||
CONTAINER_IMAGE_NAME = ResourceAttributes.CONTAINER_IMAGE_NAME
|
||||
CONTAINER_IMAGE_TAG = ResourceAttributes.CONTAINER_IMAGE_TAG
|
||||
DEPLOYMENT_ENVIRONMENT = ResourceAttributes.DEPLOYMENT_ENVIRONMENT
|
||||
FAAS_NAME = ResourceAttributes.FAAS_NAME
|
||||
FAAS_ID = ResourceAttributes.FAAS_ID
|
||||
FAAS_VERSION = ResourceAttributes.FAAS_VERSION
|
||||
FAAS_INSTANCE = ResourceAttributes.FAAS_INSTANCE
|
||||
HOST_NAME = ResourceAttributes.HOST_NAME
|
||||
HOST_ARCH = ResourceAttributes.HOST_ARCH
|
||||
HOST_TYPE = ResourceAttributes.HOST_TYPE
|
||||
HOST_IMAGE_NAME = ResourceAttributes.HOST_IMAGE_NAME
|
||||
HOST_IMAGE_ID = ResourceAttributes.HOST_IMAGE_ID
|
||||
HOST_IMAGE_VERSION = ResourceAttributes.HOST_IMAGE_VERSION
|
||||
KUBERNETES_CLUSTER_NAME = ResourceAttributes.K8S_CLUSTER_NAME
|
||||
KUBERNETES_NAMESPACE_NAME = ResourceAttributes.K8S_NAMESPACE_NAME
|
||||
KUBERNETES_POD_UID = ResourceAttributes.K8S_POD_UID
|
||||
KUBERNETES_POD_NAME = ResourceAttributes.K8S_POD_NAME
|
||||
KUBERNETES_CONTAINER_NAME = ResourceAttributes.K8S_CONTAINER_NAME
|
||||
KUBERNETES_REPLICA_SET_UID = ResourceAttributes.K8S_REPLICASET_UID
|
||||
KUBERNETES_REPLICA_SET_NAME = ResourceAttributes.K8S_REPLICASET_NAME
|
||||
KUBERNETES_DEPLOYMENT_UID = ResourceAttributes.K8S_DEPLOYMENT_UID
|
||||
KUBERNETES_DEPLOYMENT_NAME = ResourceAttributes.K8S_DEPLOYMENT_NAME
|
||||
KUBERNETES_STATEFUL_SET_UID = ResourceAttributes.K8S_STATEFULSET_UID
|
||||
KUBERNETES_STATEFUL_SET_NAME = ResourceAttributes.K8S_STATEFULSET_NAME
|
||||
KUBERNETES_DAEMON_SET_UID = ResourceAttributes.K8S_DAEMONSET_UID
|
||||
KUBERNETES_DAEMON_SET_NAME = ResourceAttributes.K8S_DAEMONSET_NAME
|
||||
KUBERNETES_JOB_UID = ResourceAttributes.K8S_JOB_UID
|
||||
KUBERNETES_JOB_NAME = ResourceAttributes.K8S_JOB_NAME
|
||||
KUBERNETES_CRON_JOB_UID = ResourceAttributes.K8S_CRONJOB_UID
|
||||
KUBERNETES_CRON_JOB_NAME = ResourceAttributes.K8S_CRONJOB_NAME
|
||||
OS_DESCRIPTION = ResourceAttributes.OS_DESCRIPTION
|
||||
OS_TYPE = ResourceAttributes.OS_TYPE
|
||||
OS_VERSION = ResourceAttributes.OS_VERSION
|
||||
PROCESS_PID = ResourceAttributes.PROCESS_PID
|
||||
PROCESS_PARENT_PID = ResourceAttributes.PROCESS_PARENT_PID
|
||||
PROCESS_EXECUTABLE_NAME = ResourceAttributes.PROCESS_EXECUTABLE_NAME
|
||||
PROCESS_EXECUTABLE_PATH = ResourceAttributes.PROCESS_EXECUTABLE_PATH
|
||||
PROCESS_COMMAND = ResourceAttributes.PROCESS_COMMAND
|
||||
PROCESS_COMMAND_LINE = ResourceAttributes.PROCESS_COMMAND_LINE
|
||||
PROCESS_COMMAND_ARGS = ResourceAttributes.PROCESS_COMMAND_ARGS
|
||||
PROCESS_OWNER = ResourceAttributes.PROCESS_OWNER
|
||||
PROCESS_RUNTIME_NAME = ResourceAttributes.PROCESS_RUNTIME_NAME
|
||||
PROCESS_RUNTIME_VERSION = ResourceAttributes.PROCESS_RUNTIME_VERSION
|
||||
PROCESS_RUNTIME_DESCRIPTION = ResourceAttributes.PROCESS_RUNTIME_DESCRIPTION
|
||||
SERVICE_NAME = ResourceAttributes.SERVICE_NAME
|
||||
SERVICE_NAMESPACE = ResourceAttributes.SERVICE_NAMESPACE
|
||||
SERVICE_INSTANCE_ID = ResourceAttributes.SERVICE_INSTANCE_ID
|
||||
SERVICE_VERSION = ResourceAttributes.SERVICE_VERSION
|
||||
TELEMETRY_SDK_NAME = ResourceAttributes.TELEMETRY_SDK_NAME
|
||||
TELEMETRY_SDK_VERSION = ResourceAttributes.TELEMETRY_SDK_VERSION
|
||||
TELEMETRY_AUTO_VERSION = ResourceAttributes.TELEMETRY_AUTO_VERSION
|
||||
TELEMETRY_SDK_LANGUAGE = ResourceAttributes.TELEMETRY_SDK_LANGUAGE
|
||||
|
||||
_OPENTELEMETRY_SDK_VERSION: str = version("opentelemetry-sdk")
|
||||
|
||||
|
||||
class Resource:
|
||||
"""A Resource is an immutable representation of the entity producing telemetry as Attributes."""
|
||||
|
||||
_attributes: BoundedAttributes
|
||||
_schema_url: str
|
||||
|
||||
def __init__(
|
||||
self, attributes: Attributes, schema_url: typing.Optional[str] = None
|
||||
):
|
||||
self._attributes = BoundedAttributes(attributes=attributes)
|
||||
if schema_url is None:
|
||||
schema_url = ""
|
||||
self._schema_url = schema_url
|
||||
|
||||
@staticmethod
|
||||
def create(
|
||||
attributes: typing.Optional[Attributes] = None,
|
||||
schema_url: typing.Optional[str] = None,
|
||||
) -> "Resource":
|
||||
"""Creates a new `Resource` from attributes.
|
||||
|
||||
`ResourceDetector` instances should not call this method.
|
||||
|
||||
Args:
|
||||
attributes: Optional zero or more key-value pairs.
|
||||
schema_url: Optional URL pointing to the schema
|
||||
|
||||
Returns:
|
||||
The newly-created Resource.
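
        A typical call (the service name is illustrative):

        .. code-block:: python

            resource = Resource.create({"service.name": "checkout"})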
|
||||
"""
|
||||
|
||||
if not attributes:
|
||||
attributes = {}
|
||||
|
||||
otel_experimental_resource_detectors = {"otel"}.union(
|
||||
{
|
||||
otel_experimental_resource_detector.strip()
|
||||
for otel_experimental_resource_detector in environ.get(
|
||||
OTEL_EXPERIMENTAL_RESOURCE_DETECTORS, ""
|
||||
).split(",")
|
||||
if otel_experimental_resource_detector
|
||||
}
|
||||
)
|
||||
|
||||
resource_detectors: List[ResourceDetector] = []
|
||||
|
||||
resource_detector: str
|
||||
for resource_detector in otel_experimental_resource_detectors:
|
||||
try:
|
||||
resource_detectors.append(
|
||||
next(
|
||||
iter(
|
||||
entry_points(
|
||||
group="opentelemetry_resource_detector",
|
||||
name=resource_detector.strip(),
|
||||
) # type: ignore
|
||||
)
|
||||
).load()()
|
||||
)
|
||||
except Exception: # pylint: disable=broad-exception-caught
|
||||
logger.exception(
|
||||
"Failed to load resource detector '%s', skipping",
|
||||
resource_detector,
|
||||
)
|
||||
continue
|
||||
resource = get_aggregated_resources(
|
||||
resource_detectors, _DEFAULT_RESOURCE
|
||||
).merge(Resource(attributes, schema_url))
|
||||
|
||||
if not resource.attributes.get(SERVICE_NAME, None):
|
||||
default_service_name = "unknown_service"
|
||||
process_executable_name = cast(
|
||||
Optional[str],
|
||||
resource.attributes.get(PROCESS_EXECUTABLE_NAME, None),
|
||||
)
|
||||
if process_executable_name:
|
||||
default_service_name += ":" + process_executable_name
|
||||
resource = resource.merge(
|
||||
Resource({SERVICE_NAME: default_service_name}, schema_url)
|
||||
)
|
||||
return resource
|
||||
|
||||
@staticmethod
|
||||
def get_empty() -> "Resource":
|
||||
return _EMPTY_RESOURCE
|
||||
|
||||
@property
|
||||
def attributes(self) -> Attributes:
|
||||
return self._attributes
|
||||
|
||||
@property
|
||||
def schema_url(self) -> str:
|
||||
return self._schema_url
|
||||
|
||||
def merge(self, other: "Resource") -> "Resource":
|
||||
"""Merges this resource and an updating resource into a new `Resource`.
|
||||
|
||||
If a key exists on both the old and updating resource, the value of the
|
||||
updating resource will override the old resource value.
|
||||
|
||||
The updating resource's `schema_url` will be used only if the old
|
||||
`schema_url` is empty. Attempting to merge two resources with
|
||||
different, non-empty values for `schema_url` will result in an error
|
||||
and return the old resource.
|
||||
|
||||
Args:
|
||||
other: The other resource to be merged.
|
||||
|
||||
Returns:
|
||||
The newly-created Resource.
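
        A sketch of the precedence rule (attribute values are illustrative):

        .. code-block:: python

            old = Resource.create({"service.name": "a", "color": "red"})
            updating = Resource.create({"service.name": "b"})
            merged = old.merge(updating)
            assert merged.attributes["service.name"] == "b"
            assert merged.attributes["color"] == "red"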
|
||||
"""
|
||||
merged_attributes = self.attributes.copy() # type: ignore
|
||||
merged_attributes.update(other.attributes) # type: ignore
|
||||
|
||||
if self.schema_url == "":
|
||||
schema_url = other.schema_url
|
||||
elif other.schema_url == "":
|
||||
schema_url = self.schema_url
|
||||
elif self.schema_url == other.schema_url:
|
||||
schema_url = other.schema_url
|
||||
else:
|
||||
logger.error(
|
||||
"Failed to merge resources: The two schemas %s and %s are incompatible",
|
||||
self.schema_url,
|
||||
other.schema_url,
|
||||
)
|
||||
return self
|
||||
return Resource(merged_attributes, schema_url) # type: ignore
|
||||
|
||||
def __eq__(self, other: object) -> bool:
|
||||
if not isinstance(other, Resource):
|
||||
return False
|
||||
return (
|
||||
self._attributes == other._attributes
|
||||
and self._schema_url == other._schema_url
|
||||
)
|
||||
|
||||
def __hash__(self) -> int:
|
||||
return hash(
|
||||
f"{dumps(self._attributes.copy(), sort_keys=True)}|{self._schema_url}" # type: ignore
|
||||
)
|
||||
|
||||
def to_json(self, indent: Optional[int] = 4) -> str:
|
||||
attributes: MutableMapping[str, AttributeValue] = dict(
|
||||
self._attributes
|
||||
)
|
||||
return dumps(
|
||||
{
|
||||
"attributes": attributes, # type: ignore
|
||||
"schema_url": self._schema_url,
|
||||
},
|
||||
indent=indent,
|
||||
)
|
||||
|
||||
|
||||
_EMPTY_RESOURCE = Resource({})
|
||||
_DEFAULT_RESOURCE = Resource(
|
||||
{
|
||||
TELEMETRY_SDK_LANGUAGE: "python",
|
||||
TELEMETRY_SDK_NAME: "opentelemetry",
|
||||
TELEMETRY_SDK_VERSION: _OPENTELEMETRY_SDK_VERSION,
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class ResourceDetector(abc.ABC):
|
||||
def __init__(self, raise_on_error: bool = False) -> None:
|
||||
self.raise_on_error = raise_on_error
|
||||
|
||||
@abc.abstractmethod
|
||||
def detect(self) -> "Resource":
|
||||
"""Don't call `Resource.create` here to avoid an infinite loop, instead instantiate `Resource` directly"""
|
||||
raise NotImplementedError()
|
||||
|
||||
|
||||
class OTELResourceDetector(ResourceDetector):
|
||||
# pylint: disable=no-self-use
|
||||
def detect(self) -> "Resource":
|
||||
env_resources_items = environ.get(OTEL_RESOURCE_ATTRIBUTES)
|
||||
env_resource_map = {}
|
||||
|
||||
if env_resources_items:
|
||||
for item in env_resources_items.split(","):
|
||||
try:
|
||||
key, value = item.split("=", maxsplit=1)
|
||||
except ValueError as exc:
|
||||
logger.warning(
|
||||
"Invalid key value resource attribute pair %s: %s",
|
||||
item,
|
||||
exc,
|
||||
)
|
||||
continue
|
||||
value_url_decoded = parse.unquote(value.strip())
|
||||
env_resource_map[key.strip()] = value_url_decoded
|
||||
|
||||
service_name = environ.get(OTEL_SERVICE_NAME)
|
||||
if service_name:
|
||||
env_resource_map[SERVICE_NAME] = service_name
|
||||
return Resource(env_resource_map)
|
||||
|
||||
|
||||
class ProcessResourceDetector(ResourceDetector):
|
||||
# pylint: disable=no-self-use
|
||||
def detect(self) -> "Resource":
|
||||
_runtime_version = ".".join(
|
||||
map(
|
||||
str,
|
||||
(
|
||||
sys.version_info[:3]
|
||||
if sys.version_info.releaselevel == "final"
|
||||
and not sys.version_info.serial
|
||||
else sys.version_info
|
||||
),
|
||||
)
|
||||
)
|
||||
_process_pid = os.getpid()
|
||||
_process_executable_name = sys.executable
|
||||
_process_executable_path = os.path.dirname(_process_executable_name)
|
||||
_process_command = sys.argv[0]
|
||||
_process_command_line = " ".join(sys.argv)
|
||||
_process_command_args = sys.argv
|
||||
resource_info = {
|
||||
PROCESS_RUNTIME_DESCRIPTION: sys.version,
|
||||
PROCESS_RUNTIME_NAME: sys.implementation.name,
|
||||
PROCESS_RUNTIME_VERSION: _runtime_version,
|
||||
PROCESS_PID: _process_pid,
|
||||
PROCESS_EXECUTABLE_NAME: _process_executable_name,
|
||||
PROCESS_EXECUTABLE_PATH: _process_executable_path,
|
||||
PROCESS_COMMAND: _process_command,
|
||||
PROCESS_COMMAND_LINE: _process_command_line,
|
||||
PROCESS_COMMAND_ARGS: _process_command_args,
|
||||
}
|
||||
if hasattr(os, "getppid"):
|
||||
# pypy3 does not have getppid()
|
||||
resource_info[PROCESS_PARENT_PID] = os.getppid()
|
||||
|
||||
if psutil is not None:
|
||||
process: psutil_module.Process = psutil.Process()
|
||||
username = process.username()
|
||||
resource_info[PROCESS_OWNER] = username
|
||||
|
||||
return Resource(resource_info) # type: ignore
|
||||
|
||||
|
||||
class OsResourceDetector(ResourceDetector):
|
||||
"""Detect os resources based on `Operating System conventions <https://opentelemetry.io/docs/specs/semconv/resource/os/>`_."""
|
||||
|
||||
def detect(self) -> "Resource":
|
||||
"""Returns a resource with with ``os.type`` and ``os.version``.
|
||||
|
||||
Python's platform library
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
To grab this information, Python's ``platform`` module does not always return
|
||||
what a user might expect. Below is a breakdown of its return values on
|
||||
different operating systems.
|
||||
|
||||
.. code-block:: python
|
||||
:caption: Linux
|
||||
|
||||
>>> platform.system()
|
||||
'Linux'
|
||||
>>> platform.release()
|
||||
'6.5.0-35-generic'
|
||||
>>> platform.version()
|
||||
'#35~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Tue May 7 09:00:52 UTC 2'
|
||||
|
||||
.. code-block:: python
|
||||
:caption: MacOS
|
||||
|
||||
>>> platform.system()
|
||||
'Darwin'
|
||||
>>> platform.release()
|
||||
'23.0.0'
|
||||
>>> platform.version()
|
||||
'Darwin Kernel Version 23.0.0: Fri Sep 15 14:42:57 PDT 2023; root:xnu-10002.1.13~1/RELEASE_ARM64_T8112'
|
||||
|
||||
.. code-block:: python
|
||||
:caption: Windows
|
||||
|
||||
>>> platform.system()
|
||||
'Windows'
|
||||
>>> platform.release()
|
||||
'2022Server'
|
||||
>>> platform.version()
|
||||
'10.0.20348'
|
||||
|
||||
.. code-block:: python
|
||||
:caption: FreeBSD
|
||||
|
||||
>>> platform.system()
|
||||
'FreeBSD'
|
||||
>>> platform.release()
|
||||
'14.1-RELEASE'
|
||||
>>> platform.version()
|
||||
'FreeBSD 14.1-RELEASE releng/14.1-n267679-10e31f0946d8 GENERIC'
|
||||
|
||||
.. code-block:: python
|
||||
:caption: Solaris
|
||||
|
||||
>>> platform.system()
|
||||
'SunOS'
|
||||
>>> platform.release()
|
||||
'5.11'
|
||||
>>> platform.version()
|
||||
'11.4.0.15.0'
|
||||
|
||||
"""
|
||||
|
||||
os_type = platform.system().lower()
|
||||
os_version = platform.release()
|
||||
|
||||
# See docstring
|
||||
if os_type == "windows":
|
||||
os_version = platform.version()
|
||||
# Align SunOS with conventions
|
||||
elif os_type == "sunos":
|
||||
os_type = "solaris"
|
||||
os_version = platform.version()
|
||||
|
||||
return Resource(
|
||||
{
|
||||
OS_TYPE: os_type,
|
||||
OS_VERSION: os_version,
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
class _HostResourceDetector(ResourceDetector):
|
||||
"""
|
||||
The HostResourceDetector detects the hostname and architecture attributes.
|
||||
"""
|
||||
|
||||
def detect(self) -> "Resource":
|
||||
return Resource(
|
||||
{
|
||||
HOST_NAME: socket.gethostname(),
|
||||
HOST_ARCH: platform.machine(),
|
||||
}
|
||||
)
|
||||
|
||||
|
||||
def get_aggregated_resources(
|
||||
detectors: typing.List["ResourceDetector"],
|
||||
initial_resource: typing.Optional[Resource] = None,
|
||||
timeout: int = 5,
|
||||
) -> "Resource":
|
||||
"""Retrieves resources from detectors in the order that they were passed
|
||||
|
||||
:param detectors: List of resources in order of priority
|
||||
:param initial_resource: Static resource. This has highest priority
|
||||
:param timeout: Number of seconds to wait for each detector to return
|
||||
:return: The merged resource, built by applying each detector in order
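
    A minimal sketch using two of the detectors defined in this module:

    .. code-block:: python

        resource = get_aggregated_resources(
            [OTELResourceDetector(), ProcessResourceDetector()]
        )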
|
||||
"""
|
||||
detectors_merged_resource = initial_resource or Resource.create()
|
||||
|
||||
with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
|
||||
futures = [executor.submit(detector.detect) for detector in detectors]
|
||||
for detector_ind, future in enumerate(futures):
|
||||
detector = detectors[detector_ind]
|
||||
detected_resource: Resource = _EMPTY_RESOURCE
|
||||
try:
|
||||
detected_resource = future.result(timeout=timeout)
|
||||
except concurrent.futures.TimeoutError as ex:
|
||||
if detector.raise_on_error:
|
||||
raise ex
|
||||
logger.warning(
|
||||
"Detector %s took longer than %s seconds, skipping",
|
||||
detector,
|
||||
timeout,
|
||||
)
|
||||
# pylint: disable=broad-exception-caught
|
||||
except Exception as ex:
|
||||
if detector.raise_on_error:
|
||||
raise ex
|
||||
logger.warning(
|
||||
"Exception %s in detector %s, ignoring", ex, detector
|
||||
)
|
||||
finally:
|
||||
detectors_merged_resource = detectors_merged_resource.merge(
|
||||
detected_resource
|
||||
)
|
||||
|
||||
return detectors_merged_resource
|
||||
@@ -0,0 +1,519 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import collections
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import threading
|
||||
import typing
|
||||
import weakref
|
||||
from enum import Enum
|
||||
from os import environ, linesep
|
||||
from time import time_ns
|
||||
|
||||
from opentelemetry.context import (
|
||||
_SUPPRESS_INSTRUMENTATION_KEY,
|
||||
Context,
|
||||
attach,
|
||||
detach,
|
||||
set_value,
|
||||
)
|
||||
from opentelemetry.sdk.environment_variables import (
|
||||
OTEL_BSP_EXPORT_TIMEOUT,
|
||||
OTEL_BSP_MAX_EXPORT_BATCH_SIZE,
|
||||
OTEL_BSP_MAX_QUEUE_SIZE,
|
||||
OTEL_BSP_SCHEDULE_DELAY,
|
||||
)
|
||||
from opentelemetry.sdk.trace import ReadableSpan, Span, SpanProcessor
|
||||
from opentelemetry.util._once import Once
|
||||
|
||||
_DEFAULT_SCHEDULE_DELAY_MILLIS = 5000
|
||||
_DEFAULT_MAX_EXPORT_BATCH_SIZE = 512
|
||||
_DEFAULT_EXPORT_TIMEOUT_MILLIS = 30000
|
||||
_DEFAULT_MAX_QUEUE_SIZE = 2048
|
||||
_ENV_VAR_INT_VALUE_ERROR_MESSAGE = (
|
||||
"Unable to parse value for %s as integer. Defaulting to %s."
|
||||
)
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SpanExportResult(Enum):
|
||||
SUCCESS = 0
|
||||
FAILURE = 1
|
||||
|
||||
|
||||
class SpanExporter:
|
||||
"""Interface for exporting spans.
|
||||
|
||||
Interface to be implemented by services that want to export spans recorded
|
||||
in their own format.
|
||||
|
||||
To export data, this MUST be registered with the :class:`opentelemetry.sdk.trace.Tracer` using a
|
||||
`SimpleSpanProcessor` or a `BatchSpanProcessor`.
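
    A minimal registration sketch (console exporter for illustration):

    .. code-block:: python

        from opentelemetry.sdk.trace import TracerProvider
        from opentelemetry.sdk.trace.export import (
            ConsoleSpanExporter,
            SimpleSpanProcessor,
        )

        TracerProvider().add_span_processor(
            SimpleSpanProcessor(ConsoleSpanExporter())
        )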
|
||||
"""
|
||||
|
||||
def export(
|
||||
self, spans: typing.Sequence[ReadableSpan]
|
||||
) -> "SpanExportResult":
|
||||
"""Exports a batch of telemetry data.
|
||||
|
||||
Args:
|
||||
spans: The sequence of `ReadableSpan` objects to be exported
|
||||
|
||||
Returns:
|
||||
The result of the export
|
||||
"""
|
||||
|
||||
def shutdown(self) -> None:
|
||||
"""Shuts down the exporter.
|
||||
|
||||
Called when the SDK is shut down.
|
||||
"""
|
||||
|
||||
def force_flush(self, timeout_millis: int = 30000) -> bool:
|
||||
"""Hint to ensure that the export of any spans the exporter has received
|
||||
prior to the call to ForceFlush SHOULD be completed as soon as possible, preferably
|
||||
before returning from this method.
|
||||
"""
|
||||
|
||||
|
||||
class SimpleSpanProcessor(SpanProcessor):
|
||||
"""Simple SpanProcessor implementation.
|
||||
|
||||
SimpleSpanProcessor is an implementation of `SpanProcessor` that
|
||||
passes ended spans directly to the configured `SpanExporter`.
|
||||
"""
|
||||
|
||||
def __init__(self, span_exporter: SpanExporter):
|
||||
self.span_exporter = span_exporter
|
||||
|
||||
def on_start(
|
||||
self, span: Span, parent_context: typing.Optional[Context] = None
|
||||
) -> None:
|
||||
pass
|
||||
|
||||
def on_end(self, span: ReadableSpan) -> None:
|
||||
if not span.context.trace_flags.sampled:
|
||||
return
|
||||
token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
|
||||
try:
|
||||
self.span_exporter.export((span,))
|
||||
# pylint: disable=broad-exception-caught
|
||||
except Exception:
|
||||
logger.exception("Exception while exporting Span.")
|
||||
detach(token)
|
||||
|
||||
def shutdown(self) -> None:
|
||||
self.span_exporter.shutdown()
|
||||
|
||||
def force_flush(self, timeout_millis: int = 30000) -> bool:
|
||||
# pylint: disable=unused-argument
|
||||
return True
|
||||
|
||||
|
||||
class _FlushRequest:
|
||||
"""Represents a request for the BatchSpanProcessor to flush spans."""
|
||||
|
||||
__slots__ = ["event", "num_spans"]
|
||||
|
||||
def __init__(self):
|
||||
self.event = threading.Event()
|
||||
self.num_spans = 0
|
||||
|
||||
|
||||
_BSP_RESET_ONCE = Once()
|
||||
|
||||
|
||||
class BatchSpanProcessor(SpanProcessor):
|
||||
"""Batch span processor implementation.
|
||||
|
||||
`BatchSpanProcessor` is an implementation of `SpanProcessor` that
|
||||
batches ended spans and pushes them to the configured `SpanExporter`.
|
||||
|
||||
`BatchSpanProcessor` is configurable with the following environment
|
||||
variables which correspond to constructor parameters:
|
||||
|
||||
- :envvar:`OTEL_BSP_SCHEDULE_DELAY`
|
||||
- :envvar:`OTEL_BSP_MAX_QUEUE_SIZE`
|
||||
- :envvar:`OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
|
||||
- :envvar:`OTEL_BSP_EXPORT_TIMEOUT`
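
    A wiring sketch (the exporter choice and queue size are illustrative);
    the same settings can instead come from the environment variables above:

    .. code-block:: python

        from opentelemetry.sdk.trace import TracerProvider
        from opentelemetry.sdk.trace.export import (
            BatchSpanProcessor,
            ConsoleSpanExporter,
        )

        provider = TracerProvider()
        provider.add_span_processor(
            BatchSpanProcessor(ConsoleSpanExporter(), max_queue_size=4096)
        )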
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
span_exporter: SpanExporter,
|
||||
max_queue_size: int | None = None,
|
||||
schedule_delay_millis: float | None = None,
|
||||
max_export_batch_size: int | None = None,
|
||||
export_timeout_millis: float | None = None,
|
||||
):
|
||||
if max_queue_size is None:
|
||||
max_queue_size = BatchSpanProcessor._default_max_queue_size()
|
||||
|
||||
if schedule_delay_millis is None:
|
||||
schedule_delay_millis = (
|
||||
BatchSpanProcessor._default_schedule_delay_millis()
|
||||
)
|
||||
|
||||
if max_export_batch_size is None:
|
||||
max_export_batch_size = (
|
||||
BatchSpanProcessor._default_max_export_batch_size()
|
||||
)
|
||||
|
||||
if export_timeout_millis is None:
|
||||
export_timeout_millis = (
|
||||
BatchSpanProcessor._default_export_timeout_millis()
|
||||
)
|
||||
|
||||
BatchSpanProcessor._validate_arguments(
|
||||
max_queue_size, schedule_delay_millis, max_export_batch_size
|
||||
)
|
||||
|
||||
self.span_exporter = span_exporter
|
||||
self.queue = collections.deque([], max_queue_size) # type: typing.Deque[Span]
|
||||
self.worker_thread = threading.Thread(
|
||||
name="OtelBatchSpanProcessor", target=self.worker, daemon=True
|
||||
)
|
||||
self.condition = threading.Condition(threading.Lock())
|
||||
self._flush_request = None # type: typing.Optional[_FlushRequest]
|
||||
self.schedule_delay_millis = schedule_delay_millis
|
||||
self.max_export_batch_size = max_export_batch_size
|
||||
self.max_queue_size = max_queue_size
|
||||
self.export_timeout_millis = export_timeout_millis
|
||||
self.done = False
|
||||
# flag that indicates that spans are being dropped
|
||||
self._spans_dropped = False
|
||||
# precallocated list to send spans to exporter
|
||||
self.spans_list = [None] * self.max_export_batch_size # type: typing.List[typing.Optional[Span]]
|
||||
self.worker_thread.start()
|
||||
if hasattr(os, "register_at_fork"):
|
||||
weak_reinit = weakref.WeakMethod(self._at_fork_reinit)
|
||||
os.register_at_fork(after_in_child=lambda: weak_reinit()()) # pylint: disable=unnecessary-lambda
|
||||
self._pid = os.getpid()
|
||||
|
||||
def on_start(
|
||||
self, span: Span, parent_context: Context | None = None
|
||||
) -> None:
|
||||
pass
|
||||
|
||||
def on_end(self, span: ReadableSpan) -> None:
|
||||
if self.done:
|
||||
logger.warning("Already shutdown, dropping span.")
|
||||
return
|
||||
if not span.context.trace_flags.sampled:
|
||||
return
|
||||
if self._pid != os.getpid():
|
||||
_BSP_RESET_ONCE.do_once(self._at_fork_reinit)
|
||||
|
||||
if len(self.queue) == self.max_queue_size:
|
||||
if not self._spans_dropped:
|
||||
logger.warning("Queue is full, likely spans will be dropped.")
|
||||
self._spans_dropped = True
|
||||
|
||||
self.queue.appendleft(span)
|
||||
|
||||
if len(self.queue) >= self.max_export_batch_size:
|
||||
with self.condition:
|
||||
self.condition.notify()
|
||||
|
||||
def _at_fork_reinit(self):
|
||||
self.condition = threading.Condition(threading.Lock())
|
||||
self.queue.clear()
|
||||
|
||||
# worker_thread is local to a process; only the thread that issued the fork
|
||||
# continues to exist. A new worker thread must be started in the child process.
|
||||
self.worker_thread = threading.Thread(
|
||||
name="OtelBatchSpanProcessor", target=self.worker, daemon=True
|
||||
)
|
||||
self.worker_thread.start()
|
||||
self._pid = os.getpid()
|
||||
|
||||
def worker(self):
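"""Worker loop for the background thread: waits up to the schedule delay
for a full batch or an explicit flush request, exports spans in batches,
and drains the queue once shutdown is signalled."""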
|
||||
timeout = self.schedule_delay_millis / 1e3
|
||||
flush_request = None # type: typing.Optional[_FlushRequest]
|
||||
while not self.done:
|
||||
with self.condition:
|
||||
if self.done:
|
||||
# done flag may have changed, avoid waiting
|
||||
break
|
||||
flush_request = self._get_and_unset_flush_request()
|
||||
if (
|
||||
len(self.queue) < self.max_export_batch_size
|
||||
and flush_request is None
|
||||
):
|
||||
self.condition.wait(timeout)
|
||||
flush_request = self._get_and_unset_flush_request()
|
||||
if not self.queue:
|
||||
# spurious notification, let's wait again, reset timeout
|
||||
timeout = self.schedule_delay_millis / 1e3
|
||||
self._notify_flush_request_finished(flush_request)
|
||||
flush_request = None
|
||||
continue
|
||||
if self.done:
|
||||
# missing spans will be sent when calling flush
|
||||
break
|
||||
|
||||
# subtract the duration of this export call from the next timeout
|
||||
start = time_ns()
|
||||
self._export(flush_request)
|
||||
end = time_ns()
|
||||
duration = (end - start) / 1e9
|
||||
timeout = self.schedule_delay_millis / 1e3 - duration
|
||||
|
||||
self._notify_flush_request_finished(flush_request)
|
||||
flush_request = None
|
||||
|
||||
# there might have been a new flush request while export was running
|
||||
# and before the done flag switched to true
|
||||
with self.condition:
|
||||
shutdown_flush_request = self._get_and_unset_flush_request()
|
||||
|
||||
# be sure that all spans are sent
|
||||
self._drain_queue()
|
||||
self._notify_flush_request_finished(flush_request)
|
||||
self._notify_flush_request_finished(shutdown_flush_request)
|
||||
|
||||
def _get_and_unset_flush_request(
|
||||
self,
|
||||
) -> typing.Optional[_FlushRequest]:
|
||||
"""Returns the current flush request and makes it invisible to the
|
||||
worker thread for subsequent calls.
|
||||
"""
|
||||
flush_request = self._flush_request
|
||||
self._flush_request = None
|
||||
if flush_request is not None:
|
||||
flush_request.num_spans = len(self.queue)
|
||||
return flush_request
|
||||
|
||||
@staticmethod
|
||||
def _notify_flush_request_finished(
|
||||
flush_request: typing.Optional[_FlushRequest],
|
||||
):
|
||||
"""Notifies the flush initiator(s) waiting on the given request/event
|
||||
that the flush operation was finished.
|
||||
"""
|
||||
if flush_request is not None:
|
||||
flush_request.event.set()
|
||||
|
||||
def _get_or_create_flush_request(self) -> _FlushRequest:
|
||||
"""Either returns the current active flush event or creates a new one.
|
||||
|
||||
The flush event will be visible and read by the worker thread before an
|
||||
export operation starts. Callers of a flush operation may wait on the
|
||||
returned event to be notified when the flush/export operation was
|
||||
finished.
|
||||
|
||||
This method is not thread-safe, i.e. callers need to take care about
|
||||
synchronization/locking.
|
||||
"""
|
||||
if self._flush_request is None:
|
||||
self._flush_request = _FlushRequest()
|
||||
return self._flush_request
|
||||
|
||||
def _export(self, flush_request: typing.Optional[_FlushRequest]):
|
||||
"""Exports spans considering the given flush_request.
|
||||
|
||||
In case of a given flush_requests spans are exported in batches until
|
||||
the number of exported spans reached or exceeded the number of spans in
|
||||
the flush request.
|
||||
In no flush_request was given at most max_export_batch_size spans are
|
||||
exported.
|
||||
"""
|
||||
        if not flush_request:
            self._export_batch()
            return

        num_spans = flush_request.num_spans
        while self.queue:
            num_exported = self._export_batch()
            num_spans -= num_exported

            if num_spans <= 0:
                break

    def _export_batch(self) -> int:
        """Exports at most max_export_batch_size spans and returns the number of
        exported spans.
        """
        idx = 0
        # currently only a single thread acts as consumer, so queue.pop() will
        # not raise an exception
        while idx < self.max_export_batch_size and self.queue:
            self.spans_list[idx] = self.queue.pop()
            idx += 1
        token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
        try:
            # Ignore type b/c the Optional[None]+slicing is too "clever"
            # for mypy
            self.span_exporter.export(self.spans_list[:idx])  # type: ignore
        except Exception:  # pylint: disable=broad-exception-caught
            logger.exception("Exception while exporting Span batch.")
        detach(token)

        # clean up list
        for index in range(idx):
            self.spans_list[index] = None
        return idx

    def _drain_queue(self):
        """Export all elements until queue is empty.

        Can only be called from the worker thread context because it invokes
        `export` that is not thread safe.
        """
        while self.queue:
            self._export_batch()

    def force_flush(self, timeout_millis: int | None = None) -> bool:
        if timeout_millis is None:
            timeout_millis = self.export_timeout_millis

        if self.done:
            logger.warning("Already shutdown, ignoring call to force_flush().")
            return True

        with self.condition:
            flush_request = self._get_or_create_flush_request()
            # signal the worker thread to flush and wait for it to finish
            self.condition.notify_all()

        # wait for token to be processed
        ret = flush_request.event.wait(timeout_millis / 1e3)
        if not ret:
            logger.warning("Timeout was exceeded in force_flush().")
        return ret

    def shutdown(self) -> None:
        # signal the worker thread to finish and then wait for it
        self.done = True
        with self.condition:
            self.condition.notify_all()
        self.worker_thread.join()
        self.span_exporter.shutdown()

    @staticmethod
    def _default_max_queue_size():
        try:
            return int(
                environ.get(OTEL_BSP_MAX_QUEUE_SIZE, _DEFAULT_MAX_QUEUE_SIZE)
            )
        except ValueError:
            logger.exception(
                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
                OTEL_BSP_MAX_QUEUE_SIZE,
                _DEFAULT_MAX_QUEUE_SIZE,
            )
            return _DEFAULT_MAX_QUEUE_SIZE

    @staticmethod
    def _default_schedule_delay_millis():
        try:
            return int(
                environ.get(
                    OTEL_BSP_SCHEDULE_DELAY, _DEFAULT_SCHEDULE_DELAY_MILLIS
                )
            )
        except ValueError:
            logger.exception(
                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
                OTEL_BSP_SCHEDULE_DELAY,
                _DEFAULT_SCHEDULE_DELAY_MILLIS,
            )
            return _DEFAULT_SCHEDULE_DELAY_MILLIS

    @staticmethod
    def _default_max_export_batch_size():
        try:
            return int(
                environ.get(
                    OTEL_BSP_MAX_EXPORT_BATCH_SIZE,
                    _DEFAULT_MAX_EXPORT_BATCH_SIZE,
                )
            )
        except ValueError:
            logger.exception(
                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
                OTEL_BSP_MAX_EXPORT_BATCH_SIZE,
                _DEFAULT_MAX_EXPORT_BATCH_SIZE,
            )
            return _DEFAULT_MAX_EXPORT_BATCH_SIZE

    @staticmethod
    def _default_export_timeout_millis():
        try:
            return int(
                environ.get(
                    OTEL_BSP_EXPORT_TIMEOUT, _DEFAULT_EXPORT_TIMEOUT_MILLIS
                )
            )
        except ValueError:
            logger.exception(
                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
                OTEL_BSP_EXPORT_TIMEOUT,
                _DEFAULT_EXPORT_TIMEOUT_MILLIS,
            )
            return _DEFAULT_EXPORT_TIMEOUT_MILLIS

    @staticmethod
    def _validate_arguments(
        max_queue_size, schedule_delay_millis, max_export_batch_size
    ):
        if max_queue_size <= 0:
            raise ValueError("max_queue_size must be a positive integer.")

        if schedule_delay_millis <= 0:
            raise ValueError("schedule_delay_millis must be positive.")

        if max_export_batch_size <= 0:
            raise ValueError(
                "max_export_batch_size must be a positive integer."
            )

        if max_export_batch_size > max_queue_size:
            raise ValueError(
                "max_export_batch_size must be less than or equal to max_queue_size."
            )


class ConsoleSpanExporter(SpanExporter):
    """Implementation of :class:`SpanExporter` that prints spans to the
    console.

    This class can be used for diagnostic purposes. It prints the exported
    spans to the console STDOUT.
    """

    def __init__(
        self,
        service_name: str | None = None,
        out: typing.IO = sys.stdout,
        formatter: typing.Callable[
            [ReadableSpan], str
        ] = lambda span: span.to_json() + linesep,
    ):
        self.out = out
        self.formatter = formatter
        self.service_name = service_name

    def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult:
        for span in spans:
            self.out.write(self.formatter(span))
        self.out.flush()
        return SpanExportResult.SUCCESS

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        return True
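A minimal usage sketch wiring the batch processor above to the console exporter (the span name and the explicit force_flush call are illustrative, not part of this file):

.. code:: python

    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import (
        BatchSpanProcessor,
        ConsoleSpanExporter,
    )

    provider = TracerProvider()
    # Spans are queued by the processor and exported on its worker thread.
    provider.add_span_processor(BatchSpanProcessor(ConsoleSpanExporter()))
    trace.set_tracer_provider(provider)

    with trace.get_tracer(__name__).start_as_current_span("example-span"):
        pass

    # Drain anything still queued before the process exits.
    provider.force_flush()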
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,61 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import threading
import typing

from opentelemetry.sdk.trace import ReadableSpan
from opentelemetry.sdk.trace.export import SpanExporter, SpanExportResult


class InMemorySpanExporter(SpanExporter):
    """Implementation of :class:`.SpanExporter` that stores spans in memory.

    This class can be used for testing purposes. It stores the exported spans
    in a list in memory that can be retrieved using the
    :func:`.get_finished_spans` method.
    """

    def __init__(self) -> None:
        self._finished_spans: typing.List[ReadableSpan] = []
        self._stopped = False
        self._lock = threading.Lock()

    def clear(self) -> None:
        """Clear list of collected spans."""
        with self._lock:
            self._finished_spans.clear()

    def get_finished_spans(self) -> typing.Tuple[ReadableSpan, ...]:
        """Get list of collected spans."""
        with self._lock:
            return tuple(self._finished_spans)

    def export(self, spans: typing.Sequence[ReadableSpan]) -> SpanExportResult:
        """Stores a list of spans in memory."""
        if self._stopped:
            return SpanExportResult.FAILURE
        with self._lock:
            self._finished_spans.extend(spans)
        return SpanExportResult.SUCCESS

    def shutdown(self) -> None:
        """Shuts down the exporter.

        Calls to export after the exporter has been shut down will fail.
        """
        self._stopped = True

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        return True
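A sketch of how this exporter is typically exercised in tests (pairing it with SimpleSpanProcessor is an assumption; any processor works):

.. code:: python

    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import SimpleSpanProcessor
    from opentelemetry.sdk.trace.export.in_memory_span_exporter import (
        InMemorySpanExporter,
    )

    exporter = InMemorySpanExporter()
    provider = TracerProvider()
    provider.add_span_processor(SimpleSpanProcessor(exporter))

    with provider.get_tracer(__name__).start_as_current_span("under-test"):
        pass

    # The simple processor exports synchronously when the span ends.
    (span,) = exporter.get_finished_spans()
    assert span.name == "under-test"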
@@ -0,0 +1,60 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
import random

from opentelemetry import trace


class IdGenerator(abc.ABC):
    @abc.abstractmethod
    def generate_span_id(self) -> int:
        """Get a new span ID.

        Returns:
            A 64-bit int for use as a span ID
        """

    @abc.abstractmethod
    def generate_trace_id(self) -> int:
        """Get a new trace ID.

        Implementations should at least make the 64 least significant bits
        uniformly random. Samplers like the `TraceIdRatioBased` sampler rely on
        this randomness to make sampling decisions.

        See `the specification on TraceIdRatioBased <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk.md#traceidratiobased>`_.

        Returns:
            A 128-bit int for use as a trace ID
        """


class RandomIdGenerator(IdGenerator):
    """The default ID generator for TracerProvider which randomly generates all
    bits when generating IDs.
    """

    def generate_span_id(self) -> int:
        span_id = random.getrandbits(64)
        while span_id == trace.INVALID_SPAN_ID:
            span_id = random.getrandbits(64)
        return span_id

    def generate_trace_id(self) -> int:
        trace_id = random.getrandbits(128)
        while trace_id == trace.INVALID_TRACE_ID:
            trace_id = random.getrandbits(128)
        return trace_id
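A hedged sketch of an alternative generator built on the IdGenerator interface above — a seeded variant for reproducible tests (the class name and seed are illustrative, not part of the SDK):

.. code:: python

    import random

    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.id_generator import IdGenerator


    class SeededIdGenerator(IdGenerator):
        """Deterministic IDs from a seeded PRNG, useful in tests."""

        def __init__(self, seed: int = 42) -> None:
            self._rng = random.Random(seed)

        def generate_span_id(self) -> int:
            # 0 is the invalid span ID, so draw again if we hit it.
            span_id = self._rng.getrandbits(64)
            while span_id == 0:
                span_id = self._rng.getrandbits(64)
            return span_id

        def generate_trace_id(self) -> int:
            trace_id = self._rng.getrandbits(128)
            while trace_id == 0:
                trace_id = self._rng.getrandbits(128)
            return trace_id


    # TracerProvider accepts a custom generator via its id_generator parameter.
    provider = TracerProvider(id_generator=SeededIdGenerator())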
@@ -0,0 +1,453 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
For general information about sampling, see `the specification <https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/sdk.md#sampling>`_.

OpenTelemetry provides two types of samplers:

- `StaticSampler`
- `TraceIdRatioBased`

A `StaticSampler` always returns the same sampling result regardless of the conditions. Both possible StaticSamplers are already created:

- Always sample spans: ALWAYS_ON
- Never sample spans: ALWAYS_OFF

A `TraceIdRatioBased` sampler makes a random sampling result based on the sampling probability given.

If the span being sampled has a parent, `ParentBased` will respect the parent delegate sampler. Otherwise, it returns the sampling result from the given root sampler.

Currently, sampling results are always made during the creation of the span. However, this might not always be the case in the future (see `OTEP #115 <https://github.com/open-telemetry/oteps/pull/115>`_).

Custom samplers can be created by subclassing `Sampler` and implementing `Sampler.should_sample` as well as `Sampler.get_description`.

Samplers are able to modify the `opentelemetry.trace.span.TraceState` of the parent of the span being created. For custom samplers, it is suggested to implement `Sampler.should_sample` to utilize the
parent span context's `opentelemetry.trace.span.TraceState` and pass into the `SamplingResult` instead of the explicit trace_state field passed into the parameter of `Sampler.should_sample`.

To use a sampler, pass it into the tracer provider constructor. For example:

.. code:: python

    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import (
        ConsoleSpanExporter,
        SimpleSpanProcessor,
    )
    from opentelemetry.sdk.trace.sampling import TraceIdRatioBased

    # sample 1 in every 1000 traces
    sampler = TraceIdRatioBased(1/1000)

    # set the sampler onto the global tracer provider
    trace.set_tracer_provider(TracerProvider(sampler=sampler))

    # set up an exporter for sampled spans
    trace.get_tracer_provider().add_span_processor(
        SimpleSpanProcessor(ConsoleSpanExporter())
    )

    # created spans will now be sampled by the TraceIdRatioBased sampler
    with trace.get_tracer(__name__).start_as_current_span("Test Span"):
        ...

The tracer sampler can also be configured via environment variables ``OTEL_TRACES_SAMPLER`` and ``OTEL_TRACES_SAMPLER_ARG`` (only if applicable).
The list of built-in values for ``OTEL_TRACES_SAMPLER`` are:

* always_on - Sampler that always samples spans, regardless of the parent span's sampling decision.
* always_off - Sampler that never samples spans, regardless of the parent span's sampling decision.
* traceidratio - Sampler that samples probabilistically based on rate.
* parentbased_always_on - (default) Sampler that respects its parent span's sampling decision, but otherwise always samples.
* parentbased_always_off - Sampler that respects its parent span's sampling decision, but otherwise never samples.
* parentbased_traceidratio - Sampler that respects its parent span's sampling decision, but otherwise samples probabilistically based on rate.

Sampling probability can be set with ``OTEL_TRACES_SAMPLER_ARG`` if the sampler is traceidratio or parentbased_traceidratio. The rate must be in the range [0.0, 1.0]. When not provided, the rate will be set to
1.0 (maximum rate possible).

The previous example, but using environment variables. Make sure to set ``OTEL_TRACES_SAMPLER=traceidratio`` and ``OTEL_TRACES_SAMPLER_ARG=0.001``.

.. code:: python

    from opentelemetry import trace
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import (
        ConsoleSpanExporter,
        SimpleSpanProcessor,
    )

    trace.set_tracer_provider(TracerProvider())

    # set up an exporter for sampled spans
    trace.get_tracer_provider().add_span_processor(
        SimpleSpanProcessor(ConsoleSpanExporter())
    )

    # created spans will now be sampled by the TraceIdRatioBased sampler with rate 1/1000.
    with trace.get_tracer(__name__).start_as_current_span("Test Span"):
        ...

When utilizing a configurator, you can configure a custom sampler. In order to create a configurable custom sampler, create an entry point for the custom sampler
factory method or function under the entry point group, ``opentelemetry_traces_sampler``. The custom sampler factory method must be of type ``Callable[[str], Sampler]``, taking a single string argument and
returning a Sampler object. The single input will come from the string value of the ``OTEL_TRACES_SAMPLER_ARG`` environment variable. If ``OTEL_TRACES_SAMPLER_ARG`` is not configured, the input will
be an empty string. For example:

.. code:: python

    setup(
        ...
        entry_points={
            ...
            "opentelemetry_traces_sampler": [
                "custom_sampler_name = path.to.sampler.factory.method:CustomSamplerFactory.get_sampler"
            ]
        }
    )
    # ...
    class CustomRatioSampler(Sampler):
        def __init__(self, rate):
            # ...
    # ...
    class CustomSamplerFactory:
        @staticmethod
        def get_sampler(sampler_argument):
            try:
                rate = float(sampler_argument)
                return CustomRatioSampler(rate)
            except ValueError:  # In case argument is empty string.
                return CustomRatioSampler(0.5)

To configure your application with a custom sampler's entry point, set the ``OTEL_TRACES_SAMPLER`` environment variable to the key name of the entry point. For example, to configure the
above sampler, set ``OTEL_TRACES_SAMPLER=custom_sampler_name`` and ``OTEL_TRACES_SAMPLER_ARG=0.5``.
"""

import abc
import enum
import os
from logging import getLogger
from types import MappingProxyType
from typing import Optional, Sequence

# pylint: disable=unused-import
from opentelemetry.context import Context
from opentelemetry.sdk.environment_variables import (
    OTEL_TRACES_SAMPLER,
    OTEL_TRACES_SAMPLER_ARG,
)
from opentelemetry.trace import Link, SpanKind, get_current_span
from opentelemetry.trace.span import TraceState
from opentelemetry.util.types import Attributes

_logger = getLogger(__name__)

class Decision(enum.Enum):
    # IsRecording() == false, span will not be recorded and all events and attributes will be dropped.
    DROP = 0
    # IsRecording() == true, but Sampled flag MUST NOT be set.
    RECORD_ONLY = 1
    # IsRecording() == true AND Sampled flag MUST be set.
    RECORD_AND_SAMPLE = 2

    def is_recording(self):
        return self in (Decision.RECORD_ONLY, Decision.RECORD_AND_SAMPLE)

    def is_sampled(self):
        return self is Decision.RECORD_AND_SAMPLE


class SamplingResult:
    """A sampling result as applied to a newly-created Span.

    Args:
        decision: A sampling decision based off of whether the span is recorded
            and the sampled flag in trace flags in the span context.
        attributes: Attributes to add to the `opentelemetry.trace.Span`.
        trace_state: The tracestate used for the `opentelemetry.trace.Span`.
            Could possibly have been modified by the sampler.
    """

    def __repr__(self) -> str:
        return f"{type(self).__name__}({str(self.decision)}, attributes={str(self.attributes)})"

    def __init__(
        self,
        decision: Decision,
        attributes: "Attributes" = None,
        trace_state: Optional["TraceState"] = None,
    ) -> None:
        self.decision = decision
        if attributes is None:
            self.attributes = MappingProxyType({})
        else:
            self.attributes = MappingProxyType(attributes)
        self.trace_state = trace_state


class Sampler(abc.ABC):
    @abc.abstractmethod
    def should_sample(
        self,
        parent_context: Optional["Context"],
        trace_id: int,
        name: str,
        kind: Optional[SpanKind] = None,
        attributes: Attributes = None,
        links: Optional[Sequence["Link"]] = None,
        trace_state: Optional["TraceState"] = None,
    ) -> "SamplingResult":
        pass

    @abc.abstractmethod
    def get_description(self) -> str:
        pass


class StaticSampler(Sampler):
    """Sampler that always returns the same decision."""

    def __init__(self, decision: "Decision") -> None:
        self._decision = decision

    def should_sample(
        self,
        parent_context: Optional["Context"],
        trace_id: int,
        name: str,
        kind: Optional[SpanKind] = None,
        attributes: Attributes = None,
        links: Optional[Sequence["Link"]] = None,
        trace_state: Optional["TraceState"] = None,
    ) -> "SamplingResult":
        if self._decision is Decision.DROP:
            attributes = None
        return SamplingResult(
            self._decision,
            attributes,
            _get_parent_trace_state(parent_context),
        )

    def get_description(self) -> str:
        if self._decision is Decision.DROP:
            return "AlwaysOffSampler"
        return "AlwaysOnSampler"


ALWAYS_OFF = StaticSampler(Decision.DROP)
"""Sampler that never samples spans, regardless of the parent span's sampling decision."""

ALWAYS_ON = StaticSampler(Decision.RECORD_AND_SAMPLE)
"""Sampler that always samples spans, regardless of the parent span's sampling decision."""


class TraceIdRatioBased(Sampler):
    """
    Sampler that makes sampling decisions probabilistically based on `rate`.

    Args:
        rate: Probability (between 0 and 1) that a span will be sampled
    """

    def __init__(self, rate: float):
        if rate < 0.0 or rate > 1.0:
            raise ValueError("Probability must be in range [0.0, 1.0].")
        self._rate = rate
        self._bound = self.get_bound_for_rate(self._rate)

    # For compatibility with 64 bit trace IDs, the sampler checks the 64
    # low-order bits of the trace ID to decide whether to sample a given trace.
    TRACE_ID_LIMIT = (1 << 64) - 1

    @classmethod
    def get_bound_for_rate(cls, rate: float) -> int:
        return round(rate * (cls.TRACE_ID_LIMIT + 1))

    @property
    def rate(self) -> float:
        return self._rate

    @property
    def bound(self) -> int:
        return self._bound

    def should_sample(
        self,
        parent_context: Optional["Context"],
        trace_id: int,
        name: str,
        kind: Optional[SpanKind] = None,
        attributes: Attributes = None,
        links: Optional[Sequence["Link"]] = None,
        trace_state: Optional["TraceState"] = None,
    ) -> "SamplingResult":
        decision = Decision.DROP
        if trace_id & self.TRACE_ID_LIMIT < self.bound:
            decision = Decision.RECORD_AND_SAMPLE
        if decision is Decision.DROP:
            attributes = None
        return SamplingResult(
            decision,
            attributes,
            _get_parent_trace_state(parent_context),
        )

    def get_description(self) -> str:
        return f"TraceIdRatioBased{{{self._rate}}}"


class ParentBased(Sampler):
    """
    If a parent is set, applies the respective delegate sampler.
    Otherwise, uses the root provided at initialization to make a
    decision.

    Args:
        root: Sampler called for spans with no parent (root spans).
        remote_parent_sampled: Sampler called for a remote sampled parent.
        remote_parent_not_sampled: Sampler called for a remote parent that is
            not sampled.
        local_parent_sampled: Sampler called for a local sampled parent.
        local_parent_not_sampled: Sampler called for a local parent that is
            not sampled.
    """

    def __init__(
        self,
        root: Sampler,
        remote_parent_sampled: Sampler = ALWAYS_ON,
        remote_parent_not_sampled: Sampler = ALWAYS_OFF,
        local_parent_sampled: Sampler = ALWAYS_ON,
        local_parent_not_sampled: Sampler = ALWAYS_OFF,
    ):
        self._root = root
        self._remote_parent_sampled = remote_parent_sampled
        self._remote_parent_not_sampled = remote_parent_not_sampled
        self._local_parent_sampled = local_parent_sampled
        self._local_parent_not_sampled = local_parent_not_sampled

    def should_sample(
        self,
        parent_context: Optional["Context"],
        trace_id: int,
        name: str,
        kind: Optional[SpanKind] = None,
        attributes: Attributes = None,
        links: Optional[Sequence["Link"]] = None,
        trace_state: Optional["TraceState"] = None,
    ) -> "SamplingResult":
        parent_span_context = get_current_span(
            parent_context
        ).get_span_context()
        # default to the root sampler
        sampler = self._root
        # respect the sampling and remote flag of the parent if present
        if parent_span_context is not None and parent_span_context.is_valid:
            if parent_span_context.is_remote:
                if parent_span_context.trace_flags.sampled:
                    sampler = self._remote_parent_sampled
                else:
                    sampler = self._remote_parent_not_sampled
            else:
                if parent_span_context.trace_flags.sampled:
                    sampler = self._local_parent_sampled
                else:
                    sampler = self._local_parent_not_sampled

        return sampler.should_sample(
            parent_context=parent_context,
            trace_id=trace_id,
            name=name,
            kind=kind,
            attributes=attributes,
            links=links,
        )

    def get_description(self):
        return f"ParentBased{{root:{self._root.get_description()},remoteParentSampled:{self._remote_parent_sampled.get_description()},remoteParentNotSampled:{self._remote_parent_not_sampled.get_description()},localParentSampled:{self._local_parent_sampled.get_description()},localParentNotSampled:{self._local_parent_not_sampled.get_description()}}}"


DEFAULT_OFF = ParentBased(ALWAYS_OFF)
"""Sampler that respects its parent span's sampling decision, but otherwise never samples."""

DEFAULT_ON = ParentBased(ALWAYS_ON)
"""Sampler that respects its parent span's sampling decision, but otherwise always samples."""


class ParentBasedTraceIdRatio(ParentBased):
    """
    Sampler that respects its parent span's sampling decision, but otherwise
    samples probabilistically based on `rate`.
    """

    def __init__(self, rate: float):
        root = TraceIdRatioBased(rate=rate)
        super().__init__(root=root)


class _AlwaysOff(StaticSampler):
    def __init__(self, _):
        super().__init__(Decision.DROP)


class _AlwaysOn(StaticSampler):
    def __init__(self, _):
        super().__init__(Decision.RECORD_AND_SAMPLE)


class _ParentBasedAlwaysOff(ParentBased):
    def __init__(self, _):
        super().__init__(ALWAYS_OFF)


class _ParentBasedAlwaysOn(ParentBased):
    def __init__(self, _):
        super().__init__(ALWAYS_ON)


_KNOWN_SAMPLERS = {
    "always_on": ALWAYS_ON,
    "always_off": ALWAYS_OFF,
    "parentbased_always_on": DEFAULT_ON,
    "parentbased_always_off": DEFAULT_OFF,
    "traceidratio": TraceIdRatioBased,
    "parentbased_traceidratio": ParentBasedTraceIdRatio,
}


def _get_from_env_or_default() -> Sampler:
    trace_sampler = os.getenv(
        OTEL_TRACES_SAMPLER, "parentbased_always_on"
    ).lower()
    if trace_sampler not in _KNOWN_SAMPLERS:
        _logger.warning("Couldn't recognize sampler %s.", trace_sampler)
        trace_sampler = "parentbased_always_on"

    if trace_sampler in ("traceidratio", "parentbased_traceidratio"):
        try:
            rate = float(os.getenv(OTEL_TRACES_SAMPLER_ARG))
        except (ValueError, TypeError):
            _logger.warning("Could not convert TRACES_SAMPLER_ARG to float.")
            rate = 1.0
        return _KNOWN_SAMPLERS[trace_sampler](rate)

    return _KNOWN_SAMPLERS[trace_sampler]


def _get_parent_trace_state(
    parent_context: Optional[Context],
) -> Optional["TraceState"]:
    parent_span_context = get_current_span(parent_context).get_span_context()
    if parent_span_context is None or not parent_span_context.is_valid:
        return None
    return parent_span_context.trace_state
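To make the bound arithmetic in TraceIdRatioBased concrete, a small sketch that reproduces its decision rule outside the sampler (the trace ID value is illustrative):

.. code:: python

    from opentelemetry.sdk.trace.sampling import TraceIdRatioBased

    sampler = TraceIdRatioBased(0.25)
    # A trace is kept when its 64 low-order bits fall below
    # round(rate * 2**64).
    assert sampler.bound == round(0.25 * (1 << 64))

    trace_id = 0x00000000000000000123456789ABCDEF
    keep = (trace_id & TraceIdRatioBased.TRACE_ID_LIMIT) < sampler.bound
    print(keep)  # True: the low 64 bits sit well below the bound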
@@ -0,0 +1,152 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import threading
from collections import deque
from collections.abc import MutableMapping, Sequence
from typing import Optional

from deprecated import deprecated


def ns_to_iso_str(nanoseconds):
    """Get an ISO 8601 string from time_ns value."""
    ts = datetime.datetime.fromtimestamp(
        nanoseconds / 1e9, tz=datetime.timezone.utc
    )
    return ts.strftime("%Y-%m-%dT%H:%M:%S.%fZ")


def get_dict_as_key(labels):
    """Converts a dict to be used as a unique key"""
    return tuple(
        sorted(
            map(
                lambda kv: (
                    (kv[0], tuple(kv[1])) if isinstance(kv[1], list) else kv
                ),
                labels.items(),
            )
        )
    )


class BoundedList(Sequence):
    """An append only list with a fixed max size.

    Calls to `append` and `extend` will drop the oldest elements if there is
    not enough room.
    """

    def __init__(self, maxlen: Optional[int]):
        self.dropped = 0
        self._dq = deque(maxlen=maxlen)  # type: deque
        self._lock = threading.Lock()

    def __repr__(self):
        return f"{type(self).__name__}({list(self._dq)}, maxlen={self._dq.maxlen})"

    def __getitem__(self, index):
        return self._dq[index]

    def __len__(self):
        return len(self._dq)

    def __iter__(self):
        with self._lock:
            return iter(deque(self._dq))

    def append(self, item):
        with self._lock:
            if (
                self._dq.maxlen is not None
                and len(self._dq) == self._dq.maxlen
            ):
                self.dropped += 1
            self._dq.append(item)

    def extend(self, seq):
        with self._lock:
            if self._dq.maxlen is not None:
                to_drop = len(seq) + len(self._dq) - self._dq.maxlen
                if to_drop > 0:
                    self.dropped += to_drop
            self._dq.extend(seq)

    @classmethod
    def from_seq(cls, maxlen, seq):
        seq = tuple(seq)
        bounded_list = cls(maxlen)
        bounded_list.extend(seq)
        return bounded_list


@deprecated(version="1.4.0")  # type: ignore
class BoundedDict(MutableMapping):
    """An ordered dict with a fixed max capacity.

    Oldest elements are dropped when the dict is full and a new element is
    added.
    """

    def __init__(self, maxlen: Optional[int]):
        if maxlen is not None:
            if not isinstance(maxlen, int):
                raise ValueError
            if maxlen < 0:
                raise ValueError
        self.maxlen = maxlen
        self.dropped = 0
        self._dict = {}  # type: dict
        self._lock = threading.Lock()  # type: threading.Lock

    def __repr__(self):
        return (
            f"{type(self).__name__}({dict(self._dict)}, maxlen={self.maxlen})"
        )

    def __getitem__(self, key):
        return self._dict[key]

    def __setitem__(self, key, value):
        with self._lock:
            if self.maxlen is not None and self.maxlen == 0:
                self.dropped += 1
                return

            if key in self._dict:
                del self._dict[key]
            elif self.maxlen is not None and len(self._dict) == self.maxlen:
                del self._dict[next(iter(self._dict.keys()))]
                self.dropped += 1
            self._dict[key] = value

    def __delitem__(self, key):
        del self._dict[key]

    def __iter__(self):
        with self._lock:
            return iter(self._dict.copy())

    def __len__(self):
        return len(self._dict)

    @classmethod
    def from_map(cls, maxlen, mapping):
        mapping = dict(mapping)
        bounded_dict = cls(maxlen)
        for key, value in mapping.items():
            bounded_dict[key] = value
        return bounded_dict
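A short sketch of the drop accounting these containers perform (the values are illustrative):

.. code:: python

    from opentelemetry.sdk.util import BoundedList

    bl = BoundedList(maxlen=2)
    bl.extend([1, 2, 3])       # one element more than the capacity allows
    assert list(bl) == [2, 3]  # the oldest element was evicted...
    assert bl.dropped == 1     # ...and counted as dropped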
@@ -0,0 +1,74 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import (
    Iterable,
    Iterator,
    Mapping,
    MutableMapping,
    Sequence,
    TypeVar,
    overload,
)

from opentelemetry.util.types import AttributesAsKey, AttributeValue

_T = TypeVar("_T")
_KT = TypeVar("_KT")
_VT = TypeVar("_VT")

def ns_to_iso_str(nanoseconds: int) -> str: ...
def get_dict_as_key(
    labels: Mapping[str, AttributeValue],
) -> AttributesAsKey: ...

# pylint: disable=no-self-use
class BoundedList(Sequence[_T]):
    """An append only list with a fixed max size.

    Calls to `append` and `extend` will drop the oldest elements if there is
    not enough room.
    """

    dropped: int
    def __init__(self, maxlen: int): ...
    def insert(self, index: int, value: _T) -> None: ...
    @overload
    def __getitem__(self, i: int) -> _T: ...
    @overload
    def __getitem__(self, s: slice) -> Sequence[_T]: ...
    def __len__(self) -> int: ...
    def append(self, item: _T) -> None: ...
    def extend(self, seq: Sequence[_T]) -> None: ...
    @classmethod
    def from_seq(cls, maxlen: int, seq: Iterable[_T]) -> BoundedList[_T]: ...  # pylint: disable=undefined-variable

class BoundedDict(MutableMapping[_KT, _VT]):
    """An ordered dict with a fixed max capacity.

    Oldest elements are dropped when the dict is full and a new element is
    added.
    """

    dropped: int
    def __init__(self, maxlen: int): ...
    def __getitem__(self, k: _KT) -> _VT: ...
    def __setitem__(self, k: _KT, v: _VT) -> None: ...
    def __delitem__(self, v: _KT) -> None: ...
    def __iter__(self) -> Iterator[_KT]: ...
    def __len__(self) -> int: ...
    @classmethod
    def from_map(
        cls, maxlen: int, mapping: Mapping[_KT, _VT]
    ) -> BoundedDict[_KT, _VT]: ...  # pylint: disable=undefined-variable
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,167 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from json import dumps
from typing import Optional

from deprecated import deprecated

from opentelemetry.attributes import BoundedAttributes
from opentelemetry.util.types import Attributes


class InstrumentationInfo:
    """Immutable information about an instrumentation library module.

    See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these
    properties.
    """

    __slots__ = ("_name", "_version", "_schema_url")

    @deprecated(version="1.11.1", reason="You should use InstrumentationScope")
    def __init__(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
    ):
        self._name = name
        self._version = version
        if schema_url is None:
            schema_url = ""
        self._schema_url = schema_url

    def __repr__(self):
        return f"{type(self).__name__}({self._name}, {self._version}, {self._schema_url})"

    def __hash__(self):
        return hash((self._name, self._version, self._schema_url))

    def __eq__(self, value):
        return type(value) is type(self) and (
            self._name,
            self._version,
            self._schema_url,
        ) == (value._name, value._version, value._schema_url)

    def __lt__(self, value):
        if type(value) is not type(self):
            return NotImplemented
        return (self._name, self._version, self._schema_url) < (
            value._name,
            value._version,
            value._schema_url,
        )

    @property
    def schema_url(self) -> Optional[str]:
        return self._schema_url

    @property
    def version(self) -> Optional[str]:
        return self._version

    @property
    def name(self) -> str:
        return self._name


class InstrumentationScope:
    """A logical unit of the application code with which the emitted telemetry can be
    associated.

    See `opentelemetry.trace.TracerProvider.get_tracer` for the meaning of these
    properties.
    """

    __slots__ = ("_name", "_version", "_schema_url", "_attributes")

    def __init__(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> None:
        self._name = name
        self._version = version
        if schema_url is None:
            schema_url = ""
        self._schema_url = schema_url
        self._attributes = BoundedAttributes(attributes=attributes)

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self._name}, {self._version}, {self._schema_url}, {self._attributes})"

    def __hash__(self) -> int:
        return hash((self._name, self._version, self._schema_url))

    def __eq__(self, value: object) -> bool:
        if not isinstance(value, InstrumentationScope):
            return NotImplemented
        return (
            self._name,
            self._version,
            self._schema_url,
            self._attributes,
        ) == (
            value._name,
            value._version,
            value._schema_url,
            value._attributes,
        )

    def __lt__(self, value: object) -> bool:
        if not isinstance(value, InstrumentationScope):
            return NotImplemented
        return (
            self._name,
            self._version,
            self._schema_url,
            self._attributes,
        ) < (
            value._name,
            value._version,
            value._schema_url,
            value._attributes,
        )

    @property
    def schema_url(self) -> Optional[str]:
        return self._schema_url

    @property
    def version(self) -> Optional[str]:
        return self._version

    @property
    def name(self) -> str:
        return self._name

    @property
    def attributes(self) -> Attributes:
        return self._attributes

    def to_json(self, indent: Optional[int] = 4) -> str:
        return dumps(
            {
                "name": self._name,
                "version": self._version,
                "schema_url": self._schema_url,
                "attributes": (
                    dict(self._attributes) if bool(self._attributes) else None
                ),
            },
            indent=indent,
        )
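A quick sketch of the scope's JSON view (the name and version are illustrative):

.. code:: python

    from opentelemetry.sdk.util.instrumentation import InstrumentationScope

    scope = InstrumentationScope("my.library", version="1.2.3")
    print(scope.to_json(indent=None))
    # {"name": "my.library", "version": "1.2.3", "schema_url": "", "attributes": null}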
@@ -0,0 +1,15 @@
# Copyright The OpenTelemetry Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__version__ = "1.32.1"
Binary file not shown.