structure saas with tools
This commit is contained in:
@@ -0,0 +1,221 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from logging import getLogger
|
||||
from os import environ
|
||||
from typing import Any, Optional, cast
|
||||
|
||||
from opentelemetry._logs import LogRecord
|
||||
from opentelemetry._logs.severity import SeverityNumber
|
||||
from opentelemetry.environment_variables import (
|
||||
_OTEL_PYTHON_EVENT_LOGGER_PROVIDER,
|
||||
)
|
||||
from opentelemetry.trace.span import TraceFlags
|
||||
from opentelemetry.util._once import Once
|
||||
from opentelemetry.util._providers import _load_provider
|
||||
from opentelemetry.util.types import Attributes
|
||||
|
||||
_logger = getLogger(__name__)
|
||||
|
||||
|
||||
class Event(LogRecord):
    """A `LogRecord` specialized for events.

    The event name is stored both as the ``name`` attribute of the instance
    and under the ``"event.name"`` key of the record attributes.
    """

    def __init__(
        self,
        name: str,
        timestamp: Optional[int] = None,
        trace_id: Optional[int] = None,
        span_id: Optional[int] = None,
        trace_flags: Optional["TraceFlags"] = None,
        body: Optional[Any] = None,
        severity_number: Optional[SeverityNumber] = None,
        attributes: Optional[Attributes] = None,
    ):
        # Merge the caller-supplied attributes with the mandatory event name;
        # "event.name" always wins over a caller-supplied key of that name.
        merged_attributes = dict(attributes) if attributes else {}
        merged_attributes["event.name"] = name
        super().__init__(
            timestamp=timestamp,
            trace_id=trace_id,
            span_id=span_id,
            trace_flags=trace_flags,
            body=body,  # type: ignore
            severity_number=severity_number,
            attributes=merged_attributes,
        )
        self.name = name
|
||||
|
||||
|
||||
class EventLogger(ABC):
    """Abstract base class for emitting :class:`Event` objects.

    Stores the identity of the instrumentation scope (name, version,
    schema URL, attributes) for use by concrete implementations.
    """

    def __init__(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ):
        # Instrumentation-scope identity; read by concrete subclasses.
        self._name = name
        self._version = version
        self._schema_url = schema_url
        self._attributes = attributes

    @abstractmethod
    def emit(self, event: "Event") -> None:
        """Emits a :class:`Event` representing an event."""
|
||||
|
||||
|
||||
class NoOpEventLogger(EventLogger):
    """EventLogger that silently discards every event.

    Used when no SDK implementation is configured.
    """

    def emit(self, event: Event) -> None:
        """Accept *event* and do nothing with it."""
        return None
|
||||
|
||||
|
||||
class ProxyEventLogger(EventLogger):
    """Stand-in EventLogger handed out before a real provider is configured.

    Once the module-global ``_EVENT_LOGGER_PROVIDER`` is installed, the
    first emit resolves (and caches) a real EventLogger from it; until
    then events are routed to a no-op logger.
    """

    def __init__(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ):
        super().__init__(
            name=name,
            version=version,
            schema_url=schema_url,
            attributes=attributes,
        )
        # Lazily resolved once the global provider is set.
        self._real_event_logger: Optional[EventLogger] = None
        self._noop_event_logger = NoOpEventLogger(name)

    @property
    def _event_logger(self) -> EventLogger:
        # Fast path: already resolved against the real provider.
        if self._real_event_logger:
            return self._real_event_logger

        if _EVENT_LOGGER_PROVIDER:
            # A real provider has been installed since this proxy was
            # created: resolve and cache the real logger.
            self._real_event_logger = _EVENT_LOGGER_PROVIDER.get_event_logger(
                self._name,
                self._version,
                self._schema_url,
                self._attributes,
            )
            return self._real_event_logger
        return self._noop_event_logger

    def emit(self, event: Event) -> None:
        # Route through the property so resolution happens lazily.
        self._event_logger.emit(event)
|
||||
|
||||
|
||||
class EventLoggerProvider(ABC):
    """Entry point of the events API; provides access to EventLogger instances."""

    @abstractmethod
    def get_event_logger(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> EventLogger:
        """Returns an EventLogger for use by the given instrumentation scope."""
|
||||
|
||||
|
||||
class NoOpEventLoggerProvider(EventLoggerProvider):
    """Provider that only hands out no-op event loggers.

    Used when no SDK implementation is configured.
    """

    def get_event_logger(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> EventLogger:
        """Return a :class:`NoOpEventLogger` for the requested scope."""
        return NoOpEventLogger(
            name,
            version=version,
            schema_url=schema_url,
            attributes=attributes,
        )
|
||||
|
||||
|
||||
class ProxyEventLoggerProvider(EventLoggerProvider):
    """Provider returned before the global one is installed.

    Delegates to the real provider when available; otherwise hands out
    :class:`ProxyEventLogger` instances that resolve lazily on first emit.
    """

    def get_event_logger(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> EventLogger:
        if _EVENT_LOGGER_PROVIDER:
            # A real provider is already installed: delegate directly.
            return _EVENT_LOGGER_PROVIDER.get_event_logger(
                name,
                version=version,
                schema_url=schema_url,
                attributes=attributes,
            )
        # No provider yet: return a proxy that resolves once one is set.
        return ProxyEventLogger(
            name,
            version=version,
            schema_url=schema_url,
            attributes=attributes,
        )
|
||||
|
||||
|
||||
# Guards the one-time installation of the global EventLoggerProvider.
_EVENT_LOGGER_PROVIDER_SET_ONCE = Once()
# The installed global provider; None until a provider has been set.
_EVENT_LOGGER_PROVIDER: Optional[EventLoggerProvider] = None
# Fallback returned before a real provider is configured; its loggers
# resolve lazily once one is installed.
_PROXY_EVENT_LOGGER_PROVIDER = ProxyEventLoggerProvider()
|
||||
|
||||
|
||||
def get_event_logger_provider() -> EventLoggerProvider:
    """Gets the current global :class:`~.EventLoggerProvider` object."""
    global _EVENT_LOGGER_PROVIDER  # pylint: disable=global-variable-not-assigned
    if _EVENT_LOGGER_PROVIDER is None:
        if _OTEL_PYTHON_EVENT_LOGGER_PROVIDER not in environ:
            # Nothing configured: hand out the proxy so a provider set
            # later still takes effect for callers holding this reference.
            return _PROXY_EVENT_LOGGER_PROVIDER

        # Load the provider named by the environment variable.
        event_logger_provider: EventLoggerProvider = _load_provider(  # type: ignore
            _OTEL_PYTHON_EVENT_LOGGER_PROVIDER, "event_logger_provider"
        )

        _set_event_logger_provider(event_logger_provider, log=False)

    # _EVENT_LOGGER_PROVIDER will have been set by one thread
    return cast("EventLoggerProvider", _EVENT_LOGGER_PROVIDER)
|
||||
|
||||
|
||||
def _set_event_logger_provider(
    event_logger_provider: EventLoggerProvider, log: bool
) -> None:
    """Install *event_logger_provider* as the global provider, at most once.

    Args:
        event_logger_provider: The provider to install.
        log: Whether to warn when a provider was already installed.
    """

    def set_elp() -> None:
        global _EVENT_LOGGER_PROVIDER  # pylint: disable=global-statement
        _EVENT_LOGGER_PROVIDER = event_logger_provider

    # Once() guarantees the assignment happens at most one time process-wide.
    did_set = _EVENT_LOGGER_PROVIDER_SET_ONCE.do_once(set_elp)

    if log and not did_set:
        _logger.warning(
            "Overriding of current EventLoggerProvider is not allowed"
        )
|
||||
|
||||
|
||||
def set_event_logger_provider(
    event_logger_provider: EventLoggerProvider,
) -> None:
    """Sets the current global :class:`~.EventLoggerProvider` object.

    This can only be done once, a warning will be logged if any further
    attempt is made.
    """
    _set_event_logger_provider(event_logger_provider, log=True)
|
||||
|
||||
|
||||
def get_event_logger(
    name: str,
    version: Optional[str] = None,
    schema_url: Optional[str] = None,
    attributes: Optional[Attributes] = None,
    event_logger_provider: Optional[EventLoggerProvider] = None,
) -> "EventLogger":
    """Returns an `EventLogger` for use within a python process.

    If the event_logger_provider param is omitted the currently configured
    global provider is used.
    """
    if event_logger_provider is None:
        event_logger_provider = get_event_logger_provider()
    return event_logger_provider.get_event_logger(
        name,
        version,
        schema_url,
        attributes,
    )
|
||||
Binary file not shown.
@@ -0,0 +1,59 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
The OpenTelemetry logging API describes the classes used to generate logs and events.
|
||||
|
||||
The :class:`.LoggerProvider` provides users access to the :class:`.Logger`.
|
||||
|
||||
This module provides abstract (i.e. unimplemented) classes required for
|
||||
logging, and a concrete no-op implementation :class:`.NoOpLogger` that allows applications
|
||||
to use the API package alone without a supporting implementation.
|
||||
|
||||
To get a logger, you need to provide the package name from which you are
|
||||
calling the logging APIs to OpenTelemetry by calling `LoggerProvider.get_logger`
|
||||
with the calling module name and the version of your package.
|
||||
|
||||
The following code shows how to obtain a logger using the global :class:`.LoggerProvider`::
|
||||
|
||||
from opentelemetry._logs import get_logger
|
||||
|
||||
logger = get_logger("example-logger")
|
||||
|
||||
.. versionadded:: 1.15.0
|
||||
"""
|
||||
|
||||
from opentelemetry._logs._internal import (
|
||||
Logger,
|
||||
LoggerProvider,
|
||||
LogRecord,
|
||||
NoOpLogger,
|
||||
NoOpLoggerProvider,
|
||||
get_logger,
|
||||
get_logger_provider,
|
||||
set_logger_provider,
|
||||
)
|
||||
from opentelemetry._logs.severity import SeverityNumber, std_to_otel
|
||||
|
||||
__all__ = [
|
||||
"Logger",
|
||||
"LoggerProvider",
|
||||
"LogRecord",
|
||||
"NoOpLogger",
|
||||
"NoOpLoggerProvider",
|
||||
"get_logger",
|
||||
"get_logger_provider",
|
||||
"set_logger_provider",
|
||||
"SeverityNumber",
|
||||
"std_to_otel",
|
||||
]
|
||||
Binary file not shown.
@@ -0,0 +1,292 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
"""
|
||||
The OpenTelemetry logging API describes the classes used to generate logs and events.
|
||||
|
||||
The :class:`.LoggerProvider` provides users access to the :class:`.Logger`.
|
||||
|
||||
This module provides abstract (i.e. unimplemented) classes required for
|
||||
logging, and a concrete no-op implementation :class:`.NoOpLogger` that allows applications
|
||||
to use the API package alone without a supporting implementation.
|
||||
|
||||
To get a logger, you need to provide the package name from which you are
|
||||
calling the logging APIs to OpenTelemetry by calling `LoggerProvider.get_logger`
|
||||
with the calling module name and the version of your package.
|
||||
|
||||
The following code shows how to obtain a logger using the global :class:`.LoggerProvider`::
|
||||
|
||||
from opentelemetry._logs import get_logger
|
||||
|
||||
logger = get_logger("example-logger")
|
||||
|
||||
.. versionadded:: 1.15.0
|
||||
"""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from logging import getLogger
|
||||
from os import environ
|
||||
from time import time_ns
|
||||
from typing import Any, Optional, cast
|
||||
|
||||
from opentelemetry._logs.severity import SeverityNumber
|
||||
from opentelemetry.environment_variables import _OTEL_PYTHON_LOGGER_PROVIDER
|
||||
from opentelemetry.trace.span import TraceFlags
|
||||
from opentelemetry.util._once import Once
|
||||
from opentelemetry.util._providers import _load_provider
|
||||
from opentelemetry.util.types import Attributes
|
||||
|
||||
_logger = getLogger(__name__)
|
||||
|
||||
|
||||
class LogRecord(ABC):
    """A LogRecord instance represents an event being logged.

    LogRecord instances are created and emitted via `Logger`
    every time something is logged. They contain all the information
    pertinent to the event being logged.
    """

    def __init__(
        self,
        timestamp: Optional[int] = None,
        observed_timestamp: Optional[int] = None,
        trace_id: Optional[int] = None,
        span_id: Optional[int] = None,
        trace_flags: Optional["TraceFlags"] = None,
        severity_text: Optional[str] = None,
        severity_number: Optional[SeverityNumber] = None,
        body: Optional[Any] = None,
        attributes: Optional["Attributes"] = None,
    ):
        self.timestamp = timestamp
        # Default the observation time to "now" (ns) when not supplied.
        self.observed_timestamp = (
            time_ns() if observed_timestamp is None else observed_timestamp
        )
        self.trace_id = trace_id
        self.span_id = span_id
        self.trace_flags = trace_flags
        self.severity_text = severity_text
        self.severity_number = severity_number
        self.body = body  # type: ignore
        self.attributes = attributes
|
||||
|
||||
|
||||
class Logger(ABC):
    """Handles emitting events and logs via `LogRecord`."""

    def __init__(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> None:
        """Store the instrumentation-scope identity of this logger.

        Args:
            name: Name of the instrumenting module/library.
            version: Optional version of the instrumenting library.
            schema_url: Optional Schema URL of the emitted telemetry.
            attributes: Optional attributes of the instrumentation scope.
        """
        super().__init__()
        self._name = name
        self._version = version
        self._schema_url = schema_url
        self._attributes = attributes

    @abstractmethod
    def emit(self, record: "LogRecord") -> None:
        """Emits a :class:`LogRecord` representing a log to the processing pipeline."""
|
||||
|
||||
|
||||
class NoOpLogger(Logger):
    """The default Logger used when no Logger implementation is available.

    All operations are no-op.
    """

    def emit(self, record: "LogRecord") -> None:
        """Discard *record* without any processing."""
        return None
|
||||
|
||||
|
||||
class ProxyLogger(Logger):
    """Stand-in Logger handed out before a real LoggerProvider is configured.

    Once the module-global ``_LOGGER_PROVIDER`` is installed, the first emit
    resolves (and caches) a real Logger from it; until then records are
    routed to a no-op logger.
    """

    def __init__(  # pylint: disable=super-init-not-called
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ):
        self._name = name
        self._version = version
        self._schema_url = schema_url
        self._attributes = attributes
        # Lazily resolved once the global provider is set.
        self._real_logger: Optional[Logger] = None
        self._noop_logger = NoOpLogger(name)

    @property
    def _logger(self) -> Logger:
        # Fast path: already resolved against the real provider.
        if self._real_logger:
            return self._real_logger

        if _LOGGER_PROVIDER:
            # A real provider has been installed since this proxy was
            # created: resolve and cache the real logger.
            self._real_logger = _LOGGER_PROVIDER.get_logger(
                self._name,
                self._version,
                self._schema_url,
                self._attributes,
            )
            return self._real_logger
        return self._noop_logger

    def emit(self, record: LogRecord) -> None:
        # Route through the property so resolution happens lazily.
        self._logger.emit(record)
|
||||
|
||||
|
||||
class LoggerProvider(ABC):
    """
    LoggerProvider is the entry point of the API. It provides access to Logger instances.
    """

    @abstractmethod
    def get_logger(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> Logger:
        """Returns a `Logger` for use by the given instrumentation library.

        For any two calls it is undefined whether the same or different
        `Logger` instances are returned, even for different library names.

        This function may return different `Logger` types (e.g. a no-op logger
        vs. a functional logger).

        Args:
            name: The name of the instrumenting module.
                ``__name__`` may not be used as this can result in
                different logger names if the loggers are in different files.
                It is better to use a fixed string that can be imported where
                needed and used consistently as the name of the logger.

                This should *not* be the name of the module that is
                instrumented but the name of the module doing the instrumentation.
                E.g., instead of ``"requests"``, use
                ``"opentelemetry.instrumentation.requests"``.

            version: Optional. The version string of the
                instrumenting library.  Usually this should be the same as
                ``importlib.metadata.version(instrumenting_library_name)``.

            schema_url: Optional. Specifies the Schema URL of the emitted telemetry.

            attributes: Optional. Attributes of the instrumentation scope.
        """
|
||||
|
||||
|
||||
class NoOpLoggerProvider(LoggerProvider):
    """The default LoggerProvider used when no LoggerProvider implementation is available."""

    def get_logger(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> Logger:
        """Returns a NoOpLogger."""
        return NoOpLogger(
            name,
            version=version,
            schema_url=schema_url,
            attributes=attributes,
        )
|
||||
|
||||
|
||||
class ProxyLoggerProvider(LoggerProvider):
    """Provider returned before the global one is installed.

    Delegates to the real provider when available; otherwise hands out
    :class:`ProxyLogger` instances that resolve lazily on first emit.
    """

    def get_logger(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> Logger:
        if _LOGGER_PROVIDER:
            # A real provider is already installed: delegate directly.
            return _LOGGER_PROVIDER.get_logger(
                name,
                version=version,
                schema_url=schema_url,
                attributes=attributes,
            )
        # No provider yet: return a proxy that resolves once one is set.
        return ProxyLogger(
            name,
            version=version,
            schema_url=schema_url,
            attributes=attributes,
        )
|
||||
|
||||
|
||||
# Guards the one-time installation of the global LoggerProvider.
_LOGGER_PROVIDER_SET_ONCE = Once()
# The installed global provider; None until a provider has been set.
_LOGGER_PROVIDER: Optional[LoggerProvider] = None
# Fallback returned before a real provider is configured; its loggers
# resolve lazily once one is installed.
_PROXY_LOGGER_PROVIDER = ProxyLoggerProvider()
|
||||
|
||||
|
||||
def get_logger_provider() -> LoggerProvider:
    """Gets the current global :class:`~.LoggerProvider` object."""
    global _LOGGER_PROVIDER  # pylint: disable=global-variable-not-assigned
    if _LOGGER_PROVIDER is None:
        if _OTEL_PYTHON_LOGGER_PROVIDER not in environ:
            # Nothing configured: hand out the proxy so a provider set
            # later still takes effect for callers holding this reference.
            return _PROXY_LOGGER_PROVIDER

        # Load the provider named by the environment variable.
        logger_provider: LoggerProvider = _load_provider(  # type: ignore
            _OTEL_PYTHON_LOGGER_PROVIDER, "logger_provider"
        )
        _set_logger_provider(logger_provider, log=False)

    # _LOGGER_PROVIDER will have been set by one thread
    return cast("LoggerProvider", _LOGGER_PROVIDER)
|
||||
|
||||
|
||||
def _set_logger_provider(logger_provider: LoggerProvider, log: bool) -> None:
    """Install *logger_provider* as the global provider, at most once.

    Args:
        logger_provider: The provider to install.
        log: Whether to warn when a provider was already installed.
    """

    def set_lp() -> None:
        global _LOGGER_PROVIDER  # pylint: disable=global-statement
        _LOGGER_PROVIDER = logger_provider

    # Once() guarantees the assignment happens at most one time process-wide.
    did_set = _LOGGER_PROVIDER_SET_ONCE.do_once(set_lp)

    if log and not did_set:
        _logger.warning("Overriding of current LoggerProvider is not allowed")
|
||||
|
||||
|
||||
def set_logger_provider(logger_provider: LoggerProvider) -> None:
    """Sets the current global :class:`~.LoggerProvider` object.

    This can only be done once, a warning will be logged if any further attempt
    is made.
    """
    # log=True: warn the caller when the provider was already installed.
    _set_logger_provider(logger_provider, log=True)
|
||||
|
||||
|
||||
def get_logger(
    instrumenting_module_name: str,
    instrumenting_library_version: str = "",
    logger_provider: Optional[LoggerProvider] = None,
    schema_url: Optional[str] = None,
    attributes: Optional[Attributes] = None,
) -> "Logger":
    """Returns a `Logger` for use within a python process.

    This function is a convenience wrapper for
    opentelemetry.sdk._logs.LoggerProvider.get_logger.

    If logger_provider param is omitted the current configured one is used.

    Args:
        instrumenting_module_name: The name of the instrumenting module.
        instrumenting_library_version: Optional. The version of the
            instrumenting library.
        logger_provider: Optional. The LoggerProvider to use; defaults to the
            globally configured provider.
        schema_url: Optional. The Schema URL of the emitted telemetry.
        attributes: Optional. Attributes of the instrumentation scope.
    """
    if logger_provider is None:
        logger_provider = get_logger_provider()
    return logger_provider.get_logger(
        instrumenting_module_name,
        instrumenting_library_version,
        schema_url,
        attributes,
    )
|
||||
Binary file not shown.
@@ -0,0 +1,115 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import enum
|
||||
|
||||
|
||||
class SeverityNumber(enum.Enum):
    """Numerical value of severity.

    Smaller numerical values correspond to less severe events
    (such as debug events), larger numerical values correspond
    to more severe events (such as errors and critical events).

    See the `Log Data Model`_ spec for more info and how to map the
    severity from source format to OTLP Model.

    .. _Log Data Model: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-severitynumber
    """

    # 0: severity was not provided.
    UNSPECIFIED = 0
    # 1-4: trace-level events.
    TRACE = 1
    TRACE2 = 2
    TRACE3 = 3
    TRACE4 = 4
    # 5-8: debug-level events.
    DEBUG = 5
    DEBUG2 = 6
    DEBUG3 = 7
    DEBUG4 = 8
    # 9-12: informational events.
    INFO = 9
    INFO2 = 10
    INFO3 = 11
    INFO4 = 12
    # 13-16: warning events.
    WARN = 13
    WARN2 = 14
    WARN3 = 15
    WARN4 = 16
    # 17-20: error events.
    ERROR = 17
    ERROR2 = 18
    ERROR3 = 19
    ERROR4 = 20
    # 21-24: fatal events.
    FATAL = 21
    FATAL2 = 22
    FATAL3 = 23
    FATAL4 = 24
|
||||
|
||||
|
||||
# Maps stdlib ``logging`` level numbers 10-53 to OTel severity numbers.
# Each decade (DEBUG=10s, INFO=20s, WARN=30s, ERROR=40s, FATAL=50s) maps its
# first four values to the matching SeverityNumber variants; the remaining
# values of the decade clamp to the most severe "*4" variant.
_STD_TO_OTEL = {
    10: SeverityNumber.DEBUG,
    11: SeverityNumber.DEBUG2,
    12: SeverityNumber.DEBUG3,
    13: SeverityNumber.DEBUG4,
    14: SeverityNumber.DEBUG4,
    15: SeverityNumber.DEBUG4,
    16: SeverityNumber.DEBUG4,
    17: SeverityNumber.DEBUG4,
    18: SeverityNumber.DEBUG4,
    19: SeverityNumber.DEBUG4,
    20: SeverityNumber.INFO,
    21: SeverityNumber.INFO2,
    22: SeverityNumber.INFO3,
    23: SeverityNumber.INFO4,
    24: SeverityNumber.INFO4,
    25: SeverityNumber.INFO4,
    26: SeverityNumber.INFO4,
    27: SeverityNumber.INFO4,
    28: SeverityNumber.INFO4,
    29: SeverityNumber.INFO4,
    30: SeverityNumber.WARN,
    31: SeverityNumber.WARN2,
    32: SeverityNumber.WARN3,
    33: SeverityNumber.WARN4,
    34: SeverityNumber.WARN4,
    35: SeverityNumber.WARN4,
    36: SeverityNumber.WARN4,
    37: SeverityNumber.WARN4,
    38: SeverityNumber.WARN4,
    39: SeverityNumber.WARN4,
    40: SeverityNumber.ERROR,
    41: SeverityNumber.ERROR2,
    42: SeverityNumber.ERROR3,
    43: SeverityNumber.ERROR4,
    44: SeverityNumber.ERROR4,
    45: SeverityNumber.ERROR4,
    46: SeverityNumber.ERROR4,
    47: SeverityNumber.ERROR4,
    48: SeverityNumber.ERROR4,
    49: SeverityNumber.ERROR4,
    50: SeverityNumber.FATAL,
    51: SeverityNumber.FATAL2,
    52: SeverityNumber.FATAL3,
    53: SeverityNumber.FATAL4,
}
|
||||
|
||||
|
||||
def std_to_otel(levelno: int) -> SeverityNumber:
    """
    Map python log levelno as defined in https://docs.python.org/3/library/logging.html#logging-levels
    to OTel log severity number as defined here: https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#field-severitynumber
    """
    if levelno < 10:
        return SeverityNumber.UNSPECIFIED
    # Levels above the table's top entry (53) all clamp to FATAL4.
    return _STD_TO_OTEL[min(levelno, 53)]
|
||||
Binary file not shown.
@@ -0,0 +1,215 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import threading
|
||||
from collections import OrderedDict
|
||||
from collections.abc import MutableMapping
|
||||
from typing import Mapping, Optional, Sequence, Tuple, Union
|
||||
|
||||
from opentelemetry.util import types
|
||||
|
||||
# bytes are accepted as a user supplied value for attributes but
|
||||
# decoded to strings internally.
|
||||
_VALID_ATTR_VALUE_TYPES = (bool, str, bytes, int, float)
|
||||
# AnyValue possible values
|
||||
_VALID_ANY_VALUE_TYPES = (
|
||||
type(None),
|
||||
bool,
|
||||
bytes,
|
||||
int,
|
||||
float,
|
||||
str,
|
||||
Sequence,
|
||||
Mapping,
|
||||
)
|
||||
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _clean_attribute(
    key: str, value: types.AttributeValue, max_len: Optional[int]
) -> Optional[Union[types.AttributeValue, Tuple[Union[str, int, float], ...]]]:
    """Checks if attribute value is valid and cleans it if required.

    The function returns the cleaned value or None if the value is not valid.

    An attribute value is valid if it is either:
        - A primitive type: string, boolean, double precision floating
            point (IEEE 754-1985) or integer.
        - An array of primitive type values. The array MUST be homogeneous,
            i.e. it MUST NOT contain values of different types.

    An attribute needs cleansing if:
        - Its length is greater than the maximum allowed length.
        - It needs to be encoded/decoded e.g, bytes to strings.
    """

    # Keys must be non-empty strings.
    if not (key and isinstance(key, str)):
        _logger.warning("invalid key `%s`. must be non-empty string.", key)
        return None

    if isinstance(value, _VALID_ATTR_VALUE_TYPES):
        return _clean_attribute_value(value, max_len)

    # str/bytes are in _VALID_ATTR_VALUE_TYPES and were handled above, so
    # this branch only sees true containers (list, tuple, ...).
    if isinstance(value, Sequence):
        sequence_first_valid_type = None
        cleaned_seq = []

        for element in value:
            element = _clean_attribute_value(element, max_len)  # type: ignore
            if element is None:
                # None entries are preserved in the cleaned sequence and do
                # not participate in the homogeneity check.
                cleaned_seq.append(element)
                continue

            element_type = type(element)
            # Reject attribute value if sequence contains a value with an incompatible type.
            if element_type not in _VALID_ATTR_VALUE_TYPES:
                _logger.warning(
                    "Invalid type %s in attribute '%s' value sequence. Expected one of "
                    "%s or None",
                    element_type.__name__,
                    key,
                    [
                        valid_type.__name__
                        for valid_type in _VALID_ATTR_VALUE_TYPES
                    ],
                )
                return None

            # The type of the sequence must be homogeneous. The first non-None
            # element determines the type of the sequence
            if sequence_first_valid_type is None:
                sequence_first_valid_type = element_type
            # use equality instead of isinstance as isinstance(True, int) evaluates to True
            elif element_type != sequence_first_valid_type:
                _logger.warning(
                    "Attribute %r mixes types %s and %s in attribute value sequence",
                    key,
                    sequence_first_valid_type.__name__,
                    type(element).__name__,
                )
                return None

            cleaned_seq.append(element)

        # Freeze mutable sequences defensively
        return tuple(cleaned_seq)

    # Neither a primitive nor a sequence: invalid attribute value.
    _logger.warning(
        "Invalid type %s for attribute '%s' value. Expected one of %s or a "
        "sequence of those types",
        type(value).__name__,
        key,
        [valid_type.__name__ for valid_type in _VALID_ATTR_VALUE_TYPES],
    )
    return None
|
||||
|
||||
|
||||
def _clean_attribute_value(
    value: types.AttributeValue, limit: Optional[int]
) -> Optional[types.AttributeValue]:
    """Normalize a single attribute value.

    ``bytes`` are decoded to ``str``; undecodable bytes become None.
    String values are truncated to *limit* characters when a limit is set.
    """
    if value is None:
        return None

    if isinstance(value, bytes):
        # Attributes are transported as text: decode, or drop on failure.
        try:
            value = value.decode()
        except UnicodeDecodeError:
            _logger.warning("Byte attribute could not be decoded.")
            return None

    if isinstance(value, str) and limit is not None:
        return value[:limit]
    return value
|
||||
|
||||
|
||||
class BoundedAttributes(MutableMapping):  # type: ignore
    """An ordered dict with a fixed max capacity.

    Oldest elements are dropped when the dict is full and a new element is
    added.
    """

    def __init__(
        self,
        maxlen: Optional[int] = None,
        attributes: types.Attributes = None,
        immutable: bool = True,
        max_value_len: Optional[int] = None,
    ):
        # maxlen=None means unbounded; otherwise it must be a non-negative int.
        if maxlen is not None:
            if not isinstance(maxlen, int) or maxlen < 0:
                raise ValueError(
                    "maxlen must be valid int greater or equal to 0"
                )
        self.maxlen = maxlen
        # Count of entries rejected or evicted because of maxlen.
        self.dropped = 0
        # Per-value string-length cap applied by _clean_attribute.
        self.max_value_len = max_value_len
        # OrderedDict is not used until the maxlen is reached for efficiency.

        self._dict: Union[
            MutableMapping[str, types.AttributeValue],
            OrderedDict[str, types.AttributeValue],
        ] = {}
        self._lock = threading.RLock()
        if attributes:
            for key, value in attributes.items():
                self[key] = value
        # Set last: while __init__ populates the mapping above, __setitem__
        # must not yet see the instance as immutable.
        self._immutable = immutable

    def __repr__(self) -> str:
        return f"{dict(self._dict)}"

    def __getitem__(self, key: str) -> types.AttributeValue:
        return self._dict[key]

    def __setitem__(self, key: str, value: types.AttributeValue) -> None:
        # getattr guard: _immutable does not exist yet while __init__ is
        # still populating the mapping.
        if getattr(self, "_immutable", False):  # type: ignore
            raise TypeError
        with self._lock:
            if self.maxlen is not None and self.maxlen == 0:
                # Zero capacity: every insert is dropped immediately.
                self.dropped += 1
                return

            value = _clean_attribute(key, value, self.max_value_len)  # type: ignore
            if value is not None:
                if key in self._dict:
                    # Re-inserting a key moves it to the "newest" position.
                    del self._dict[key]
                elif (
                    self.maxlen is not None and len(self._dict) == self.maxlen
                ):
                    if not isinstance(self._dict, OrderedDict):
                        # Switch to OrderedDict lazily, only once eviction
                        # actually starts.
                        self._dict = OrderedDict(self._dict)
                    # Evict the oldest entry to make room.
                    self._dict.popitem(last=False)  # type: ignore
                    self.dropped += 1

                self._dict[key] = value  # type: ignore

    def __delitem__(self, key: str) -> None:
        if getattr(self, "_immutable", False):  # type: ignore
            raise TypeError
        with self._lock:
            del self._dict[key]

    def __iter__(self):  # type: ignore
        with self._lock:
            # Iterate over a snapshot so concurrent mutation cannot break
            # the iteration.
            return iter(self._dict.copy())  # type: ignore

    def __len__(self) -> int:
        return len(self._dict)

    def copy(self):  # type: ignore
        # Returns a shallow copy of the underlying dict (not a BoundedAttributes).
        return self._dict.copy()  # type: ignore
|
||||
Binary file not shown.
@@ -0,0 +1,136 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from logging import getLogger
|
||||
from re import compile
|
||||
from types import MappingProxyType
|
||||
from typing import Dict, Mapping, Optional
|
||||
|
||||
from opentelemetry.context import create_key, get_value, set_value
|
||||
from opentelemetry.context.context import Context
|
||||
from opentelemetry.util.re import (
|
||||
_BAGGAGE_PROPERTY_FORMAT,
|
||||
_KEY_FORMAT,
|
||||
_VALUE_FORMAT,
|
||||
)
|
||||
|
||||
# Context key under which the baggage mapping is stored.
_BAGGAGE_KEY = create_key("baggage")
_logger = getLogger(__name__)

# Pre-compiled validators for W3C baggage keys, values and property metadata.
# NOTE(review): "_PROPERT_PATTERN" looks like a typo for "_PROPERTY_PATTERN";
# renaming would touch its uses below, so it is only flagged here.
_KEY_PATTERN = compile(_KEY_FORMAT)
_VALUE_PATTERN = compile(_VALUE_FORMAT)
_PROPERT_PATTERN = compile(_BAGGAGE_PROPERTY_FORMAT)
|
||||
|
||||
|
||||
def get_all(
    context: Optional[Context] = None,
) -> Mapping[str, object]:
    """Return a read-only view of every name/value pair in the Baggage.

    Args:
        context: The Context to read from; the current Context when None.

    Returns:
        A read-only mapping of the baggage entries.
    """
    entries = _get_baggage_value(context=context)
    return MappingProxyType(entries)
|
||||
|
||||
|
||||
def get_baggage(
    name: str, context: Optional[Context] = None
) -> Optional[object]:
    """Look up a single Baggage entry by name.

    Args:
        name: The name of the value to retrieve.
        context: The Context to read from; the current Context when None.

    Returns:
        The stored value, or None when *name* is not present.
    """
    baggage = _get_baggage_value(context=context)
    return baggage.get(name)
|
||||
|
||||
|
||||
def set_baggage(
    name: str, value: object, context: Optional[Context] = None
) -> Context:
    """Return a new Context whose Baggage contains ``name: value``.

    The existing baggage mapping is copied rather than mutated, so the
    source Context is left untouched.

    Args:
        name: The name of the value to set.
        value: The value to set.
        context: The Context to start from; the current Context when None.

    Returns:
        A Context carrying the updated Baggage.
    """
    updated = dict(_get_baggage_value(context=context))
    updated[name] = value
    return set_value(_BAGGAGE_KEY, updated, context=context)
|
||||
|
||||
|
||||
def remove_baggage(name: str, context: Optional[Context] = None) -> Context:
    """Return a new Context whose Baggage no longer contains *name*.

    Removing a name that is absent is a no-op rather than an error.

    Args:
        name: The name of the value to remove.
        context: The Context to start from; the current Context when None.

    Returns:
        A Context with the name/value removed.
    """
    remaining = dict(_get_baggage_value(context=context))
    remaining.pop(name, None)
    return set_value(_BAGGAGE_KEY, remaining, context=context)
|
||||
|
||||
|
||||
def clear(context: Optional[Context] = None) -> Context:
    """Return a new Context with an empty Baggage.

    Args:
        context: The Context to start from; the current Context when None.

    Returns:
        A Context with all baggage entries removed.
    """
    empty: Dict[str, object] = {}
    return set_value(_BAGGAGE_KEY, empty, context=context)
|
||||
|
||||
|
||||
def _get_baggage_value(context: Optional[Context] = None) -> Dict[str, object]:
    """Fetch the raw baggage dict from *context*; {} when unset or invalid."""
    stored = get_value(_BAGGAGE_KEY, context=context)
    return stored if isinstance(stored, dict) else {}
|
||||
|
||||
|
||||
def _is_valid_key(name: str) -> bool:
    """True when *name* fully matches the W3C baggage key grammar."""
    return bool(_KEY_PATTERN.fullmatch(str(name)))
|
||||
|
||||
|
||||
def _is_valid_value(value: object) -> bool:
    """True when *value* — and any ';'-separated properties after it —
    matches the W3C baggage value grammar."""
    first, *props = str(value).split(";")
    if _VALUE_PATTERN.fullmatch(first) is None:
        return False
    # Every trailing segment is properties metadata and must match too.
    return all(_PROPERT_PATTERN.fullmatch(prop) is not None for prop in props)
|
||||
|
||||
|
||||
def _is_valid_pair(key: str, value: str) -> bool:
    """True when both the key and the value are grammatically valid."""
    if not _is_valid_key(key):
        return False
    return _is_valid_value(value)
|
||||
Binary file not shown.
@@ -0,0 +1,146 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
from logging import getLogger
|
||||
from re import split
|
||||
from typing import Iterable, List, Mapping, Optional, Set
|
||||
from urllib.parse import quote_plus, unquote_plus
|
||||
|
||||
from opentelemetry.baggage import _is_valid_pair, get_all, set_baggage
|
||||
from opentelemetry.context import get_current
|
||||
from opentelemetry.context.context import Context
|
||||
from opentelemetry.propagators import textmap
|
||||
from opentelemetry.util.re import _DELIMITER_PATTERN
|
||||
|
||||
_logger = getLogger(__name__)
|
||||
|
||||
|
||||
class W3CBaggagePropagator(textmap.TextMapPropagator):
    """Extracts and injects Baggage which is used to annotate telemetry."""

    # Size/count limits enforced on incoming headers — presumably taken
    # from the W3C Baggage specification's limits; TODO confirm.
    _MAX_HEADER_LENGTH = 8192
    _MAX_PAIR_LENGTH = 4096
    _MAX_PAIRS = 180
    _BAGGAGE_HEADER_NAME = "baggage"

    def extract(
        self,
        carrier: textmap.CarrierT,
        context: Optional[Context] = None,
        getter: textmap.Getter[textmap.CarrierT] = textmap.default_getter,
    ) -> Context:
        """Extract Baggage from the carrier.

        See
        `opentelemetry.propagators.textmap.TextMapPropagator.extract`
        """

        if context is None:
            context = get_current()

        # Only the first value of a multi-valued header is considered.
        header = _extract_first_element(
            getter.get(carrier, self._BAGGAGE_HEADER_NAME)
        )

        # No baggage header: nothing to add to the context.
        if not header:
            return context

        # An oversized header is dropped wholesale, not partially parsed.
        if len(header) > self._MAX_HEADER_LENGTH:
            _logger.warning(
                "Baggage header `%s` exceeded the maximum number of bytes per baggage-string",
                header,
            )
            return context

        baggage_entries: List[str] = split(_DELIMITER_PATTERN, header)
        # Countdown of how many VALID entries may still be accepted; note
        # that invalid/oversized entries are skipped without decrementing.
        total_baggage_entries = self._MAX_PAIRS

        if len(baggage_entries) > self._MAX_PAIRS:
            _logger.warning(
                "Baggage header `%s` exceeded the maximum number of list-members",
                header,
            )

        for entry in baggage_entries:
            # Oversized list-members are skipped individually.
            if len(entry) > self._MAX_PAIR_LENGTH:
                _logger.warning(
                    "Baggage entry `%s` exceeded the maximum number of bytes per list-member",
                    entry,
                )
                continue
            if not entry:  # empty string
                continue
            try:
                # Split on the first '=' only: values may contain '='.
                name, value = entry.split("=", 1)
            except Exception:  # pylint: disable=broad-exception-caught
                _logger.warning(
                    "Baggage list-member `%s` doesn't match the format", entry
                )
                continue

            # Validate BEFORE percent-decoding, per the header grammar.
            if not _is_valid_pair(name, value):
                _logger.warning("Invalid baggage entry: `%s`", entry)
                continue

            name = unquote_plus(name).strip()
            value = unquote_plus(value).strip()

            context = set_baggage(
                name,
                value,
                context=context,
            )
            total_baggage_entries -= 1
            if total_baggage_entries == 0:
                break

        return context

    def inject(
        self,
        carrier: textmap.CarrierT,
        context: Optional[Context] = None,
        setter: textmap.Setter[textmap.CarrierT] = textmap.default_setter,
    ) -> None:
        """Injects Baggage into the carrier.

        See
        `opentelemetry.propagators.textmap.TextMapPropagator.inject`
        """
        baggage_entries = get_all(context=context)
        # Empty baggage: do not emit an empty header at all.
        if not baggage_entries:
            return

        baggage_string = _format_baggage(baggage_entries)
        setter.set(carrier, self._BAGGAGE_HEADER_NAME, baggage_string)

    @property
    def fields(self) -> Set[str]:
        """Returns a set with the fields set in `inject`."""
        return {self._BAGGAGE_HEADER_NAME}
|
||||
|
||||
|
||||
def _format_baggage(baggage_entries: Mapping[str, object]) -> str:
|
||||
return ",".join(
|
||||
quote_plus(str(key)) + "=" + quote_plus(str(value))
|
||||
for key, value in baggage_entries.items()
|
||||
)
|
||||
|
||||
|
||||
def _extract_first_element(
    items: Optional[Iterable[textmap.CarrierT]],
) -> Optional[textmap.CarrierT]:
    """Return the first element of *items*, or None when absent or empty."""
    if items is None:
        return None
    for item in items:
        return item
    return None
|
||||
Binary file not shown.
@@ -0,0 +1,176 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import typing
|
||||
from contextvars import Token
|
||||
from os import environ
|
||||
from uuid import uuid4
|
||||
|
||||
# pylint: disable=wrong-import-position
|
||||
from opentelemetry.context.context import Context, _RuntimeContext # noqa
|
||||
from opentelemetry.environment_variables import OTEL_PYTHON_CONTEXT
|
||||
from opentelemetry.util._importlib_metadata import entry_points
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _load_runtime_context() -> _RuntimeContext:
    """Initialize the RuntimeContext.

    The implementation is selected via the :envvar:`OTEL_PYTHON_CONTEXT`
    environment variable from the ``opentelemetry_context`` entry-point
    group; when loading the configured implementation fails, the default
    contextvars-based implementation is used instead.

    Returns:
        An instance of RuntimeContext.
    """

    # FIXME use a better implementation of a configuration manager
    # to avoid having to get configuration values straight from
    # environment variables
    default_context = "contextvars_context"

    configured_context = environ.get(OTEL_PYTHON_CONTEXT, default_context)  # type: str

    def _instantiate(name: str) -> _RuntimeContext:
        # Load the first entry point registered under *name* and call it
        # to build the runtime-context instance. Factored out so the try
        # and fallback paths share one implementation instead of two
        # copy-pasted expressions.
        return next(  # type: ignore
            iter(  # type: ignore
                entry_points(  # type: ignore
                    group="opentelemetry_context",
                    name=name,
                )
            )
        ).load()()

    try:
        return _instantiate(configured_context)
    except Exception:  # pylint: disable=broad-exception-caught
        logger.exception(
            "Failed to load context: %s, fallback to %s",
            configured_context,
            default_context,
        )
        return _instantiate(default_context)
|
||||
|
||||
|
||||
# Module-level singleton: the runtime-context implementation is resolved
# once at import time and shared by attach/detach/get_current below.
_RUNTIME_CONTEXT = _load_runtime_context()
|
||||
|
||||
|
||||
def create_key(keyname: str) -> str:
    """Create a unique context key.

    To allow cross-cutting concerns to control access to their local
    state, callers pass a debugging-friendly *keyname* and receive a key
    made unique with a random UUID suffix.

    Args:
        keyname: For debugging purposes only; need not be unique.

    Returns:
        A unique string representing the newly created key.
    """
    return f"{keyname}-{uuid4()}"
|
||||
|
||||
|
||||
def get_value(key: str, context: typing.Optional[Context] = None) -> "object":
    """Read the value stored under *key* in a context.

    Args:
        key: The key of the value to retrieve.
        context: The context to read from; the current context when None.

    Returns:
        The value associated with the key.
    """
    source = get_current() if context is None else context
    return source.get(key)
|
||||
|
||||
|
||||
def set_value(
    key: str, value: "object", context: typing.Optional[Context] = None
) -> Context:
    """Return a new Context with *key* bound to *value*.

    Contexts are treated as immutable: the source context is copied and
    a fresh `Context` containing the new entry is returned.

    Args:
        key: The key of the entry to set.
        value: The value of the entry to set.
        context: The context to copy; the current context when None.

    Returns:
        A new `Context` containing the value set.
    """
    base = get_current() if context is None else context
    # dict.copy() yields a plain mutable dict, so the assignment below
    # does not hit Context's mutation guard.
    updated = base.copy()
    updated[key] = value
    return Context(updated)
|
||||
|
||||
|
||||
def get_current() -> Context:
    """Return the Context associated with the current execution unit.

    Returns:
        The current `Context` object.
    """
    return _RUNTIME_CONTEXT.get_current()
|
||||
|
||||
|
||||
def attach(context: Context) -> Token[Context]:
    """Make *context* current and return a token for restoring the old one.

    Args:
        context: The Context to set as current.

    Returns:
        A token that can be used with `detach` to reset the context.
    """
    return _RUNTIME_CONTEXT.attach(context)
|
||||
|
||||
|
||||
def detach(token: Token[Context]) -> None:
    """Restore the Context that was current before the matching `attach`.

    Failures are logged rather than raised so instrumentation teardown
    can never crash application code.

    Args:
        token: The Token that was returned by a previous call to attach a Context.
    """
    try:
        _RUNTIME_CONTEXT.detach(token)
    except Exception:  # pylint: disable=broad-exception-caught
        logger.exception("Failed to detach context")
|
||||
|
||||
|
||||
# FIXME This is a temporary location for the suppress instrumentation key.
# Once the decision around how to suppress instrumentation is made in the
# spec, this key should be moved accordingly.
_SUPPRESS_INSTRUMENTATION_KEY = create_key("suppress_instrumentation")
_SUPPRESS_HTTP_INSTRUMENTATION_KEY = create_key(
    "suppress_http_instrumentation"
)

# Public API of this module; the _SUPPRESS_* keys are deliberately private.
__all__ = [
    "Context",
    "attach",
    "create_key",
    "detach",
    "get_current",
    "get_value",
    "set_value",
]
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,56 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import typing
|
||||
from abc import ABC, abstractmethod
|
||||
from contextvars import Token
|
||||
|
||||
|
||||
class Context(typing.Dict[str, object]):
    # A Context is conceptually immutable: direct item assignment is
    # forbidden so an instance can be shared across execution units
    # safely. Derive an updated Context via ``set_value`` instead.
    # NOTE(review): ValueError (not TypeError) is what callers may catch.
    def __setitem__(self, key: str, value: object) -> None:
        raise ValueError
|
||||
|
||||
|
||||
class _RuntimeContext(ABC):
    """The RuntimeContext interface provides a wrapper for the different
    mechanisms that are used to propagate context in Python.
    Implementations can be made available via entry_points and
    selected through environment variables.
    """

    # Concrete implementations are registered under the
    # "opentelemetry_context" entry-point group and chosen via
    # OTEL_PYTHON_CONTEXT (see _load_runtime_context in this package).
    @abstractmethod
    def attach(self, context: Context) -> Token[Context]:
        """Sets the current `Context` object. Returns a
        token that can be used to reset to the previous `Context`.

        Args:
            context: The Context to set.
        """

    @abstractmethod
    def get_current(self) -> Context:
        """Returns the current `Context` object."""

    @abstractmethod
    def detach(self, token: Token[Context]) -> None:
        """Resets Context to a previous value

        Args:
            token: A reference to a previous Context.
        """
|
||||
|
||||
|
||||
__all__ = ["Context"]
|
||||
@@ -0,0 +1,56 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from contextvars import ContextVar, Token
|
||||
|
||||
from opentelemetry.context.context import Context, _RuntimeContext
|
||||
|
||||
|
||||
class ContextVarsRuntimeContext(_RuntimeContext):
    """RuntimeContext implementation backed by a single ``ContextVar``.

    ``contextvars`` propagates values correctly across threads and
    asyncio tasks, which is why this is the preferred implementation
    for Python 3.5+.
    """

    _CONTEXT_KEY = "current_context"

    def __init__(self) -> None:
        # One ContextVar holds the entire Context object.
        self._context_var = ContextVar(self._CONTEXT_KEY, default=Context())

    def attach(self, context: Context) -> Token[Context]:
        """Make *context* current.

        Args:
            context: The Context to set.

        Returns:
            A token that resets to the previous `Context` when passed
            to `detach`.
        """
        return self._context_var.set(context)

    def get_current(self) -> Context:
        """Return the currently attached `Context` object."""
        return self._context_var.get()

    def detach(self, token: Token[Context]) -> None:
        """Restore the `Context` that was current when *token* was issued.

        Args:
            token: A reference to a previous Context.
        """
        self._context_var.reset(token)
|
||||
|
||||
|
||||
__all__ = ["ContextVarsRuntimeContext"]
|
||||
@@ -0,0 +1,88 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Environment-variable names used to configure the OpenTelemetry API.
# Each constant is followed by a Sphinx ``.. envvar::`` docstring so the
# variable renders in the generated documentation; leading-underscore
# names are experimental/private.
OTEL_LOGS_EXPORTER = "OTEL_LOGS_EXPORTER"
"""
.. envvar:: OTEL_LOGS_EXPORTER

"""

OTEL_METRICS_EXPORTER = "OTEL_METRICS_EXPORTER"
"""
.. envvar:: OTEL_METRICS_EXPORTER

Specifies which exporter is used for metrics. See `General SDK Configuration
<https://opentelemetry.io/docs/concepts/sdk-configuration/general-sdk-configuration/#otel_metrics_exporter>`_.

**Default value:** ``"otlp"``

**Example:**

``export OTEL_METRICS_EXPORTER="prometheus"``

Accepted values for ``OTEL_METRICS_EXPORTER`` are:

- ``"otlp"``
- ``"prometheus"``
- ``"none"``: No automatically configured exporter for metrics.

.. note::

    Exporter packages may add entry points for group ``opentelemetry_metrics_exporter`` which
    can then be used with this environment variable by name. The entry point should point to
    either a `opentelemetry.sdk.metrics.export.MetricExporter` (push exporter) or
    `opentelemetry.sdk.metrics.export.MetricReader` (pull exporter) subclass; it must be
    constructable without any required arguments. This mechanism is considered experimental and
    may change in subsequent releases.
"""

OTEL_PROPAGATORS = "OTEL_PROPAGATORS"
"""
.. envvar:: OTEL_PROPAGATORS
"""

OTEL_PYTHON_CONTEXT = "OTEL_PYTHON_CONTEXT"
"""
.. envvar:: OTEL_PYTHON_CONTEXT
"""

OTEL_PYTHON_ID_GENERATOR = "OTEL_PYTHON_ID_GENERATOR"
"""
.. envvar:: OTEL_PYTHON_ID_GENERATOR
"""

OTEL_TRACES_EXPORTER = "OTEL_TRACES_EXPORTER"
"""
.. envvar:: OTEL_TRACES_EXPORTER
"""

OTEL_PYTHON_TRACER_PROVIDER = "OTEL_PYTHON_TRACER_PROVIDER"
"""
.. envvar:: OTEL_PYTHON_TRACER_PROVIDER
"""

OTEL_PYTHON_METER_PROVIDER = "OTEL_PYTHON_METER_PROVIDER"
"""
.. envvar:: OTEL_PYTHON_METER_PROVIDER
"""

_OTEL_PYTHON_LOGGER_PROVIDER = "OTEL_PYTHON_LOGGER_PROVIDER"
"""
.. envvar:: OTEL_PYTHON_LOGGER_PROVIDER
"""

_OTEL_PYTHON_EVENT_LOGGER_PROVIDER = "OTEL_PYTHON_EVENT_LOGGER_PROVIDER"
"""
.. envvar:: OTEL_PYTHON_EVENT_LOGGER_PROVIDER
"""
|
||||
Binary file not shown.
@@ -0,0 +1,552 @@
|
||||
# Copyright 2021 The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""Cloud Trace Span Exporter for OpenTelemetry. Uses Cloud Trace Client's REST
|
||||
API to export traces and spans for viewing in Cloud Trace.
|
||||
|
||||
Usage
|
||||
-----
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
from opentelemetry import trace
|
||||
from opentelemetry.exporter.cloud_trace import CloudTraceSpanExporter
|
||||
from opentelemetry.sdk.trace import TracerProvider
|
||||
|
||||
# For debugging
|
||||
from opentelemetry.sdk.trace.export import SimpleSpanProcessor
|
||||
# Otherwise
|
||||
from opentelemetry.sdk.trace.export import BatchSpanProcessor
|
||||
|
||||
trace.set_tracer_provider(TracerProvider())
|
||||
|
||||
cloud_trace_exporter = CloudTraceSpanExporter()
|
||||
trace.get_tracer_provider().add_span_processor(
|
||||
BatchSpanProcessor(cloud_trace_exporter)
|
||||
)
|
||||
tracer = trace.get_tracer(__name__)
|
||||
with tracer.start_as_current_span("foo"):
|
||||
print("Hello world!")
|
||||
|
||||
|
||||
When not debugging, make sure to use
|
||||
:class:`opentelemetry.sdk.trace.export.BatchSpanProcessor` with the
|
||||
default parameters for performance reasons.
|
||||
|
||||
Auto-instrumentation
|
||||
--------------------
|
||||
|
||||
This exporter can also be used with `OpenTelemetry auto-instrumentation
|
||||
<https://opentelemetry.io/docs/instrumentation/python/automatic/>`_:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
opentelemetry-instrument --traces_exporter gcp_trace <command> <args>
|
||||
|
||||
Configuration is supported through environment variables
|
||||
(:mod:`opentelemetry.exporter.cloud_trace.environment_variables`) or the corresponding command
|
||||
line arguments to ``opentelemetry-instrument``:
|
||||
|
||||
.. code-block:: sh
|
||||
|
||||
opentelemetry-instrument --traces_exporter gcp_trace \\
|
||||
--exporter_gcp_trace_project_id my-project \\
|
||||
<command> <args>
|
||||
|
||||
See ``opentelemetry-instrument --help`` for all configuration options.
|
||||
|
||||
API
|
||||
---
|
||||
"""
|
||||
|
||||
import logging
|
||||
import re
|
||||
from collections.abc import Sequence as SequenceABC
|
||||
from os import environ
|
||||
from typing import (
|
||||
Any,
|
||||
Dict,
|
||||
List,
|
||||
Optional,
|
||||
Pattern,
|
||||
Sequence,
|
||||
Tuple,
|
||||
overload,
|
||||
)
|
||||
|
||||
import google.auth
|
||||
import opentelemetry.trace as trace_api
|
||||
from google.cloud.trace_v2 import BatchWriteSpansRequest, TraceServiceClient
|
||||
from google.cloud.trace_v2 import types as trace_types
|
||||
from google.cloud.trace_v2.services.trace_service.transports import (
|
||||
TraceServiceGrpcTransport,
|
||||
)
|
||||
from google.protobuf.timestamp_pb2 import ( # pylint: disable=no-name-in-module
|
||||
Timestamp,
|
||||
)
|
||||
from google.rpc import code_pb2, status_pb2
|
||||
from opentelemetry.exporter.cloud_trace.environment_variables import (
|
||||
OTEL_EXPORTER_GCP_TRACE_PROJECT_ID,
|
||||
OTEL_EXPORTER_GCP_TRACE_RESOURCE_REGEX,
|
||||
)
|
||||
from opentelemetry.exporter.cloud_trace.version import __version__
|
||||
from opentelemetry.resourcedetector.gcp_resource_detector import (
|
||||
_constants as _resource_constants,
|
||||
)
|
||||
from opentelemetry.resourcedetector.gcp_resource_detector._mapping import (
|
||||
get_monitored_resource,
|
||||
)
|
||||
from opentelemetry.sdk import version as opentelemetry_sdk_version
|
||||
from opentelemetry.sdk.resources import Resource
|
||||
from opentelemetry.sdk.trace import Event
|
||||
from opentelemetry.sdk.trace.export import (
|
||||
ReadableSpan,
|
||||
SpanExporter,
|
||||
SpanExportResult,
|
||||
)
|
||||
from opentelemetry.sdk.util import BoundedDict
|
||||
from opentelemetry.trace import format_span_id, format_trace_id
|
||||
from opentelemetry.trace.status import StatusCode
|
||||
from opentelemetry.util import types
|
||||
|
||||
logger = logging.getLogger(__name__)

_OTEL_SDK_VERSION = opentelemetry_sdk_version.__version__
# Reported to the Trace service so traffic can be attributed to this
# exporter version.
_USER_AGENT = f"opentelemetry-python {_OTEL_SDK_VERSION}; google-cloud-trace-exporter {__version__}"

# Set user-agent metadata, see https://github.com/grpc/grpc/issues/23644 and default options
# from
# https://github.com/googleapis/python-trace/blob/v1.7.3/google/cloud/trace_v1/services/trace_service/transports/grpc.py#L177-L180
_OPTIONS = [
    ("grpc.max_send_message_length", -1),
    ("grpc.max_receive_message_length", -1),
    ("grpc.primary_user_agent", _USER_AGENT),
]


# Per-span limits applied during translation — presumably mirroring Cloud
# Trace API quotas; TODO confirm against current Cloud Trace documentation.
MAX_NUM_LINKS = 128
MAX_NUM_EVENTS = 32
MAX_EVENT_ATTRS = 4
MAX_LINK_ATTRS = 32
MAX_SPAN_ATTRS = 32
MAX_ATTR_KEY_BYTES = 128
MAX_ATTR_VAL_BYTES = 16 * 1024  # 16 kilobytes
|
||||
|
||||
|
||||
def _create_default_client() -> TraceServiceClient:
    """Build a TraceServiceClient over a gRPC channel carrying _OPTIONS
    (message-size limits and the exporter user-agent)."""
    channel = TraceServiceGrpcTransport.create_channel(options=_OPTIONS)
    transport = TraceServiceGrpcTransport(channel=channel)
    return TraceServiceClient(transport=transport)
|
||||
|
||||
|
||||
class CloudTraceSpanExporter(SpanExporter):
    """Cloud Trace span exporter for OpenTelemetry.

    Args:
        project_id: GCP project ID for the project to send spans to. Alternatively, can be
            configured with :envvar:`OTEL_EXPORTER_GCP_TRACE_PROJECT_ID`.
        client: Cloud Trace client. If not given, will be taken from gcloud
            default credentials
        resource_regex: Resource attributes with keys matching this regex will be added to
            exported spans as labels (default: None). Alternatively, can be configured with
            :envvar:`OTEL_EXPORTER_GCP_TRACE_RESOURCE_REGEX`.
    """

    def __init__(
        self,
        project_id=None,
        client=None,
        resource_regex=None,
    ):
        self.client: TraceServiceClient = client or _create_default_client()

        # Project resolution order: explicit argument, then the
        # OTEL_EXPORTER_GCP_TRACE_PROJECT_ID env var, then the project
        # attached to application-default credentials.
        if not project_id:
            project_id = environ.get(OTEL_EXPORTER_GCP_TRACE_PROJECT_ID)
        if not project_id:
            _, project_id = google.auth.default()
        self.project_id = project_id

        # Regex may also come from the environment; compiled once here.
        if not resource_regex:
            resource_regex = environ.get(
                OTEL_EXPORTER_GCP_TRACE_RESOURCE_REGEX
            )
        self.resource_regex = (
            re.compile(resource_regex) if resource_regex else None
        )

    def export(self, spans: Sequence[ReadableSpan]) -> SpanExportResult:
        """Export the spans to Cloud Trace.

        See: https://cloud.google.com/trace/docs/reference/v2/rest/v2/projects.traces/batchWrite

        Args:
            spans: Sequence of spans to export
        """
        try:
            self.client.batch_write_spans(
                request=BatchWriteSpansRequest(
                    name="projects/{}".format(self.project_id),
                    spans=self._translate_to_cloud_trace(spans),
                )
            )
        # pylint: disable=broad-except
        except Exception as ex:
            # Exporters must never raise into the SDK's export pipeline;
            # report failure through the return value instead.
            logger.error("Error while writing to Cloud Trace", exc_info=ex)
            return SpanExportResult.FAILURE

        return SpanExportResult.SUCCESS

    def _translate_to_cloud_trace(
        self, spans: Sequence[ReadableSpan]
    ) -> List[trace_types.Span]:
        """Translate the spans to Cloud Trace format.

        Args:
            spans: Sequence of spans to convert
        """

        cloud_trace_spans: List[trace_types.Span] = []

        for span in spans:
            ctx = span.get_span_context()
            trace_id = format_trace_id(ctx.trace_id)
            span_id = format_span_id(ctx.span_id)
            # Fully-qualified Cloud Trace resource name for this span.
            span_name = "projects/{}/traces/{}/spans/{}".format(
                self.project_id, trace_id, span_id
            )

            parent_id = None
            if span.parent:
                parent_id = format_span_id(span.parent.span_id)

            start_time = _get_time_from_ns(span.start_time)
            end_time = _get_time_from_ns(span.end_time)

            if span.attributes and len(span.attributes) > MAX_SPAN_ATTRS:
                logger.warning(
                    "Span has more then %s attributes, some will be truncated",
                    MAX_SPAN_ATTRS,
                )

            # Span does not support a MonitoredResource object. We put the
            # information into attributes instead.
            resources_and_attrs = {
                **(span.attributes or {}),
                **_extract_resources(span.resource, self.resource_regex),
            }

            cloud_trace_spans.append(
                trace_types.Span(
                    name=span_name,
                    span_id=span_id,
                    display_name=_get_truncatable_str_object(span.name, 128),
                    start_time=start_time,
                    end_time=end_time,
                    parent_span_id=parent_id,
                    attributes=_extract_attributes(
                        resources_and_attrs,
                        MAX_SPAN_ATTRS,
                        add_agent_attr=True,
                    ),
                    links=_extract_links(span.links),
                    status=_extract_status(span.status),
                    time_events=_extract_events(span.events),
                    span_kind=_extract_span_kind(span.kind),
                )
            )
            # TODO: Leverage more of the Cloud Trace API, e.g.
            # same_process_as_parent_span and child_span_count

        return cloud_trace_spans

    def shutdown(self):
        # Nothing to release: the gRPC client owns its own resources.
        pass
|
||||
|
||||
|
||||
def _get_time_from_ns(nanoseconds: Optional[int]) -> Optional[Timestamp]:
    """Convert epoch nanoseconds to a protobuf ``Timestamp``.

    Returns None when no timestamp is available. NOTE(review): a value of
    0 (the epoch itself) is also treated as missing by the falsiness
    check below — confirm that is intended.
    """
    if not nanoseconds:
        return None
    ts = Timestamp()
    # pylint: disable=no-member
    ts.FromNanoseconds(nanoseconds)
    return ts
|
||||
|
||||
|
||||
def _get_truncatable_str_object(str_to_convert: str, max_length: int):
    """Wrap *str_to_convert* in a ``TruncatableString`` proto, truncating it
    to at most *max_length* UTF-8 bytes and recording how many bytes were
    dropped."""
    shortened, dropped_bytes = _truncate_str(str_to_convert, max_length)
    return trace_types.TruncatableString(
        value=shortened, truncated_byte_count=dropped_bytes
    )
|
||||
|
||||
|
||||
def _truncate_str(str_to_check: str, limit: int) -> Tuple[str, int]:
|
||||
"""Check the length of a string. If exceeds limit, then truncate it."""
|
||||
encoded = str_to_check.encode("utf-8")
|
||||
truncated_str = encoded[:limit].decode("utf-8", errors="ignore")
|
||||
return truncated_str, len(encoded) - len(truncated_str.encode("utf-8"))
|
||||
|
||||
|
||||
def _extract_status(status: trace_api.Status) -> Optional[status_pb2.Status]:
    """Convert an OTel Status to a protobuf Status.

    UNSET maps to ``None`` (no status proto), OK maps to ``OK``, and both
    ERROR and any future/unknown status codes map to ``UNKNOWN`` with the
    status description as the message.
    """
    code = status.status_code
    if code is StatusCode.UNSET:
        return None
    if code is StatusCode.OK:
        return status_pb2.Status(code=code_pb2.OK)
    if code is not StatusCode.ERROR:
        # future added value — log it, then fall through to the error case
        logger.info(
            "Couldn't handle OTel status code %s, assuming error",
            code,
        )
    return status_pb2.Status(
        code=code_pb2.UNKNOWN, message=status.description or ""
    )
|
||||
|
||||
|
||||
def _extract_links(
    links: Sequence[trace_api.Link],
) -> Optional[trace_types.Span.Links]:
    """Convert span.links into a Cloud Trace ``Span.Links`` proto.

    At most ``MAX_NUM_LINKS`` links are kept; the count of dropped links is
    recorded on the returned proto. Returns ``None`` when there are no links.
    """
    if not links:
        return None
    dropped_count = 0
    if len(links) > MAX_NUM_LINKS:
        logger.warning(
            "Exporting more then %s links, some will be truncated",
            MAX_NUM_LINKS,
        )
        dropped_count = len(links) - MAX_NUM_LINKS
        links = links[:MAX_NUM_LINKS]
    converted: List[trace_types.Span.Link] = []
    for link in links:
        attrs = link.attributes or {}
        if len(attrs) > MAX_LINK_ATTRS:
            # _extract_attributes enforces the cap; this only warns
            logger.warning(
                "Link has more then %s attributes, some will be truncated",
                MAX_LINK_ATTRS,
            )
        converted.append(
            trace_types.Span.Link(
                trace_id=format_trace_id(link.context.trace_id),
                span_id=format_span_id(link.context.span_id),
                type="TYPE_UNSPECIFIED",
                attributes=_extract_attributes(attrs, MAX_LINK_ATTRS),
            )
        )
    return trace_types.Span.Links(
        link=converted, dropped_links_count=dropped_count
    )
|
||||
|
||||
|
||||
def _extract_events(
    events: Sequence[Event],
) -> Optional[trace_types.Span.TimeEvents]:
    """Convert span.events into a Cloud Trace ``Span.TimeEvents`` proto.

    At most ``MAX_NUM_EVENTS`` events are kept; the surplus is counted as
    dropped annotations. Returns ``None`` when there are no events.
    """
    if not events:
        return None
    dropped_annotation_count = 0
    if len(events) > MAX_NUM_EVENTS:
        logger.warning(
            "Exporting more then %s annotations, some will be truncated",
            MAX_NUM_EVENTS,
        )
        dropped_annotation_count = len(events) - MAX_NUM_EVENTS
        events = events[:MAX_NUM_EVENTS]
    converted: List[trace_types.Span.TimeEvent] = []
    for event in events:
        if event.attributes and len(event.attributes) > MAX_EVENT_ATTRS:
            # _extract_attributes enforces the cap; this only warns
            logger.warning(
                "Event %s has more then %s attributes, some will be truncated",
                event.name,
                MAX_EVENT_ATTRS,
            )
        annotation = trace_types.Span.TimeEvent.Annotation(
            description=_get_truncatable_str_object(event.name, 256),
            attributes=_extract_attributes(
                event.attributes, MAX_EVENT_ATTRS
            ),
        )
        converted.append(
            trace_types.Span.TimeEvent(
                time=_get_time_from_ns(event.timestamp),
                annotation=annotation,
            )
        )
    return trace_types.Span.TimeEvents(
        time_event=converted,
        dropped_annotations_count=dropped_annotation_count,
        dropped_message_events_count=0,
    )
|
||||
|
||||
|
||||
# pylint: disable=no-member
# Maps OTel span kinds onto their Cloud Trace protobuf equivalents. Kinds
# missing from this table fall back to SPAN_KIND_UNSPECIFIED in
# _extract_span_kind.
SPAN_KIND_MAPPING = {
    trace_api.SpanKind.INTERNAL: trace_types.Span.SpanKind.INTERNAL,
    trace_api.SpanKind.CLIENT: trace_types.Span.SpanKind.CLIENT,
    trace_api.SpanKind.SERVER: trace_types.Span.SpanKind.SERVER,
    trace_api.SpanKind.PRODUCER: trace_types.Span.SpanKind.PRODUCER,
    trace_api.SpanKind.CONSUMER: trace_types.Span.SpanKind.CONSUMER,
}
|
||||
|
||||
|
||||
# pylint: disable=no-member
def _extract_span_kind(
    span_kind: trace_api.SpanKind,
) -> int:
    """Map an OTel ``SpanKind`` to its Cloud Trace protobuf value.

    Kinds absent from ``SPAN_KIND_MAPPING`` map to ``SPAN_KIND_UNSPECIFIED``.
    """
    fallback = trace_types.Span.SpanKind.SPAN_KIND_UNSPECIFIED
    return SPAN_KIND_MAPPING.get(span_kind, fallback)
|
||||
|
||||
|
||||
def _strip_characters(ot_version):
|
||||
return "".join(filter(lambda x: x.isdigit() or x == ".", ot_version))
|
||||
|
||||
|
||||
def _extract_resources(
    resource: Resource, resource_regex: Optional[Pattern] = None
) -> Dict[str, str]:
    """Convert OTel resource attributes into span-label key/value pairs.

    Attributes whose keys match *resource_regex* are copied through (values
    stringified); labels of the detected GCP monitored resource are added
    under ``g.co/r/<type>/<label>`` keys.
    """
    labels: Dict[str, str] = {}
    if resource_regex:
        for key, value in resource.attributes.items():
            if resource_regex.match(key):
                labels[key] = str(value)
    monitored_resource = get_monitored_resource(resource)
    # Do not map generic_task and generic_node to g.co/r/... span labels.
    if monitored_resource and monitored_resource.type not in (
        _resource_constants.GENERIC_NODE,
        _resource_constants.GENERIC_TASK,
    ):
        for key, value in monitored_resource.labels.items():
            labels[
                "g.co/r/{}/{}".format(monitored_resource.type, key)
            ] = value
    return labels
|
||||
|
||||
|
||||
# Maps OTel HTTP semantic-convention attribute keys onto Cloud Trace's
# well-known "/http/..." label names (applied in _extract_attributes).
LABELS_MAPPING = {
    # this one might be http.flavor? I'm not sure
    "http.scheme": "/http/client_protocol",
    "http.host": "/http/host",
    "http.method": "/http/method",
    # https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/http.md#common-attributes
    "http.request_content_length": "/http/request/size",
    "http.response_content_length": "/http/response/size",
    "http.route": "/http/route",
    "http.status_code": "/http/status_code",
    "http.url": "/http/url",
    "http.user_agent": "/http/user_agent",
}
|
||||
|
||||
|
||||
def _extract_attributes(
    attrs: types.Attributes,
    num_attrs_limit: int,
    add_agent_attr: bool = False,
) -> trace_types.Span.Attributes:
    """Convert span.attributes into a Cloud Trace ``Span.Attributes`` proto.

    Keys are truncated to ``MAX_ATTR_KEY_BYTES`` and remapped through
    ``LABELS_MAPPING``; values that cannot be converted are counted as
    dropped. When *add_agent_attr* is set, a ``g.co/agent`` attribute naming
    the SDK and exporter versions is appended.
    """
    bounded: BoundedDict[
        str, trace_types.AttributeValue
    ] = BoundedDict(num_attrs_limit)
    invalid_dropped = 0
    for ot_key, ot_value in attrs.items() if attrs else []:
        key = _truncate_str(ot_key, MAX_ATTR_KEY_BYTES)[0]
        key = LABELS_MAPPING.get(key, key)
        converted = _format_attribute_value(ot_value)
        if converted is None:
            # unconvertible value type — count it as dropped
            invalid_dropped += 1
        else:
            bounded[key] = converted
    if add_agent_attr:
        agent = (
            "opentelemetry-python {}; google-cloud-trace-exporter {}".format(
                _strip_characters(_OTEL_SDK_VERSION),
                _strip_characters(__version__),
            )
        )
        bounded["g.co/agent"] = _format_attribute_value(agent)
    return trace_types.Span.Attributes(
        attribute_map=dict(bounded),
        dropped_attributes_count=bounded.dropped + invalid_dropped,
    )
|
||||
|
||||
|
||||
@overload
def _format_attribute_value(
    value: types.AttributeValue,
) -> trace_types.AttributeValue: ...


@overload
def _format_attribute_value(
    value: Any,
) -> Optional[trace_types.AttributeValue]: ...


def _format_attribute_value(
    value,
) -> Optional[trace_types.AttributeValue]:
    """Convert a Python attribute value into a proto ``AttributeValue``.

    bool/int map to the corresponding scalar fields; str, float and other
    sequences are rendered as (truncatable) strings. Unsupported types are
    logged and ``None`` is returned.
    """
    # bool must be tested before int: bool is a subclass of int.
    if isinstance(value, bool):
        return trace_types.AttributeValue(bool_value=value)
    if isinstance(value, int):
        return trace_types.AttributeValue(int_value=value)
    if isinstance(value, str):
        text = value
    elif isinstance(value, float):
        text = "{:0.4f}".format(value)
    elif isinstance(value, SequenceABC):
        text = ",".join(str(item) for item in value)
    else:
        logger.warning(
            "ignoring attribute value %s of type %s. Values type must be one "
            "of bool, int, string or float, or a sequence of these",
            value,
            type(value),
        )
        return None
    return trace_types.AttributeValue(
        string_value=_get_truncatable_str_object(text, MAX_ATTR_VAL_BYTES)
    )
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,32 @@
|
||||
# Copyright 2022 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# https://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
OTEL_EXPORTER_GCP_TRACE_PROJECT_ID = "OTEL_EXPORTER_GCP_TRACE_PROJECT_ID"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_GCP_TRACE_PROJECT_ID
|
||||
|
||||
GCP project ID for the project to send spans to. Equivalent to constructor parameter to
|
||||
:class:`opentelemetry.exporter.cloud_trace.CloudTraceSpanExporter`.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_GCP_TRACE_RESOURCE_REGEX = (
|
||||
"OTEL_EXPORTER_GCP_TRACE_RESOURCE_REGEX"
|
||||
)
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_GCP_TRACE_RESOURCE_REGEX
|
||||
|
||||
Resource attributes with keys matching this regex will be added to exported spans as labels. Equivalent to constructor parameter to
:class:`opentelemetry.exporter.cloud_trace.CloudTraceSpanExporter`.
|
||||
"""
|
||||
@@ -0,0 +1,15 @@
|
||||
# Copyright 2021 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Package version string (PEP 440), bumped per release.
__version__ = "1.9.0"
|
||||
@@ -0,0 +1,132 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
The OpenTelemetry metrics API describes the classes used to generate
|
||||
metrics.
|
||||
|
||||
The :class:`.MeterProvider` provides users access to the :class:`.Meter` which in
|
||||
turn is used to create :class:`.Instrument` objects. The :class:`.Instrument` objects are
|
||||
used to record measurements.
|
||||
|
||||
This module provides abstract (i.e. unimplemented) classes required for
|
||||
metrics, and a concrete no-op implementation :class:`.NoOpMeter` that allows applications
|
||||
to use the API package alone without a supporting implementation.
|
||||
|
||||
To get a meter, you need to provide the package name from which you are
|
||||
calling the meter APIs to OpenTelemetry by calling `MeterProvider.get_meter`
|
||||
with the calling instrumentation name and the version of your package.
|
||||
|
||||
The following code shows how to obtain a meter using the global :class:`.MeterProvider`::
|
||||
|
||||
from opentelemetry.metrics import get_meter
|
||||
|
||||
meter = get_meter("example-meter")
|
||||
counter = meter.create_counter("example-counter")
|
||||
|
||||
.. versionadded:: 1.10.0
|
||||
.. versionchanged:: 1.12.0rc
|
||||
"""
|
||||
|
||||
from opentelemetry.metrics._internal import (
|
||||
Meter,
|
||||
MeterProvider,
|
||||
NoOpMeter,
|
||||
NoOpMeterProvider,
|
||||
get_meter,
|
||||
get_meter_provider,
|
||||
set_meter_provider,
|
||||
)
|
||||
from opentelemetry.metrics._internal.instrument import (
|
||||
Asynchronous,
|
||||
CallbackOptions,
|
||||
CallbackT,
|
||||
Counter,
|
||||
Histogram,
|
||||
Instrument,
|
||||
NoOpCounter,
|
||||
NoOpHistogram,
|
||||
NoOpObservableCounter,
|
||||
NoOpObservableGauge,
|
||||
NoOpObservableUpDownCounter,
|
||||
NoOpUpDownCounter,
|
||||
ObservableCounter,
|
||||
ObservableGauge,
|
||||
ObservableUpDownCounter,
|
||||
Synchronous,
|
||||
UpDownCounter,
|
||||
)
|
||||
from opentelemetry.metrics._internal.instrument import Gauge as _Gauge
|
||||
from opentelemetry.metrics._internal.instrument import NoOpGauge as _NoOpGauge
|
||||
from opentelemetry.metrics._internal.observation import Observation
|
||||
|
||||
# Re-home each re-exported name: setting __module__ to this package's name
# makes the objects report the public "opentelemetry.metrics" path instead
# of the private _internal module they are defined in.
for obj in [
    Counter,
    Synchronous,
    Asynchronous,
    CallbackOptions,
    _Gauge,
    _NoOpGauge,
    get_meter_provider,
    get_meter,
    Histogram,
    Meter,
    MeterProvider,
    Instrument,
    NoOpCounter,
    NoOpHistogram,
    NoOpMeter,
    NoOpMeterProvider,
    NoOpObservableCounter,
    NoOpObservableGauge,
    NoOpObservableUpDownCounter,
    NoOpUpDownCounter,
    ObservableCounter,
    ObservableGauge,
    ObservableUpDownCounter,
    Observation,
    set_meter_provider,
    UpDownCounter,
]:
    obj.__module__ = __name__
|
||||
|
||||
# Explicit public API surface of this package (controls `from ... import *`
# and signals which names are supported for external use).
__all__ = [
    "CallbackOptions",
    "MeterProvider",
    "NoOpMeterProvider",
    "Meter",
    "Counter",
    "_Gauge",
    "_NoOpGauge",
    "NoOpCounter",
    "UpDownCounter",
    "NoOpUpDownCounter",
    "Histogram",
    "NoOpHistogram",
    "ObservableCounter",
    "NoOpObservableCounter",
    "ObservableUpDownCounter",
    "Instrument",
    "Synchronous",
    "Asynchronous",
    "NoOpObservableGauge",
    "ObservableGauge",
    "NoOpObservableUpDownCounter",
    "get_meter",
    "get_meter_provider",
    "set_meter_provider",
    "Observation",
    "CallbackT",
    "NoOpMeter",
]
|
||||
Binary file not shown.
@@ -0,0 +1,889 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# pylint: disable=too-many-ancestors
|
||||
|
||||
"""
|
||||
The OpenTelemetry metrics API describes the classes used to generate
|
||||
metrics.
|
||||
|
||||
The :class:`.MeterProvider` provides users access to the :class:`.Meter` which in
|
||||
turn is used to create :class:`.Instrument` objects. The :class:`.Instrument` objects are
|
||||
used to record measurements.
|
||||
|
||||
This module provides abstract (i.e. unimplemented) classes required for
|
||||
metrics, and a concrete no-op implementation :class:`.NoOpMeter` that allows applications
|
||||
to use the API package alone without a supporting implementation.
|
||||
|
||||
To get a meter, you need to provide the package name from which you are
|
||||
calling the meter APIs to OpenTelemetry by calling `MeterProvider.get_meter`
|
||||
with the calling instrumentation name and the version of your package.
|
||||
|
||||
The following code shows how to obtain a meter using the global :class:`.MeterProvider`::
|
||||
|
||||
from opentelemetry.metrics import get_meter
|
||||
|
||||
meter = get_meter("example-meter")
|
||||
counter = meter.create_counter("example-counter")
|
||||
|
||||
.. versionadded:: 1.10.0
|
||||
"""
|
||||
|
||||
import warnings
|
||||
from abc import ABC, abstractmethod
|
||||
from dataclasses import dataclass
|
||||
from logging import getLogger
|
||||
from os import environ
|
||||
from threading import Lock
|
||||
from typing import Dict, List, Optional, Sequence, Union, cast
|
||||
|
||||
from opentelemetry.environment_variables import OTEL_PYTHON_METER_PROVIDER
|
||||
from opentelemetry.metrics._internal.instrument import (
|
||||
CallbackT,
|
||||
Counter,
|
||||
Gauge,
|
||||
Histogram,
|
||||
NoOpCounter,
|
||||
NoOpGauge,
|
||||
NoOpHistogram,
|
||||
NoOpObservableCounter,
|
||||
NoOpObservableGauge,
|
||||
NoOpObservableUpDownCounter,
|
||||
NoOpUpDownCounter,
|
||||
ObservableCounter,
|
||||
ObservableGauge,
|
||||
ObservableUpDownCounter,
|
||||
UpDownCounter,
|
||||
_MetricsHistogramAdvisory,
|
||||
_ProxyCounter,
|
||||
_ProxyGauge,
|
||||
_ProxyHistogram,
|
||||
_ProxyObservableCounter,
|
||||
_ProxyObservableGauge,
|
||||
_ProxyObservableUpDownCounter,
|
||||
_ProxyUpDownCounter,
|
||||
)
|
||||
from opentelemetry.util._once import Once
|
||||
from opentelemetry.util._providers import _load_provider
|
||||
from opentelemetry.util.types import (
|
||||
Attributes,
|
||||
)
|
||||
|
||||
_logger = getLogger(__name__)
|
||||
|
||||
|
||||
# pylint: disable=invalid-name
# Union of all proxy instrument types that can be created before a real
# MeterProvider is configured.
_ProxyInstrumentT = Union[
    _ProxyCounter,
    _ProxyHistogram,
    _ProxyGauge,
    _ProxyObservableCounter,
    _ProxyObservableGauge,
    _ProxyObservableUpDownCounter,
    _ProxyUpDownCounter,
]
|
||||
|
||||
|
||||
class MeterProvider(ABC):
    """
    MeterProvider is the entry point of the API. It provides access to `Meter` instances.
    """

    # Sole abstract hook: concrete providers decide how meters are created
    # and whether identical arguments return the same Meter instance.
    @abstractmethod
    def get_meter(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> "Meter":
        """Returns a `Meter` for use by the given instrumentation library.

        For any two calls it is undefined whether the same or different
        `Meter` instances are returned, even for different library names.

        This function may return different `Meter` types (e.g. a no-op meter
        vs. a functional meter).

        Args:
            name: The name of the instrumenting module.
                ``__name__`` may not be used as this can result in
                different meter names if the meters are in different files.
                It is better to use a fixed string that can be imported where
                needed and used consistently as the name of the meter.

                This should *not* be the name of the module that is
                instrumented but the name of the module doing the instrumentation.
                E.g., instead of ``"requests"``, use
                ``"opentelemetry.instrumentation.requests"``.

            version: Optional. The version string of the
                instrumenting library. Usually this should be the same as
                ``importlib.metadata.version(instrumenting_library_name)``.

            schema_url: Optional. Specifies the Schema URL of the emitted telemetry.
            attributes: Optional. Attributes that are associated with the emitted telemetry.
        """
|
||||
|
||||
|
||||
class NoOpMeterProvider(MeterProvider):
    """The default MeterProvider used when no MeterProvider implementation is available."""

    def get_meter(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> "Meter":
        """Returns a NoOpMeter."""
        # ``attributes`` is accepted for signature compatibility but not
        # forwarded — the returned meter records nothing anyway.
        return NoOpMeter(name, version=version, schema_url=schema_url)
|
||||
|
||||
|
||||
class _ProxyMeterProvider(MeterProvider):
    # Stand-in provider used before the real MeterProvider is configured.
    # Meters handed out while unconfigured are _ProxyMeter objects, which are
    # later redirected to the real provider via on_set_meter_provider().
    def __init__(self) -> None:
        self._lock = Lock()
        self._meters: List[_ProxyMeter] = []
        self._real_meter_provider: Optional[MeterProvider] = None

    def get_meter(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> "Meter":
        # NOTE(review): ``attributes`` is not forwarded to the real provider
        # here — confirm this matches the intended API surface.
        with self._lock:
            # Once a real provider has been installed, delegate directly.
            if self._real_meter_provider is not None:
                return self._real_meter_provider.get_meter(
                    name, version, schema_url
                )

            # Not configured yet: remember the proxy so it can be redirected
            # when the real provider arrives.
            meter = _ProxyMeter(name, version=version, schema_url=schema_url)
            self._meters.append(meter)
            return meter

    def on_set_meter_provider(self, meter_provider: MeterProvider) -> None:
        # Install the real provider and point every previously handed-out
        # proxy meter at it (all under the lock to avoid races with get_meter).
        with self._lock:
            self._real_meter_provider = meter_provider
            for meter in self._meters:
                meter.on_set_meter_provider(meter_provider)
|
||||
|
||||
|
||||
@dataclass
class _InstrumentRegistrationStatus:
    """Outcome of ``Meter._register_instrument``."""

    # Identifying key built from name, type, unit and description.
    instrument_id: str
    # True if an instrument with the same identifying key was registered before.
    already_registered: bool
    # True if the prior registration used a different advisory value.
    conflict: bool
    # The advisory recorded at first registration (meaningful on conflict).
    current_advisory: Optional[_MetricsHistogramAdvisory]
|
||||
|
||||
|
||||
class Meter(ABC):
|
||||
"""Handles instrument creation.
|
||||
|
||||
This class provides methods for creating instruments which are then
|
||||
used to produce measurements.
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name: str,
|
||||
version: Optional[str] = None,
|
||||
schema_url: Optional[str] = None,
|
||||
) -> None:
|
||||
super().__init__()
|
||||
self._name = name
|
||||
self._version = version
|
||||
self._schema_url = schema_url
|
||||
self._instrument_ids: Dict[
|
||||
str, Optional[_MetricsHistogramAdvisory]
|
||||
] = {}
|
||||
self._instrument_ids_lock = Lock()
|
||||
|
||||
@property
|
||||
def name(self) -> str:
|
||||
"""
|
||||
The name of the instrumenting module.
|
||||
"""
|
||||
return self._name
|
||||
|
||||
@property
|
||||
def version(self) -> Optional[str]:
|
||||
"""
|
||||
The version string of the instrumenting library.
|
||||
"""
|
||||
return self._version
|
||||
|
||||
@property
|
||||
def schema_url(self) -> Optional[str]:
|
||||
"""
|
||||
Specifies the Schema URL of the emitted telemetry
|
||||
"""
|
||||
return self._schema_url
|
||||
|
||||
def _register_instrument(
|
||||
self,
|
||||
name: str,
|
||||
type_: type,
|
||||
unit: str,
|
||||
description: str,
|
||||
advisory: Optional[_MetricsHistogramAdvisory] = None,
|
||||
) -> _InstrumentRegistrationStatus:
|
||||
"""
|
||||
Register an instrument with the name, type, unit and description as
|
||||
identifying keys and the advisory as value.
|
||||
|
||||
Returns a tuple. The first value is the instrument id.
|
||||
The second value is an `_InstrumentRegistrationStatus` where
|
||||
`already_registered` is `True` if the instrument has been registered
|
||||
already.
|
||||
If `conflict` is set to True the `current_advisory` attribute contains
|
||||
the registered instrument advisory.
|
||||
"""
|
||||
|
||||
instrument_id = ",".join(
|
||||
[name.strip().lower(), type_.__name__, unit, description]
|
||||
)
|
||||
|
||||
already_registered = False
|
||||
conflict = False
|
||||
current_advisory = None
|
||||
|
||||
with self._instrument_ids_lock:
|
||||
# we are not using get because None is a valid value
|
||||
already_registered = instrument_id in self._instrument_ids
|
||||
if already_registered:
|
||||
current_advisory = self._instrument_ids[instrument_id]
|
||||
conflict = current_advisory != advisory
|
||||
else:
|
||||
self._instrument_ids[instrument_id] = advisory
|
||||
|
||||
return _InstrumentRegistrationStatus(
|
||||
instrument_id=instrument_id,
|
||||
already_registered=already_registered,
|
||||
conflict=conflict,
|
||||
current_advisory=current_advisory,
|
||||
)
|
||||
|
||||
@staticmethod
|
||||
def _log_instrument_registration_conflict(
|
||||
name: str,
|
||||
instrumentation_type: str,
|
||||
unit: str,
|
||||
description: str,
|
||||
status: _InstrumentRegistrationStatus,
|
||||
) -> None:
|
||||
_logger.warning(
|
||||
"An instrument with name %s, type %s, unit %s and "
|
||||
"description %s has been created already with a "
|
||||
"different advisory value %s and will be used instead.",
|
||||
name,
|
||||
instrumentation_type,
|
||||
unit,
|
||||
description,
|
||||
status.current_advisory,
|
||||
)
|
||||
|
||||
@abstractmethod
|
||||
def create_counter(
|
||||
self,
|
||||
name: str,
|
||||
unit: str = "",
|
||||
description: str = "",
|
||||
) -> Counter:
|
||||
"""Creates a `Counter` instrument
|
||||
|
||||
Args:
|
||||
name: The name of the instrument to be created
|
||||
unit: The unit for observations this instrument reports. For
|
||||
example, ``By`` for bytes. UCUM units are recommended.
|
||||
description: A description for this instrument and what it measures.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def create_up_down_counter(
|
||||
self,
|
||||
name: str,
|
||||
unit: str = "",
|
||||
description: str = "",
|
||||
) -> UpDownCounter:
|
||||
"""Creates an `UpDownCounter` instrument
|
||||
|
||||
Args:
|
||||
name: The name of the instrument to be created
|
||||
unit: The unit for observations this instrument reports. For
|
||||
example, ``By`` for bytes. UCUM units are recommended.
|
||||
description: A description for this instrument and what it measures.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def create_observable_counter(
|
||||
self,
|
||||
name: str,
|
||||
callbacks: Optional[Sequence[CallbackT]] = None,
|
||||
unit: str = "",
|
||||
description: str = "",
|
||||
) -> ObservableCounter:
|
||||
"""Creates an `ObservableCounter` instrument
|
||||
|
||||
An observable counter observes a monotonically increasing count by calling provided
|
||||
callbacks which accept a :class:`~opentelemetry.metrics.CallbackOptions` and return
|
||||
multiple :class:`~opentelemetry.metrics.Observation`.
|
||||
|
||||
For example, an observable counter could be used to report system CPU
|
||||
time periodically. Here is a basic implementation::
|
||||
|
||||
def cpu_time_callback(options: CallbackOptions) -> Iterable[Observation]:
|
||||
observations = []
|
||||
with open("/proc/stat") as procstat:
|
||||
procstat.readline() # skip the first line
|
||||
for line in procstat:
|
||||
if not line.startswith("cpu"): break
|
||||
cpu, *states = line.split()
|
||||
observations.append(Observation(int(states[0]) // 100, {"cpu": cpu, "state": "user"}))
|
||||
observations.append(Observation(int(states[1]) // 100, {"cpu": cpu, "state": "nice"}))
|
||||
observations.append(Observation(int(states[2]) // 100, {"cpu": cpu, "state": "system"}))
|
||||
# ... other states
|
||||
return observations
|
||||
|
||||
meter.create_observable_counter(
|
||||
"system.cpu.time",
|
||||
callbacks=[cpu_time_callback],
|
||||
unit="s",
|
||||
description="CPU time"
|
||||
)
|
||||
|
||||
To reduce memory usage, you can use generator callbacks instead of
|
||||
building the full list::
|
||||
|
||||
def cpu_time_callback(options: CallbackOptions) -> Iterable[Observation]:
|
||||
with open("/proc/stat") as procstat:
|
||||
procstat.readline() # skip the first line
|
||||
for line in procstat:
|
||||
if not line.startswith("cpu"): break
|
||||
cpu, *states = line.split()
|
||||
yield Observation(int(states[0]) // 100, {"cpu": cpu, "state": "user"})
|
||||
yield Observation(int(states[1]) // 100, {"cpu": cpu, "state": "nice"})
|
||||
# ... other states
|
||||
|
||||
Alternatively, you can pass a sequence of generators directly instead of a sequence of
|
||||
callbacks, which each should return iterables of :class:`~opentelemetry.metrics.Observation`::
|
||||
|
||||
def cpu_time_callback(states_to_include: set[str]) -> Iterable[Iterable[Observation]]:
|
||||
# accept options sent in from OpenTelemetry
|
||||
options = yield
|
||||
while True:
|
||||
observations = []
|
||||
with open("/proc/stat") as procstat:
|
||||
procstat.readline() # skip the first line
|
||||
for line in procstat:
|
||||
if not line.startswith("cpu"): break
|
||||
cpu, *states = line.split()
|
||||
if "user" in states_to_include:
|
||||
observations.append(Observation(int(states[0]) // 100, {"cpu": cpu, "state": "user"}))
|
||||
if "nice" in states_to_include:
|
||||
observations.append(Observation(int(states[1]) // 100, {"cpu": cpu, "state": "nice"}))
|
||||
# ... other states
|
||||
# yield the observations and receive the options for next iteration
|
||||
options = yield observations
|
||||
|
||||
meter.create_observable_counter(
|
||||
"system.cpu.time",
|
||||
callbacks=[cpu_time_callback({"user", "system"})],
|
||||
unit="s",
|
||||
description="CPU time"
|
||||
)
|
||||
|
||||
The :class:`~opentelemetry.metrics.CallbackOptions` contain a timeout which the
|
||||
callback should respect. For example if the callback does asynchronous work, like
|
||||
making HTTP requests, it should respect the timeout::
|
||||
|
||||
def scrape_http_callback(options: CallbackOptions) -> Iterable[Observation]:
|
||||
r = requests.get('http://scrapethis.com', timeout=options.timeout_millis / 10**3)
|
||||
for value in r.json():
|
||||
yield Observation(value)
|
||||
|
||||
Args:
|
||||
name: The name of the instrument to be created
|
||||
callbacks: A sequence of callbacks that return an iterable of
|
||||
:class:`~opentelemetry.metrics.Observation`. Alternatively, can be a sequence of generators that each
|
||||
yields iterables of :class:`~opentelemetry.metrics.Observation`.
|
||||
unit: The unit for observations this instrument reports. For
|
||||
example, ``By`` for bytes. UCUM units are recommended.
|
||||
description: A description for this instrument and what it measures.
|
||||
"""
|
||||
|
||||
@abstractmethod
|
||||
def create_histogram(
|
||||
self,
|
||||
name: str,
|
||||
unit: str = "",
|
||||
description: str = "",
|
||||
*,
|
||||
explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None,
|
||||
) -> Histogram:
|
||||
"""Creates a :class:`~opentelemetry.metrics.Histogram` instrument
|
||||
|
||||
Args:
|
||||
name: The name of the instrument to be created
|
||||
unit: The unit for observations this instrument reports. For
|
||||
example, ``By`` for bytes. UCUM units are recommended.
|
||||
description: A description for this instrument and what it measures.
|
||||
"""
|
||||
|
||||
def create_gauge( # type: ignore # pylint: disable=no-self-use
|
||||
self,
|
||||
name: str,
|
||||
unit: str = "",
|
||||
description: str = "",
|
||||
) -> Gauge: # pyright: ignore[reportReturnType]
|
||||
"""Creates a ``Gauge`` instrument
|
||||
|
||||
Args:
|
||||
name: The name of the instrument to be created
|
||||
unit: The unit for observations this instrument reports. For
|
||||
example, ``By`` for bytes. UCUM units are recommended.
|
||||
description: A description for this instrument and what it measures.
|
||||
"""
|
||||
warnings.warn("create_gauge() is not implemented and will be a no-op")
|
||||
|
||||
    @abstractmethod
    def create_observable_gauge(
        self,
        name: str,
        callbacks: Optional[Sequence[CallbackT]] = None,
        unit: str = "",
        description: str = "",
    ) -> ObservableGauge:
        """Creates an `ObservableGauge` instrument

        Args:
            name: The name of the instrument to be created
            callbacks: A sequence of callbacks that return an iterable of
                :class:`~opentelemetry.metrics.Observation`. Alternatively, can be a generator that yields iterables
                of :class:`~opentelemetry.metrics.Observation`.
            unit: The unit for observations this instrument reports. For
                example, ``By`` for bytes. UCUM units are recommended.
            description: A description for this instrument and what it measures.
        """
|
||||
|
||||
    @abstractmethod
    def create_observable_up_down_counter(
        self,
        name: str,
        callbacks: Optional[Sequence[CallbackT]] = None,
        unit: str = "",
        description: str = "",
    ) -> ObservableUpDownCounter:
        """Creates an `ObservableUpDownCounter` instrument

        Args:
            name: The name of the instrument to be created
            callbacks: A sequence of callbacks that return an iterable of
                :class:`~opentelemetry.metrics.Observation`. Alternatively, can be a generator that yields iterables
                of :class:`~opentelemetry.metrics.Observation`.
            unit: The unit for observations this instrument reports. For
                example, ``By`` for bytes. UCUM units are recommended.
            description: A description for this instrument and what it measures.
        """
|
||||
|
||||
|
||||
class _ProxyMeter(Meter):
    """Stand-in Meter handed out before a real MeterProvider is configured.

    Instruments created from this meter are proxies themselves. Once a real
    provider arrives (``on_set_meter_provider``), a real backing meter is
    created and every previously created proxy instrument is given a real
    backing instrument.
    """

    def __init__(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
    ) -> None:
        super().__init__(name, version=version, schema_url=schema_url)
        # Guards _instruments and _real_meter so instrument creation and the
        # switch-over to a real meter do not interleave.
        self._lock = Lock()
        self._instruments: List[_ProxyInstrumentT] = []
        self._real_meter: Optional[Meter] = None

    def on_set_meter_provider(self, meter_provider: MeterProvider) -> None:
        """Called when a real meter provider is set on the creating _ProxyMeterProvider

        Creates a real backing meter for this instance and notifies all created
        instruments so they can create real backing instruments.
        """
        real_meter = meter_provider.get_meter(
            self._name, self._version, self._schema_url
        )

        with self._lock:
            self._real_meter = real_meter
            # notify all proxy instruments of the new meter so they can create
            # real instruments to back themselves
            for instrument in self._instruments:
                instrument.on_meter_set(real_meter)

    def create_counter(
        self,
        name: str,
        unit: str = "",
        description: str = "",
    ) -> Counter:
        # Once a real meter exists, delegate directly; otherwise hand out a
        # proxy and remember it for the later switch-over.
        with self._lock:
            if self._real_meter:
                return self._real_meter.create_counter(name, unit, description)
            proxy = _ProxyCounter(name, unit, description)
            self._instruments.append(proxy)
            return proxy

    def create_up_down_counter(
        self,
        name: str,
        unit: str = "",
        description: str = "",
    ) -> UpDownCounter:
        with self._lock:
            if self._real_meter:
                return self._real_meter.create_up_down_counter(
                    name, unit, description
                )
            proxy = _ProxyUpDownCounter(name, unit, description)
            self._instruments.append(proxy)
            return proxy

    def create_observable_counter(
        self,
        name: str,
        callbacks: Optional[Sequence[CallbackT]] = None,
        unit: str = "",
        description: str = "",
    ) -> ObservableCounter:
        with self._lock:
            if self._real_meter:
                return self._real_meter.create_observable_counter(
                    name, callbacks, unit, description
                )
            proxy = _ProxyObservableCounter(
                name, callbacks, unit=unit, description=description
            )
            self._instruments.append(proxy)
            return proxy

    def create_histogram(
        self,
        name: str,
        unit: str = "",
        description: str = "",
        *,
        explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None,
    ) -> Histogram:
        with self._lock:
            if self._real_meter:
                return self._real_meter.create_histogram(
                    name,
                    unit,
                    description,
                    explicit_bucket_boundaries_advisory=explicit_bucket_boundaries_advisory,
                )
            proxy = _ProxyHistogram(
                name, unit, description, explicit_bucket_boundaries_advisory
            )
            self._instruments.append(proxy)
            return proxy

    def create_gauge(
        self,
        name: str,
        unit: str = "",
        description: str = "",
    ) -> Gauge:
        with self._lock:
            if self._real_meter:
                return self._real_meter.create_gauge(name, unit, description)
            proxy = _ProxyGauge(name, unit, description)
            self._instruments.append(proxy)
            return proxy

    def create_observable_gauge(
        self,
        name: str,
        callbacks: Optional[Sequence[CallbackT]] = None,
        unit: str = "",
        description: str = "",
    ) -> ObservableGauge:
        with self._lock:
            if self._real_meter:
                return self._real_meter.create_observable_gauge(
                    name, callbacks, unit, description
                )
            proxy = _ProxyObservableGauge(
                name, callbacks, unit=unit, description=description
            )
            self._instruments.append(proxy)
            return proxy

    def create_observable_up_down_counter(
        self,
        name: str,
        callbacks: Optional[Sequence[CallbackT]] = None,
        unit: str = "",
        description: str = "",
    ) -> ObservableUpDownCounter:
        with self._lock:
            if self._real_meter:
                return self._real_meter.create_observable_up_down_counter(
                    name,
                    callbacks,
                    unit,
                    description,
                )
            proxy = _ProxyObservableUpDownCounter(
                name, callbacks, unit=unit, description=description
            )
            self._instruments.append(proxy)
            return proxy
|
||||
|
||||
|
||||
class NoOpMeter(Meter):
    """The default Meter used when no Meter implementation is available.

    All operations are no-op.
    """

    def create_counter(
        self,
        name: str,
        unit: str = "",
        description: str = "",
    ) -> Counter:
        """Returns a no-op Counter."""
        # Still registers the instrument so duplicate registrations are
        # detected and logged, even though nothing is recorded.
        status = self._register_instrument(
            name, NoOpCounter, unit, description
        )
        if status.conflict:
            self._log_instrument_registration_conflict(
                name,
                Counter.__name__,
                unit,
                description,
                status,
            )

        return NoOpCounter(name, unit=unit, description=description)

    def create_gauge(
        self,
        name: str,
        unit: str = "",
        description: str = "",
    ) -> Gauge:
        """Returns a no-op Gauge."""
        status = self._register_instrument(name, NoOpGauge, unit, description)
        if status.conflict:
            self._log_instrument_registration_conflict(
                name,
                Gauge.__name__,
                unit,
                description,
                status,
            )
        return NoOpGauge(name, unit=unit, description=description)

    def create_up_down_counter(
        self,
        name: str,
        unit: str = "",
        description: str = "",
    ) -> UpDownCounter:
        """Returns a no-op UpDownCounter."""
        status = self._register_instrument(
            name, NoOpUpDownCounter, unit, description
        )
        if status.conflict:
            self._log_instrument_registration_conflict(
                name,
                UpDownCounter.__name__,
                unit,
                description,
                status,
            )
        return NoOpUpDownCounter(name, unit=unit, description=description)

    def create_observable_counter(
        self,
        name: str,
        callbacks: Optional[Sequence[CallbackT]] = None,
        unit: str = "",
        description: str = "",
    ) -> ObservableCounter:
        """Returns a no-op ObservableCounter."""
        status = self._register_instrument(
            name, NoOpObservableCounter, unit, description
        )
        if status.conflict:
            self._log_instrument_registration_conflict(
                name,
                ObservableCounter.__name__,
                unit,
                description,
                status,
            )
        return NoOpObservableCounter(
            name,
            callbacks,
            unit=unit,
            description=description,
        )

    def create_histogram(
        self,
        name: str,
        unit: str = "",
        description: str = "",
        *,
        explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None,
    ) -> Histogram:
        """Returns a no-op Histogram."""
        # The advisory is wrapped so registration can compare it against any
        # prior registration of the same instrument name.
        status = self._register_instrument(
            name,
            NoOpHistogram,
            unit,
            description,
            _MetricsHistogramAdvisory(
                explicit_bucket_boundaries=explicit_bucket_boundaries_advisory
            ),
        )
        if status.conflict:
            self._log_instrument_registration_conflict(
                name,
                Histogram.__name__,
                unit,
                description,
                status,
            )
        return NoOpHistogram(
            name,
            unit=unit,
            description=description,
            explicit_bucket_boundaries_advisory=explicit_bucket_boundaries_advisory,
        )

    def create_observable_gauge(
        self,
        name: str,
        callbacks: Optional[Sequence[CallbackT]] = None,
        unit: str = "",
        description: str = "",
    ) -> ObservableGauge:
        """Returns a no-op ObservableGauge."""
        status = self._register_instrument(
            name, NoOpObservableGauge, unit, description
        )
        if status.conflict:
            self._log_instrument_registration_conflict(
                name,
                ObservableGauge.__name__,
                unit,
                description,
                status,
            )
        return NoOpObservableGauge(
            name,
            callbacks,
            unit=unit,
            description=description,
        )

    def create_observable_up_down_counter(
        self,
        name: str,
        callbacks: Optional[Sequence[CallbackT]] = None,
        unit: str = "",
        description: str = "",
    ) -> ObservableUpDownCounter:
        """Returns a no-op ObservableUpDownCounter."""
        status = self._register_instrument(
            name, NoOpObservableUpDownCounter, unit, description
        )
        if status.conflict:
            self._log_instrument_registration_conflict(
                name,
                ObservableUpDownCounter.__name__,
                unit,
                description,
                status,
            )
        return NoOpObservableUpDownCounter(
            name,
            callbacks,
            unit=unit,
            description=description,
        )
|
||||
|
||||
|
||||
_METER_PROVIDER_SET_ONCE = Once()
|
||||
_METER_PROVIDER: Optional[MeterProvider] = None
|
||||
_PROXY_METER_PROVIDER = _ProxyMeterProvider()
|
||||
|
||||
|
||||
def get_meter(
    name: str,
    version: str = "",
    meter_provider: Optional[MeterProvider] = None,
    schema_url: Optional[str] = None,
    attributes: Optional[Attributes] = None,
) -> "Meter":
    """Returns a `Meter` for use by the given instrumentation library.

    This function is a convenience wrapper for
    `opentelemetry.metrics.MeterProvider.get_meter`.

    If meter_provider is omitted the current configured one is used.
    """
    provider = (
        get_meter_provider() if meter_provider is None else meter_provider
    )
    return provider.get_meter(name, version, schema_url, attributes)
|
||||
|
||||
|
||||
def _set_meter_provider(meter_provider: MeterProvider, log: bool) -> None:
    """Installs *meter_provider* as the global provider, at most once.

    Args:
        meter_provider: The provider to install globally.
        log: Whether to log a warning when a provider was already set.
    """

    def set_mp() -> None:
        global _METER_PROVIDER  # pylint: disable=global-statement
        _METER_PROVIDER = meter_provider

        # gives all proxies real instruments off the newly set meter provider
        _PROXY_METER_PROVIDER.on_set_meter_provider(meter_provider)

    # Once guarantees set_mp runs at most one time, even across threads.
    did_set = _METER_PROVIDER_SET_ONCE.do_once(set_mp)

    if log and not did_set:
        _logger.warning("Overriding of current MeterProvider is not allowed")
|
||||
|
||||
|
||||
def set_meter_provider(meter_provider: MeterProvider) -> None:
    """Sets the current global :class:`~.MeterProvider` object.

    This can only be done once, a warning will be logged if any further attempt
    is made.
    """
    # log=True: warn if a provider has already been configured.
    _set_meter_provider(meter_provider, log=True)
|
||||
|
||||
|
||||
def get_meter_provider() -> MeterProvider:
    """Gets the current global :class:`~.MeterProvider` object."""

    if _METER_PROVIDER is None:
        if OTEL_PYTHON_METER_PROVIDER not in environ:
            # Nothing configured yet: hand out the proxy so instruments
            # created now can be backed by the real provider later.
            return _PROXY_METER_PROVIDER

        # Load the provider named by the environment variable's entry point
        # and install it (log=False: this is first-time configuration).
        meter_provider: MeterProvider = _load_provider(  # type: ignore
            OTEL_PYTHON_METER_PROVIDER, "meter_provider"
        )
        _set_meter_provider(meter_provider, log=False)

    # _METER_PROVIDER will have been set by one thread
    return cast("MeterProvider", _METER_PROVIDER)
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,530 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# pylint: disable=too-many-ancestors
|
||||
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from dataclasses import dataclass
|
||||
from logging import getLogger
|
||||
from re import compile as re_compile
|
||||
from typing import (
|
||||
Callable,
|
||||
Dict,
|
||||
Generator,
|
||||
Generic,
|
||||
Iterable,
|
||||
Optional,
|
||||
Sequence,
|
||||
TypeVar,
|
||||
Union,
|
||||
)
|
||||
|
||||
# pylint: disable=unused-import; needed for typing and sphinx
|
||||
from opentelemetry import metrics
|
||||
from opentelemetry.context import Context
|
||||
from opentelemetry.metrics._internal.observation import Observation
|
||||
from opentelemetry.util.types import (
|
||||
Attributes,
|
||||
)
|
||||
|
||||
_logger = getLogger(__name__)
|
||||
|
||||
_name_regex = re_compile(r"[a-zA-Z][-_./a-zA-Z0-9]{0,254}")
|
||||
_unit_regex = re_compile(r"[\x00-\x7F]{0,63}")
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class _MetricsHistogramAdvisory:
    # Non-binding hint for histogram bucket boundaries; implementations may
    # use or ignore it.
    explicit_bucket_boundaries: Optional[Sequence[float]] = None
|
||||
|
||||
|
||||
@dataclass(frozen=True)
class CallbackOptions:
    """Options for the callback

    Args:
        timeout_millis: Timeout for the callback's execution. If the callback does asynchronous
            work (e.g. HTTP requests), it should respect this timeout.
    """

    # default timeout: 10 seconds
    timeout_millis: float = 10_000
|
||||
|
||||
|
||||
# Type variable ranging over concrete Instrument subclasses.
InstrumentT = TypeVar("InstrumentT", bound="Instrument")
# pylint: disable=invalid-name
# A callback is either a plain callable returning an iterable of
# Observations, or a generator that yields such iterables and receives
# CallbackOptions via send().
CallbackT = Union[
    Callable[[CallbackOptions], Iterable[Observation]],
    Generator[Iterable[Observation], CallbackOptions, None],
]
|
||||
|
||||
|
||||
class Instrument(ABC):
    """Abstract class that serves as base for all instruments."""

    @abstractmethod
    def __init__(
        self,
        name: str,
        unit: str = "",
        description: str = "",
    ) -> None:
        pass

    @staticmethod
    def _check_name_unit_description(
        name: str, unit: str, description: str
    ) -> Dict[str, Optional[str]]:
        """
        Checks the following instrument name, unit and description for
        compliance with the spec.

        Returns a dict with keys "name", "unit" and "description", the
        corresponding values will be the checked strings or `None` if the value
        is invalid. If valid, the checked strings should be used instead of the
        original values.
        """
        # Name must fully match the spec's instrument-name pattern.
        checked_name = name if _name_regex.fullmatch(name) is not None else None

        # A missing unit is normalized to the empty string before validation.
        normalized_unit = "" if unit is None else unit
        checked_unit = (
            normalized_unit
            if _unit_regex.fullmatch(normalized_unit) is not None
            else None
        )

        # Description has no format restriction; only None is normalized.
        checked_description = "" if description is None else description

        return {
            "name": checked_name,
            "unit": checked_unit,
            "description": checked_description,
        }
|
||||
|
||||
|
||||
class _ProxyInstrument(ABC, Generic[InstrumentT]):
    """Base for placeholder instruments created before a real meter exists."""

    def __init__(
        self,
        name: str,
        unit: str = "",
        description: str = "",
    ) -> None:
        # Creation arguments are retained so the real instrument can be
        # created with the same parameters later.
        self._name = name
        self._unit = unit
        self._description = description
        self._real_instrument: Optional[InstrumentT] = None

    def on_meter_set(self, meter: "metrics.Meter") -> None:
        """Called when a real meter is set on the creating _ProxyMeter"""

        # We don't need any locking on proxy instruments because it's OK if some
        # measurements get dropped while a real backing instrument is being
        # created.
        self._real_instrument = self._create_real_instrument(meter)

    @abstractmethod
    def _create_real_instrument(self, meter: "metrics.Meter") -> InstrumentT:
        """Create an instance of the real instrument. Implement this."""
|
||||
|
||||
|
||||
class _ProxyAsynchronousInstrument(_ProxyInstrument[InstrumentT]):
    """Proxy base for asynchronous instruments; additionally retains callbacks."""

    def __init__(
        self,
        name: str,
        callbacks: Optional[Sequence[CallbackT]] = None,
        unit: str = "",
        description: str = "",
    ) -> None:
        super().__init__(name, unit, description)
        # Kept so they can be passed on to the real instrument when created.
        self._callbacks = callbacks
|
||||
|
||||
|
||||
class Synchronous(Instrument):
    """Base class for all synchronous instruments"""
|
||||
|
||||
|
||||
class Asynchronous(Instrument):
    """Base class for all asynchronous instruments"""

    @abstractmethod
    def __init__(
        self,
        name: str,
        callbacks: Optional[Sequence[CallbackT]] = None,
        unit: str = "",
        description: str = "",
    ) -> None:
        # Asynchronous instruments extend the base constructor with callbacks
        # that produce the observed values.
        super().__init__(name, unit=unit, description=description)
|
||||
|
||||
|
||||
class Counter(Synchronous):
    """A Counter is a synchronous `Instrument` which supports non-negative increments."""

    @abstractmethod
    def add(
        self,
        amount: Union[int, float],
        attributes: Optional[Attributes] = None,
        context: Optional[Context] = None,
    ) -> None:
        """Record an increment of *amount*.

        Args:
            amount: The increment to add (expected non-negative for counters).
            attributes: Attributes to associate with this measurement.
            context: Optional explicit context for the measurement.
        """
        pass
|
||||
|
||||
|
||||
class NoOpCounter(Counter):
    """No-op implementation of `Counter`."""

    def __init__(
        self,
        name: str,
        unit: str = "",
        description: str = "",
    ) -> None:
        super().__init__(name, unit=unit, description=description)

    def add(
        self,
        amount: Union[int, float],
        attributes: Optional[Attributes] = None,
        context: Optional[Context] = None,
    ) -> None:
        # Delegates to the abstract base, which does nothing.
        return super().add(amount, attributes=attributes, context=context)
|
||||
|
||||
|
||||
class _ProxyCounter(_ProxyInstrument[Counter], Counter):
    """Proxy Counter: forwards to the real counter once it exists."""

    def add(
        self,
        amount: Union[int, float],
        attributes: Optional[Attributes] = None,
        context: Optional[Context] = None,
    ) -> None:
        # Measurements made before the real instrument exists are dropped.
        if self._real_instrument:
            self._real_instrument.add(amount, attributes, context)

    def _create_real_instrument(self, meter: "metrics.Meter") -> Counter:
        return meter.create_counter(
            self._name,
            self._unit,
            self._description,
        )
|
||||
|
||||
|
||||
class UpDownCounter(Synchronous):
    """An UpDownCounter is a synchronous `Instrument` which supports increments and decrements."""

    @abstractmethod
    def add(
        self,
        amount: Union[int, float],
        attributes: Optional[Attributes] = None,
        context: Optional[Context] = None,
    ) -> None:
        """Record a delta of *amount* (may be negative).

        Args:
            amount: The increment or decrement to apply.
            attributes: Attributes to associate with this measurement.
            context: Optional explicit context for the measurement.
        """
        pass
|
||||
|
||||
|
||||
class NoOpUpDownCounter(UpDownCounter):
    """No-op implementation of `UpDownCounter`."""

    def __init__(
        self,
        name: str,
        unit: str = "",
        description: str = "",
    ) -> None:
        super().__init__(name, unit=unit, description=description)

    def add(
        self,
        amount: Union[int, float],
        attributes: Optional[Attributes] = None,
        context: Optional[Context] = None,
    ) -> None:
        # Delegates to the abstract base, which does nothing.
        return super().add(amount, attributes=attributes, context=context)
|
||||
|
||||
|
||||
class _ProxyUpDownCounter(_ProxyInstrument[UpDownCounter], UpDownCounter):
    """Proxy UpDownCounter: forwards to the real instrument once it exists."""

    def add(
        self,
        amount: Union[int, float],
        attributes: Optional[Attributes] = None,
        context: Optional[Context] = None,
    ) -> None:
        # Measurements made before the real instrument exists are dropped.
        if self._real_instrument:
            self._real_instrument.add(amount, attributes, context)

    def _create_real_instrument(self, meter: "metrics.Meter") -> UpDownCounter:
        return meter.create_up_down_counter(
            self._name,
            self._unit,
            self._description,
        )
|
||||
|
||||
|
||||
class ObservableCounter(Asynchronous):
    """An ObservableCounter is an asynchronous `Instrument` which reports monotonically
    increasing value(s) when the instrument is being observed.
    """
|
||||
|
||||
|
||||
class NoOpObservableCounter(ObservableCounter):
    """No-op implementation of `ObservableCounter`."""

    def __init__(
        self,
        name: str,
        callbacks: Optional[Sequence[CallbackT]] = None,
        unit: str = "",
        description: str = "",
    ) -> None:
        # Callbacks are accepted for interface compatibility but never invoked.
        super().__init__(
            name,
            callbacks,
            unit=unit,
            description=description,
        )
|
||||
|
||||
|
||||
class _ProxyObservableCounter(
    _ProxyAsynchronousInstrument[ObservableCounter], ObservableCounter
):
    """Proxy ObservableCounter: creates the real instrument when a meter is set."""

    def _create_real_instrument(
        self, meter: "metrics.Meter"
    ) -> ObservableCounter:
        return meter.create_observable_counter(
            self._name,
            self._callbacks,
            self._unit,
            self._description,
        )
|
||||
|
||||
|
||||
class ObservableUpDownCounter(Asynchronous):
    """An ObservableUpDownCounter is an asynchronous `Instrument` which reports additive value(s) (e.g.
    the process heap size - it makes sense to report the heap size from multiple processes and sum them
    up, so we get the total heap usage) when the instrument is being observed.
    """
|
||||
|
||||
|
||||
class NoOpObservableUpDownCounter(ObservableUpDownCounter):
    """No-op implementation of `ObservableUpDownCounter`."""

    def __init__(
        self,
        name: str,
        callbacks: Optional[Sequence[CallbackT]] = None,
        unit: str = "",
        description: str = "",
    ) -> None:
        # Callbacks are accepted for interface compatibility but never invoked.
        super().__init__(
            name,
            callbacks,
            unit=unit,
            description=description,
        )
|
||||
|
||||
|
||||
class _ProxyObservableUpDownCounter(
    _ProxyAsynchronousInstrument[ObservableUpDownCounter],
    ObservableUpDownCounter,
):
    """Proxy ObservableUpDownCounter: creates the real instrument on meter set."""

    def _create_real_instrument(
        self, meter: "metrics.Meter"
    ) -> ObservableUpDownCounter:
        return meter.create_observable_up_down_counter(
            self._name,
            self._callbacks,
            self._unit,
            self._description,
        )
|
||||
|
||||
|
||||
class Histogram(Synchronous):
    """Histogram is a synchronous `Instrument` which can be used to report arbitrary values
    that are likely to be statistically meaningful. It is intended for statistics such as
    histograms, summaries, and percentile.
    """

    @abstractmethod
    def __init__(
        self,
        name: str,
        unit: str = "",
        description: str = "",
        explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None,
    ) -> None:
        # explicit_bucket_boundaries_advisory: optional, non-binding hint of
        # bucket boundaries for implementations that support it.
        pass

    @abstractmethod
    def record(
        self,
        amount: Union[int, float],
        attributes: Optional[Attributes] = None,
        context: Optional[Context] = None,
    ) -> None:
        """Record *amount* into the histogram.

        Args:
            amount: The value to record.
            attributes: Attributes to associate with this measurement.
            context: Optional explicit context for the measurement.
        """
        pass
|
||||
|
||||
|
||||
class NoOpHistogram(Histogram):
    """No-op implementation of `Histogram`."""

    def __init__(
        self,
        name: str,
        unit: str = "",
        description: str = "",
        explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None,
    ) -> None:
        super().__init__(
            name,
            unit=unit,
            description=description,
            explicit_bucket_boundaries_advisory=explicit_bucket_boundaries_advisory,
        )

    def record(
        self,
        amount: Union[int, float],
        attributes: Optional[Attributes] = None,
        context: Optional[Context] = None,
    ) -> None:
        # Delegates to the abstract base, which does nothing.
        return super().record(amount, attributes=attributes, context=context)
|
||||
|
||||
|
||||
class _ProxyHistogram(_ProxyInstrument[Histogram], Histogram):
    """Proxy Histogram: forwards records to the real instrument once it exists."""

    def __init__(
        self,
        name: str,
        unit: str = "",
        description: str = "",
        explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None,
    ) -> None:
        super().__init__(name, unit=unit, description=description)
        # Retained so the advisory can be replayed onto the real instrument.
        self._explicit_bucket_boundaries_advisory = (
            explicit_bucket_boundaries_advisory
        )

    def record(
        self,
        amount: Union[int, float],
        attributes: Optional[Attributes] = None,
        context: Optional[Context] = None,
    ) -> None:
        # Measurements made before the real instrument exists are dropped.
        if self._real_instrument:
            self._real_instrument.record(amount, attributes, context)

    def _create_real_instrument(self, meter: "metrics.Meter") -> Histogram:
        return meter.create_histogram(
            self._name,
            self._unit,
            self._description,
            explicit_bucket_boundaries_advisory=self._explicit_bucket_boundaries_advisory,
        )
|
||||
|
||||
|
||||
class ObservableGauge(Asynchronous):
    """Asynchronous Gauge is an asynchronous `Instrument` which reports non-additive value(s) (e.g.
    the room temperature - it makes no sense to report the temperature value from multiple rooms
    and sum them up) when the instrument is being observed.
    """
|
||||
|
||||
|
||||
class NoOpObservableGauge(ObservableGauge):
    """No-op implementation of `ObservableGauge`."""

    def __init__(
        self,
        name: str,
        callbacks: Optional[Sequence[CallbackT]] = None,
        unit: str = "",
        description: str = "",
    ) -> None:
        # Callbacks are accepted for interface compatibility but never invoked.
        super().__init__(
            name,
            callbacks,
            unit=unit,
            description=description,
        )
|
||||
|
||||
|
||||
class _ProxyObservableGauge(
    _ProxyAsynchronousInstrument[ObservableGauge],
    ObservableGauge,
):
    """Proxy ObservableGauge: creates the real instrument when a meter is set."""

    def _create_real_instrument(
        self, meter: "metrics.Meter"
    ) -> ObservableGauge:
        return meter.create_observable_gauge(
            self._name,
            self._callbacks,
            self._unit,
            self._description,
        )
|
||||
|
||||
|
||||
class Gauge(Synchronous):
    """A Gauge is a synchronous `Instrument` which can be used to record non-additive values as they occur."""

    @abstractmethod
    def set(
        self,
        amount: Union[int, float],
        attributes: Optional[Attributes] = None,
        context: Optional[Context] = None,
    ) -> None:
        """Record the current value *amount*.

        Args:
            amount: The value to set.
            attributes: Attributes to associate with this measurement.
            context: Optional explicit context for the measurement.
        """
        pass
|
||||
|
||||
|
||||
class NoOpGauge(Gauge):
    """No-op implementation of ``Gauge``."""

    def __init__(
        self,
        name: str,
        unit: str = "",
        description: str = "",
    ) -> None:
        super().__init__(name, unit=unit, description=description)

    def set(
        self,
        amount: Union[int, float],
        attributes: Optional[Attributes] = None,
        context: Optional[Context] = None,
    ) -> None:
        # Delegates to the abstract base, which does nothing.
        return super().set(amount, attributes=attributes, context=context)
|
||||
|
||||
|
||||
class _ProxyGauge(
    _ProxyInstrument[Gauge],
    Gauge,
):
    """Proxy Gauge: forwards values to the real instrument once it exists."""

    def set(
        self,
        amount: Union[int, float],
        attributes: Optional[Attributes] = None,
        context: Optional[Context] = None,
    ) -> None:
        # Measurements made before the real instrument exists are dropped.
        if self._real_instrument:
            self._real_instrument.set(amount, attributes, context)

    def _create_real_instrument(self, meter: "metrics.Meter") -> Gauge:
        return meter.create_gauge(
            self._name,
            self._unit,
            self._description,
        )
|
||||
@@ -0,0 +1,63 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from typing import Optional, Union
|
||||
|
||||
from opentelemetry.context import Context
|
||||
from opentelemetry.util.types import Attributes
|
||||
|
||||
|
||||
class Observation:
    """A measurement observed in an asynchronous instrument

    Return/yield instances of this class from asynchronous instrument callbacks.

    Args:
        value: The float or int measured value
        attributes: The measurement's attributes
        context: The measurement's context
    """

    def __init__(
        self,
        value: Union[int, float],
        attributes: Attributes = None,
        context: Optional[Context] = None,
    ) -> None:
        # Stored privately; exposed read-only through the properties below.
        self._value = value
        self._attributes = attributes
        self._context = context

    @property
    def value(self) -> Union[float, int]:
        """The measured value."""
        return self._value

    @property
    def attributes(self) -> Attributes:
        """Attributes associated with this measurement."""
        return self._attributes

    @property
    def context(self) -> Optional[Context]:
        """Context associated with this measurement, if any."""
        return self._context

    def __eq__(self, other: object) -> bool:
        # Equal only to other Observations with matching value, attributes
        # and context.
        if not isinstance(other, Observation):
            return False
        return (
            self._value == other.value
            and self._attributes == other.attributes
            and self._context == other.context
        )

    def __repr__(self) -> str:
        return f"Observation(value={self.value}, attributes={self.attributes}, context={self.context})"
|
||||
@@ -0,0 +1,167 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
API for propagation of context.
|
||||
|
||||
The propagators for the
|
||||
``opentelemetry.propagators.composite.CompositePropagator`` can be defined
|
||||
via configuration in the ``OTEL_PROPAGATORS`` environment variable. This
|
||||
variable should be set to a comma-separated string of names of values for the
|
||||
``opentelemetry_propagator`` entry point. For example, setting
|
||||
``OTEL_PROPAGATORS`` to ``tracecontext,baggage`` (which is the default value)
|
||||
would instantiate
|
||||
``opentelemetry.propagators.composite.CompositePropagator`` with 2
|
||||
propagators, one of type
|
||||
``opentelemetry.trace.propagation.tracecontext.TraceContextTextMapPropagator``
|
||||
and other of type ``opentelemetry.baggage.propagation.W3CBaggagePropagator``.
|
||||
Notice that these propagator classes are defined as
|
||||
``opentelemetry_propagator`` entry points in the ``pyproject.toml`` file of
|
||||
``opentelemetry``.
|
||||
|
||||
Example::
|
||||
|
||||
import flask
|
||||
import requests
|
||||
from opentelemetry import propagate
|
||||
|
||||
|
||||
PROPAGATOR = propagate.get_global_textmap()
|
||||
|
||||
|
||||
def get_header_from_flask_request(request, key):
|
||||
return request.headers.get_all(key)
|
||||
|
||||
def set_header_into_requests_request(request: requests.Request,
|
||||
key: str, value: str):
|
||||
request.headers[key] = value
|
||||
|
||||
def example_route():
|
||||
context = PROPAGATOR.extract(
|
||||
get_header_from_flask_request,
|
||||
flask.request
|
||||
)
|
||||
request_to_downstream = requests.Request(
|
||||
"GET", "http://httpbin.org/get"
|
||||
)
|
||||
PROPAGATOR.inject(
|
||||
set_header_into_requests_request,
|
||||
request_to_downstream,
|
||||
context=context
|
||||
)
|
||||
session = requests.Session()
|
||||
session.send(request_to_downstream.prepare())
|
||||
|
||||
|
||||
.. _Propagation API Specification:
|
||||
https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/context/api-propagators.md
|
||||
"""
|
||||
|
||||
from logging import getLogger
|
||||
from os import environ
|
||||
from typing import Optional
|
||||
|
||||
from opentelemetry.context.context import Context
|
||||
from opentelemetry.environment_variables import OTEL_PROPAGATORS
|
||||
from opentelemetry.propagators import composite, textmap
|
||||
from opentelemetry.util._importlib_metadata import entry_points
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
||||
|
||||
def extract(
    carrier: textmap.CarrierT,
    context: Optional[Context] = None,
    getter: textmap.Getter[textmap.CarrierT] = textmap.default_getter,
) -> Context:
    """Extract a Context from *carrier* using the configured global propagator.

    Args:
        carrier: an object which contains values that are used to construct a
            Context. It must be paired with an appropriate getter which
            understands how to extract values from it.
        context: an optional Context to use. Defaults to the root context if
            not set.
        getter: an object with a ``get`` function that can retrieve zero or
            more values from the carrier and a ``keys`` function that can list
            all the keys present in the carrier.
    """
    propagator = get_global_textmap()
    return propagator.extract(carrier, context, getter=getter)
|
||||
|
||||
|
||||
def inject(
    carrier: textmap.CarrierT,
    context: Optional[Context] = None,
    setter: textmap.Setter[textmap.CarrierT] = textmap.default_setter,
) -> None:
    """Inject the Context into *carrier* using the configured global propagator.

    Args:
        carrier: the medium Propagators read values from and write values to.
            It should be paired with a setter which knows how to set header
            values on the carrier.
        context: an optional Context to use. Defaults to the current context
            if not set.
        setter: an optional `Setter` object that can set values on the
            carrier.
    """
    propagator = get_global_textmap()
    propagator.inject(carrier, context=context, setter=setter)
|
||||
|
||||
|
||||
# Propagators configured via OTEL_PROPAGATORS (comma-separated entry-point
# names); defaults to the W3C TraceContext and Baggage propagators.
propagators = []

# Single use variable here to hack black and make lint pass
environ_propagators = environ.get(
    OTEL_PROPAGATORS,
    "tracecontext,baggage",
)


for propagator in environ_propagators.split(","):
    propagator = propagator.strip()

    try:
        propagators.append(  # type: ignore
            next(  # type: ignore
                iter(  # type: ignore
                    entry_points(  # type: ignore
                        group="opentelemetry_propagator",
                        name=propagator,
                    )
                )
            ).load()()
        )
    except StopIteration:
        # "from None" suppresses the unhelpful StopIteration chained
        # context; the ValueError message is the actionable error.
        raise ValueError(
            f"Propagator {propagator} not found. It is either misspelled or not installed."
        ) from None
    except Exception:  # pylint: disable=broad-exception-caught
        logger.exception("Failed to load propagator: %s", propagator)
        raise


_HTTP_TEXT_FORMAT = composite.CompositePropagator(propagators)  # type: ignore
|
||||
|
||||
|
||||
def get_global_textmap() -> textmap.TextMapPropagator:
    """Return the process-wide propagator used by extract() and inject()."""
    return _HTTP_TEXT_FORMAT
|
||||
|
||||
|
||||
def set_global_textmap(
    http_text_format: textmap.TextMapPropagator,
) -> None:
    """Replace the process-wide propagator returned by get_global_textmap()."""
    global _HTTP_TEXT_FORMAT  # pylint:disable=global-statement
    _HTTP_TEXT_FORMAT = http_text_format  # type: ignore
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,91 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
import typing
|
||||
|
||||
from deprecated import deprecated
|
||||
|
||||
from opentelemetry.context.context import Context
|
||||
from opentelemetry.propagators import textmap
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CompositePropagator(textmap.TextMapPropagator):
    """A propagator that combines several propagators into a single one.

    Args:
        propagators: the list of propagators to use
    """

    def __init__(
        self, propagators: typing.Sequence[textmap.TextMapPropagator]
    ) -> None:
        self._propagators = propagators

    def extract(
        self,
        carrier: textmap.CarrierT,
        context: typing.Optional[Context] = None,
        getter: textmap.Getter[textmap.CarrierT] = textmap.default_getter,
    ) -> Context:
        """Run each configured propagator against the given context/carrier.

        Propagators run in configuration order; when several propagators
        write the same context key, the last one in the list wins.

        See `opentelemetry.propagators.textmap.TextMapPropagator.extract`
        """
        for prop in self._propagators:
            context = prop.extract(carrier, context, getter=getter)
        return context  # type: ignore

    def inject(
        self,
        carrier: textmap.CarrierT,
        context: typing.Optional[Context] = None,
        setter: textmap.Setter[textmap.CarrierT] = textmap.default_setter,
    ) -> None:
        """Run each configured propagator against the given context/carrier.

        Propagators run in configuration order; when several propagators
        write the same carrier key, the last one in the list wins.

        See `opentelemetry.propagators.textmap.TextMapPropagator.inject`
        """
        for prop in self._propagators:
            prop.inject(carrier, context, setter=setter)

    @property
    def fields(self) -> typing.Set[str]:
        """Return the union of the fields set in `inject` by all propagators.

        See `opentelemetry.propagators.textmap.TextMapPropagator.fields`
        """
        return {
            field
            for prop in self._propagators
            for field in prop.fields
        }
|
||||
|
||||
|
||||
@deprecated(version="1.2.0", reason="You should use CompositePropagator") # type: ignore
|
||||
class CompositeHTTPPropagator(CompositePropagator):
|
||||
"""CompositeHTTPPropagator provides a mechanism for combining multiple
|
||||
propagators into a single one.
|
||||
"""
|
||||
@@ -0,0 +1,197 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import abc
|
||||
import typing
|
||||
|
||||
from opentelemetry.context.context import Context
|
||||
|
||||
CarrierT = typing.TypeVar("CarrierT")
|
||||
# pylint: disable=invalid-name
|
||||
CarrierValT = typing.Union[typing.List[str], str]
|
||||
|
||||
|
||||
class Getter(abc.ABC, typing.Generic[CarrierT]):
    """This class implements a Getter that enables extracting propagated
    fields from a carrier.
    """

    @abc.abstractmethod
    def get(
        self, carrier: CarrierT, key: str
    ) -> typing.Optional[typing.List[str]]:
        """Retrieve zero or more values for *key* from the carrier.
        In the case that the value does not exist, returns None.

        Args:
            carrier: An object which contains values that are used to
                construct a Context.
            key: key of a field in carrier.
        Returns: the values of the propagation key, or None if the key
            doesn't exist.
        """

    @abc.abstractmethod
    def keys(self, carrier: CarrierT) -> typing.List[str]:
        """Retrieve all the keys in a carrier object.

        Args:
            carrier: An object which contains values that are
                used to construct a Context.
        Returns:
            list of keys from the carrier.
        """
|
||||
|
||||
|
||||
class Setter(abc.ABC, typing.Generic[CarrierT]):
    """This class implements a Setter that enables injecting propagated
    fields into a carrier.
    """

    @abc.abstractmethod
    def set(self, carrier: CarrierT, key: str, value: str) -> None:
        # Fixed the malformed docstring opener ('"""...""'), which left a
        # stray '""' inside the summary line.
        """Function that can set a value into a carrier.

        Args:
            carrier: An object which contains values that are used to
                construct a Context.
            key: key of a field in carrier.
            value: value for a field in carrier.
        """
|
||||
|
||||
|
||||
class DefaultGetter(Getter[typing.Mapping[str, CarrierValT]]):
    def get(
        self, carrier: typing.Mapping[str, CarrierValT], key: str
    ) -> typing.Optional[typing.List[str]]:
        """Getter implementation to retrieve a value from a dictionary.

        Args:
            carrier: dictionary in which to get value
            key: the key used to get the value
        Returns:
            A list with a single string with the value if it exists, else None.
        """
        value = carrier.get(key)
        if value is None:
            return None
        # Strings are iterable but must be wrapped, not expanded char-by-char.
        if isinstance(value, str) or not isinstance(value, typing.Iterable):
            return [value]
        return list(value)

    def keys(
        self, carrier: typing.Mapping[str, CarrierValT]
    ) -> typing.List[str]:
        """Keys implementation that returns all keys from a dictionary."""
        return list(carrier)


default_getter: Getter[CarrierT] = DefaultGetter()  # type: ignore
|
||||
|
||||
|
||||
class DefaultSetter(Setter[typing.MutableMapping[str, CarrierValT]]):
    def set(
        self,
        carrier: typing.MutableMapping[str, CarrierValT],
        key: str,
        value: CarrierValT,
    ) -> None:
        """Setter implementation that stores a value into a dictionary.

        Args:
            carrier: dictionary in which to set value
            key: the key used to set the value
            value: the value to set
        """
        carrier[key] = value


default_setter: Setter[CarrierT] = DefaultSetter()  # type: ignore
|
||||
|
||||
|
||||
class TextMapPropagator(abc.ABC):
    """This class provides an interface that enables extracting and injecting
    context into headers of HTTP requests. HTTP frameworks and clients
    can integrate with TextMapPropagator by providing the object containing the
    headers, and a getter and setter function for the extraction and
    injection of values, respectively.

    """

    @abc.abstractmethod
    def extract(
        self,
        carrier: CarrierT,
        context: typing.Optional[Context] = None,
        getter: Getter[CarrierT] = default_getter,
    ) -> Context:
        """Create a Context from values in the carrier.

        The extract function should retrieve values from the carrier
        object using getter, and use values to populate a
        Context value and return it.

        Args:
            getter: a function that can retrieve zero
                or more values from the carrier. In the case that
                the value does not exist, return an empty list.
            carrier: an object which contains values that are
                used to construct a Context. This object
                must be paired with an appropriate getter
                which understands how to extract a value from it.
            context: an optional Context to use. Defaults to root
                context if not set.
        Returns:
            A Context with configuration found in the carrier.

        """

    @abc.abstractmethod
    def inject(
        self,
        carrier: CarrierT,
        context: typing.Optional[Context] = None,
        setter: Setter[CarrierT] = default_setter,
    ) -> None:
        """Inject values from a Context into a carrier.

        inject enables the propagation of values into HTTP clients or
        other objects which perform an HTTP request. Implementations
        should use the `Setter` 's set method to set values on the
        carrier.

        Args:
            carrier: An object that provides a place to define HTTP headers.
                Should be paired with setter, which should
                know how to set header values on the carrier.
            context: an optional Context to use. Defaults to current
                context if not set.
            setter: An optional `Setter` object that can set values
                on the carrier.

        """

    @property
    @abc.abstractmethod
    def fields(self) -> typing.Set[str]:
        """
        Gets the fields set in the carrier by the `inject` method.

        If the carrier is reused, its fields that correspond with the ones
        present in this attribute should be deleted before calling `inject`.

        Returns:
            A set with the fields set in `inject`.
        """
|
||||
@@ -0,0 +1,206 @@
|
||||
# Copyright 2021 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import os
|
||||
|
||||
import requests
|
||||
from opentelemetry.context import attach, detach, set_value
|
||||
from opentelemetry.sdk.resources import Resource, ResourceDetector
|
||||
|
||||
_GCP_METADATA_URL = (
|
||||
"http://metadata.google.internal/computeMetadata/v1/?recursive=true"
|
||||
)
|
||||
_GCP_METADATA_URL_HEADER = {"Metadata-Flavor": "Google"}
|
||||
_TIMEOUT_SEC = 5
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _get_google_metadata_and_common_attributes():
    """Fetch the GCE metadata document and derive common resource attributes.

    Returns:
        A tuple ``(common_attributes, all_metadata)`` where
        ``common_attributes`` contains the cloud account/provider/zone keys
        and ``all_metadata`` is the full decoded metadata JSON.
    """
    # Suppress instrumentation so the metadata request itself is not traced.
    token = attach(set_value("suppress_instrumentation", True))
    try:
        all_metadata = requests.get(
            _GCP_METADATA_URL,
            headers=_GCP_METADATA_URL_HEADER,
            timeout=_TIMEOUT_SEC,
        ).json()
    finally:
        # Always restore the context, even if the metadata request raises;
        # the original code leaked the attached token on failure.
        detach(token)
    common_attributes = {
        "cloud.account.id": all_metadata["project"]["projectId"],
        "cloud.provider": "gcp",
        "cloud.zone": all_metadata["instance"]["zone"].split("/")[-1],
    }
    return common_attributes, all_metadata
|
||||
|
||||
|
||||
def get_gce_resources():
    """Resource finder for common GCE attributes

    See: https://cloud.google.com/compute/docs/storing-retrieving-metadata
    """
    (
        attributes,
        metadata,
    ) = _get_google_metadata_and_common_attributes()
    attributes["host.id"] = metadata["instance"]["id"]
    attributes["gcp.resource_type"] = "gce_instance"
    return attributes
|
||||
|
||||
|
||||
def get_gke_resources():
    """Resource finder for GKE attributes"""

    # Only meaningful when running inside a Kubernetes pod.
    if os.getenv("KUBERNETES_SERVICE_HOST") is None:
        return {}

    (
        attributes,
        metadata,
    ) = _get_google_metadata_and_common_attributes()

    container_name = os.getenv("CONTAINER_NAME")
    if container_name is not None:
        attributes["container.name"] = container_name

    # Fall back to reading the namespace from the service-account file if the
    # env var is not set.
    pod_namespace = os.getenv("NAMESPACE")
    if pod_namespace is None:
        try:
            with open(
                "/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r"
            ) as namespace_file:
                pod_namespace = namespace_file.read().strip()
        except FileNotFoundError:
            pod_namespace = ""

    attributes.update(
        {
            "k8s.cluster.name": metadata["instance"]["attributes"][
                "cluster-name"
            ],
            "k8s.namespace.name": pod_namespace,
            "k8s.pod.name": os.getenv("POD_NAME", os.getenv("HOSTNAME", "")),
            "host.id": metadata["instance"]["id"],
            "gcp.resource_type": "gke_container",
        }
    )
    return attributes
|
||||
|
||||
|
||||
def get_cloudrun_resources():
    """Resource finder for Cloud Run attributes"""

    # K_CONFIGURATION is only present on Cloud Run.
    if os.getenv("K_CONFIGURATION") is None:
        return {}

    (
        attributes,
        metadata,
    ) = _get_google_metadata_and_common_attributes()

    for env_var, attr_key in (
        ("K_SERVICE", "faas.name"),
        ("K_REVISION", "faas.version"),
    ):
        env_value = os.getenv(env_var)
        if env_value is not None:
            attributes[attr_key] = str(env_value)

    attributes.update(
        {
            "cloud.platform": "gcp_cloud_run",
            "cloud.region": metadata["instance"]["region"].split("/")[-1],
            "faas.instance": metadata["instance"]["id"],
            "gcp.resource_type": "cloud_run",
        }
    )
    return attributes
|
||||
|
||||
|
||||
def get_cloudfunctions_resources():
    """Resource finder for Cloud Functions attributes"""

    # FUNCTION_TARGET is only present on Cloud Functions.
    if os.getenv("FUNCTION_TARGET") is None:
        return {}

    (
        attributes,
        metadata,
    ) = _get_google_metadata_and_common_attributes()

    for env_var, attr_key in (
        ("K_SERVICE", "faas.name"),
        ("K_REVISION", "faas.version"),
    ):
        env_value = os.getenv(env_var)
        if env_value is not None:
            attributes[attr_key] = str(env_value)

    attributes.update(
        {
            "cloud.platform": "gcp_cloud_functions",
            "cloud.region": metadata["instance"]["region"].split("/")[-1],
            "faas.instance": metadata["instance"]["id"],
            "gcp.resource_type": "cloud_functions",
        }
    )
    return attributes
|
||||
|
||||
|
||||
# Order here matters. Since a GKE_CONTAINER is a specialized type of GCE_INSTANCE
# we need to first check if it matches the criteria for being a GKE_CONTAINER
# before falling back and checking if it's a GCE_INSTANCE.
# This list should be sorted from most specialized to least specialized.
_RESOURCE_FINDERS = [
    ("gke_container", get_gke_resources),
    ("cloud_run", get_cloudrun_resources),
    ("cloud_functions", get_cloudfunctions_resources),
    ("gce_instance", get_gce_resources),
]
|
||||
|
||||
|
||||
class NoGoogleResourcesFound(Exception):
    """Raised by GoogleCloudResourceDetector.detect when raise_on_error is
    set and no Google Cloud resource attributes could be detected."""

    pass
|
||||
|
||||
|
||||
class GoogleCloudResourceDetector(ResourceDetector):
    """Detects resource attributes for workloads running on Google Cloud.

    Tries each finder in _RESOURCE_FINDERS (most specialized first) and
    memoizes the first non-empty result for subsequent detect() calls.
    """

    def __init__(self, raise_on_error=False):
        super().__init__(raise_on_error)
        # Detection runs at most once; the result is cached in gcp_resources.
        self.cached = False
        self.gcp_resources = {}

    def detect(self) -> "Resource":
        if not self.cached:
            self.cached = True
            for resource_type, resource_finder in _RESOURCE_FINDERS:
                try:
                    found_resources = resource_finder()
                # pylint: disable=broad-except
                except Exception as ex:
                    # Fixed typo in the log message ("occured" -> "occurred").
                    logger.warning(
                        "Exception %s occurred attempting %s resource detection",
                        ex,
                        resource_type,
                    )
                    found_resources = None
                if found_resources:
                    self.gcp_resources = found_resources
                    break
            if self.raise_on_error and not self.gcp_resources:
                raise NoGoogleResourcesFound()
        return Resource(self.gcp_resources)
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,69 @@
|
||||
# Copyright 2023 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# https://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# TODO: use opentelemetry-semantic-conventions package for these constants once it has
# stabilized. Right now, pinning an unstable version would cause dependency conflicts for
# users so these are copied in.
class ResourceAttributes:
    # Attribute keys (and a few platform values) copied from the
    # OpenTelemetry semantic conventions.
    AWS_EC2 = "aws_ec2"
    CLOUD_ACCOUNT_ID = "cloud.account.id"
    CLOUD_AVAILABILITY_ZONE = "cloud.availability_zone"
    CLOUD_PLATFORM_KEY = "cloud.platform"
    CLOUD_PROVIDER = "cloud.provider"
    CLOUD_REGION = "cloud.region"
    FAAS_INSTANCE = "faas.instance"
    FAAS_NAME = "faas.name"
    FAAS_VERSION = "faas.version"
    GCP_APP_ENGINE = "gcp_app_engine"
    GCP_CLOUD_FUNCTIONS = "gcp_cloud_functions"
    GCP_CLOUD_RUN = "gcp_cloud_run"
    GCP_COMPUTE_ENGINE = "gcp_compute_engine"
    GCP_KUBERNETES_ENGINE = "gcp_kubernetes_engine"
    HOST_ID = "host.id"
    HOST_NAME = "host.name"
    HOST_TYPE = "host.type"
    K8S_CLUSTER_NAME = "k8s.cluster.name"
    K8S_CONTAINER_NAME = "k8s.container.name"
    K8S_NAMESPACE_NAME = "k8s.namespace.name"
    K8S_NODE_NAME = "k8s.node.name"
    K8S_POD_NAME = "k8s.pod.name"
    SERVICE_INSTANCE_ID = "service.instance.id"
    SERVICE_NAME = "service.name"
    SERVICE_NAMESPACE = "service.namespace"


# NOTE(review): the module-level constants below look like Google Cloud
# monitored-resource type and label names — confirm against the code that
# consumes them.
AWS_ACCOUNT = "aws_account"
AWS_EC2_INSTANCE = "aws_ec2_instance"
CLUSTER_NAME = "cluster_name"
CONTAINER_NAME = "container_name"
GCE_INSTANCE = "gce_instance"
GENERIC_NODE = "generic_node"
GENERIC_TASK = "generic_task"
INSTANCE_ID = "instance_id"
JOB = "job"
K8S_CLUSTER = "k8s_cluster"
K8S_CONTAINER = "k8s_container"
K8S_NODE = "k8s_node"
K8S_POD = "k8s_pod"
LOCATION = "location"
NAMESPACE = "namespace"
NAMESPACE_NAME = "namespace_name"
NODE_ID = "node_id"
NODE_NAME = "node_name"
POD_NAME = "pod_name"
REGION = "region"
TASK_ID = "task_id"
ZONE = "zone"
UNKNOWN_SERVICE_PREFIX = "unknown_service"
|
||||
@@ -0,0 +1,140 @@
|
||||
# Copyright 2023 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# https://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from typing import Mapping
|
||||
|
||||
from opentelemetry.resourcedetector.gcp_resource_detector import (
|
||||
_faas,
|
||||
_gae,
|
||||
_gce,
|
||||
_gke,
|
||||
_metadata,
|
||||
)
|
||||
from opentelemetry.resourcedetector.gcp_resource_detector._constants import (
|
||||
ResourceAttributes,
|
||||
)
|
||||
from opentelemetry.sdk.resources import Resource, ResourceDetector
|
||||
from opentelemetry.util.types import AttributeValue
|
||||
|
||||
|
||||
class GoogleCloudResourceDetector(ResourceDetector):
    """Builds a Resource describing the GCP platform the process runs on
    (GKE, Cloud Functions, Cloud Run, GAE, or GCE), or an empty Resource
    when the metadata server is unavailable or no platform matches.
    """

    def detect(self) -> Resource:
        # pylint: disable=too-many-return-statements
        if not _metadata.is_available():
            return Resource.get_empty()

        # Checked from most specialized platform to least specialized;
        # the order matches the original explicit if-chain.
        platform_checks = (
            (_gke.on_gke, _gke_resource),
            (_faas.on_cloud_functions, _cloud_functions_resource),
            (_faas.on_cloud_run, _cloud_run_resource),
            (_gae.on_app_engine, _gae_resource),
            (_gce.on_gce, _gce_resource),
        )
        for on_platform, build_resource in platform_checks:
            if on_platform():
                return build_resource()

        return Resource.get_empty()
|
||||
|
||||
|
||||
def _gke_resource() -> Resource:
    """Build the Resource for a workload running on GKE."""
    zone_or_region = _gke.availability_zone_or_region()
    # The metadata reports either a zone or a region; pick the matching key.
    if zone_or_region.type == "zone":
        location_key = ResourceAttributes.CLOUD_AVAILABILITY_ZONE
    else:
        location_key = ResourceAttributes.CLOUD_REGION
    return _make_resource(
        {
            ResourceAttributes.CLOUD_PLATFORM_KEY: ResourceAttributes.GCP_KUBERNETES_ENGINE,
            location_key: zone_or_region.value,
            ResourceAttributes.K8S_CLUSTER_NAME: _gke.cluster_name(),
            ResourceAttributes.HOST_ID: _gke.host_id(),
        }
    )
|
||||
|
||||
|
||||
def _gce_resource() -> Resource:
    """Build the Resource for a plain GCE instance."""
    zone_and_region = _gce.availability_zone_and_region()
    attrs = {
        ResourceAttributes.CLOUD_PLATFORM_KEY: ResourceAttributes.GCP_COMPUTE_ENGINE
    }
    attrs[ResourceAttributes.CLOUD_AVAILABILITY_ZONE] = zone_and_region.zone
    attrs[ResourceAttributes.CLOUD_REGION] = zone_and_region.region
    attrs[ResourceAttributes.HOST_TYPE] = _gce.host_type()
    attrs[ResourceAttributes.HOST_ID] = _gce.host_id()
    attrs[ResourceAttributes.HOST_NAME] = _gce.host_name()
    return _make_resource(attrs)
|
||||
|
||||
|
||||
def _cloud_run_resource() -> Resource:
    """Build the Resource for a Cloud Run service."""
    attrs = {
        ResourceAttributes.CLOUD_PLATFORM_KEY: ResourceAttributes.GCP_CLOUD_RUN
    }
    attrs[ResourceAttributes.FAAS_NAME] = _faas.faas_name()
    attrs[ResourceAttributes.FAAS_VERSION] = _faas.faas_version()
    attrs[ResourceAttributes.FAAS_INSTANCE] = _faas.faas_instance()
    attrs[ResourceAttributes.CLOUD_REGION] = _faas.faas_cloud_region()
    return _make_resource(attrs)
|
||||
|
||||
|
||||
def _cloud_functions_resource() -> Resource:
    """Build the Resource for a Cloud Function."""
    attrs = {
        ResourceAttributes.CLOUD_PLATFORM_KEY: ResourceAttributes.GCP_CLOUD_FUNCTIONS
    }
    attrs[ResourceAttributes.FAAS_NAME] = _faas.faas_name()
    attrs[ResourceAttributes.FAAS_VERSION] = _faas.faas_version()
    attrs[ResourceAttributes.FAAS_INSTANCE] = _faas.faas_instance()
    attrs[ResourceAttributes.CLOUD_REGION] = _faas.faas_cloud_region()
    return _make_resource(attrs)
|
||||
|
||||
|
||||
def _gae_resource() -> Resource:
    """Build the Resource for an App Engine service (standard or flex)."""
    if _gae.on_app_engine_standard():
        zone = _gae.standard_availability_zone()
        region = _gae.standard_cloud_region()
    else:
        flex_zone_region = _gae.flex_availability_zone_and_region()
        zone, region = flex_zone_region.zone, flex_zone_region.region

    return _make_resource(
        {
            ResourceAttributes.CLOUD_PLATFORM_KEY: ResourceAttributes.GCP_APP_ENGINE,
            ResourceAttributes.FAAS_NAME: _gae.service_name(),
            ResourceAttributes.FAAS_VERSION: _gae.service_version(),
            ResourceAttributes.FAAS_INSTANCE: _gae.service_instance(),
            ResourceAttributes.CLOUD_AVAILABILITY_ZONE: zone,
            ResourceAttributes.CLOUD_REGION: region,
        }
    )
|
||||
|
||||
|
||||
def _make_resource(attrs: Mapping[str, AttributeValue]) -> Resource:
    """Wrap *attrs* in a Resource, adding the provider and project-id keys."""
    project_id = _metadata.get_metadata()["project"]["projectId"]
    base_attrs = {
        ResourceAttributes.CLOUD_PROVIDER: "gcp",
        ResourceAttributes.CLOUD_ACCOUNT_ID: project_id,
    }
    # Caller-supplied attributes win over the base keys, as with the
    # original in-literal expansion.
    return Resource({**base_attrs, **attrs})
|
||||
@@ -0,0 +1,60 @@
|
||||
# Copyright 2024 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# https://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Implementation in this file copied from
|
||||
# https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/v1.8.0/detectors/gcp/faas.go
|
||||
|
||||
import os
|
||||
|
||||
from opentelemetry.resourcedetector.gcp_resource_detector import _metadata
|
||||
|
||||
_CLOUD_RUN_CONFIG_ENV = "K_CONFIGURATION"
|
||||
_CLOUD_FUNCTION_TARGET_ENV = "FUNCTION_TARGET"
|
||||
_FAAS_SERVICE_ENV = "K_SERVICE"
|
||||
_FAAS_REVISION_ENV = "K_REVISION"
|
||||
|
||||
|
||||
def on_cloud_run() -> bool:
|
||||
return _CLOUD_RUN_CONFIG_ENV in os.environ
|
||||
|
||||
|
||||
def on_cloud_functions() -> bool:
|
||||
return _CLOUD_FUNCTION_TARGET_ENV in os.environ
|
||||
|
||||
|
||||
def faas_name() -> str:
|
||||
"""The name of the Cloud Run or Cloud Function.
|
||||
|
||||
Check that on_cloud_run() or on_cloud_functions() is true before calling this, or it may
|
||||
throw exceptions.
|
||||
"""
|
||||
return os.environ[_FAAS_SERVICE_ENV]
|
||||
|
||||
|
||||
def faas_version() -> str:
|
||||
"""The version/revision of the Cloud Run or Cloud Function.
|
||||
|
||||
Check that on_cloud_run() or on_cloud_functions() is true before calling this, or it may
|
||||
throw exceptions.
|
||||
"""
|
||||
return os.environ[_FAAS_REVISION_ENV]
|
||||
|
||||
|
||||
def faas_instance() -> str:
|
||||
return str(_metadata.get_metadata()["instance"]["id"])
|
||||
|
||||
|
||||
def faas_cloud_region() -> str:
|
||||
region = _metadata.get_metadata()["instance"]["region"]
|
||||
return region[region.rfind("/") + 1 :]
|
||||
@@ -0,0 +1,88 @@
|
||||
# Copyright 2024 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# https://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Implementation in this file copied from
|
||||
# https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/v1.8.0/detectors/gcp/app_engine.go
|
||||
|
||||
import os
|
||||
|
||||
from opentelemetry.resourcedetector.gcp_resource_detector import (
|
||||
_faas,
|
||||
_gce,
|
||||
_metadata,
|
||||
)
|
||||
|
||||
# Environment variables set by the App Engine runtime.
_GAE_SERVICE_ENV = "GAE_SERVICE"
_GAE_VERSION_ENV = "GAE_VERSION"
_GAE_INSTANCE_ENV = "GAE_INSTANCE"
# GAE_ENV equals "standard" on the App Engine standard environment.
_GAE_ENV = "GAE_ENV"
_GAE_STANDARD = "standard"
|
||||
|
||||
|
||||
def on_app_engine_standard() -> bool:
    """Return True when running on the App Engine standard environment."""
    gae_env = os.environ.get(_GAE_ENV)
    return gae_env == _GAE_STANDARD
|
||||
|
||||
|
||||
def on_app_engine() -> bool:
    """Return True when the App Engine service env var is present."""
    return os.environ.get(_GAE_SERVICE_ENV) is not None
|
||||
|
||||
|
||||
def service_name() -> str:
    """The service name of the app engine service.

    Check that ``on_app_engine()`` is true before calling this, or it may throw exceptions.
    """
    # Raises KeyError if GAE_SERVICE is unset.
    return os.environ[_GAE_SERVICE_ENV]
|
||||
|
||||
|
||||
def service_version() -> str:
    """The service version of the app engine service.

    Check that ``on_app_engine()`` is true before calling this, or it may throw exceptions.
    """
    # Raises KeyError if GAE_VERSION is unset.
    return os.environ[_GAE_VERSION_ENV]
|
||||
|
||||
|
||||
def service_instance() -> str:
    """The service instance of the app engine service.

    Check that ``on_app_engine()`` is true before calling this, or it may throw exceptions.
    """
    # Raises KeyError if GAE_INSTANCE is unset.
    return os.environ[_GAE_INSTANCE_ENV]
|
||||
|
||||
|
||||
def flex_availability_zone_and_region() -> _gce.ZoneAndRegion:
    """The zone and region in which this program is running.

    Check that ``on_app_engine()`` is true before calling this, or it may throw exceptions.
    """
    # Delegates to the GCE detector's metadata-based zone/region lookup.
    return _gce.availability_zone_and_region()
|
||||
|
||||
|
||||
def standard_availability_zone() -> str:
    """The zone the app engine service is running in.

    Check that ``on_app_engine_standard()`` is true before calling this, or it may throw exceptions.
    """
    full_zone = _metadata.get_metadata()["instance"]["zone"]
    # full_zone is of the form "projects/233510669999/zones/us15";
    # keep only the final path segment.
    return full_zone.rpartition("/")[2]
|
||||
|
||||
|
||||
def standard_cloud_region() -> str:
    """The region the app engine service is running in.

    Check that ``on_app_engine_standard()`` is true before calling this, or it may throw exceptions.
    """
    # Region is served by the same metadata path the FaaS detector uses.
    return _faas.faas_cloud_region()
|
||||
@@ -0,0 +1,72 @@
|
||||
# Copyright 2023 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# https://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
import re
|
||||
from dataclasses import dataclass
|
||||
|
||||
from opentelemetry.resourcedetector.gcp_resource_detector import _metadata
|
||||
|
||||
# Format described in
|
||||
# https://cloud.google.com/compute/docs/metadata/default-metadata-values#vm_instance_metadata
|
||||
# Format described in
# https://cloud.google.com/compute/docs/metadata/default-metadata-values#vm_instance_metadata
# Matches e.g. "projects/123/zones/us-central1-a", capturing the full zone
# ("us-central1-a") and its region prefix ("us-central1").
_ZONE_REGION_RE = re.compile(
    r"projects\/\d+\/zones\/(?P<zone>(?P<region>\w+-\w+)-\w+)"
)

_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def on_gce() -> bool:
    """Return True when the GCE metadata attribute instance/machineType is readable.

    Treats both an unreachable metadata server (MetadataAccessException) and a
    response missing the attribute (KeyError) as "not on GCE".
    """
    try:
        _metadata.get_metadata()["instance"]["machineType"]
    except (_metadata.MetadataAccessException, KeyError):
        _logger.debug(
            "Could not fetch metadata attribute instance/machineType, "
            "assuming not on GCE.",
            exc_info=True,
        )
        return False
    return True
|
||||
|
||||
|
||||
def host_type() -> str:
    """Return the GCE machine type from the metadata server."""
    instance = _metadata.get_metadata()["instance"]
    return instance["machineType"]
|
||||
|
||||
|
||||
def host_id() -> str:
    """Return the instance id from the metadata server, normalized to str."""
    instance = _metadata.get_metadata()["instance"]
    return str(instance["id"])
|
||||
|
||||
|
||||
def host_name() -> str:
    """Return the instance name from the metadata server."""
    instance = _metadata.get_metadata()["instance"]
    return instance["name"]
|
||||
|
||||
|
||||
@dataclass
class ZoneAndRegion:
    """Availability zone and region pair parsed from the metadata zone path."""

    # e.g. "us-central1-a"
    zone: str
    # e.g. "us-central1"
    region: str
|
||||
|
||||
|
||||
def availability_zone_and_region() -> ZoneAndRegion:
    """Parse the availability zone and region from the metadata server.

    Returns:
        ZoneAndRegion, e.g. zone="us-central1-a", region="us-central1".

    Raises:
        ValueError: if the metadata zone value does not match the
            projects/PROJECT_NUM/zones/COUNTRY-REGION-ZONE format.
    """
    full_zone = _metadata.get_metadata()["instance"]["zone"]
    match = _ZONE_REGION_RE.search(full_zone)
    if not match:
        # ValueError is more precise than a bare Exception and remains
        # backward compatible (callers catching Exception still catch it).
        raise ValueError(
            "zone was not in the expected format: "
            f"projects/PROJECT_NUM/zones/COUNTRY-REGION-ZONE. Got {full_zone}"
        )

    return ZoneAndRegion(
        zone=match.group("zone"), region=match.group("region")
    )
|
||||
@@ -0,0 +1,58 @@
|
||||
# Copyright 2023 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# https://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import os
|
||||
from dataclasses import dataclass
|
||||
|
||||
from opentelemetry.resourcedetector.gcp_resource_detector import (
|
||||
_gce,
|
||||
_metadata,
|
||||
)
|
||||
|
||||
# TODO: remove when Python 3.7 is dropped
|
||||
from typing_extensions import Literal
|
||||
|
||||
# Env var set inside Kubernetes pods; used to detect GKE.
KUBERNETES_SERVICE_HOST_ENV = "KUBERNETES_SERVICE_HOST"
|
||||
|
||||
|
||||
def on_gke() -> bool:
    """Return True when the Kubernetes service host env var is set."""
    return KUBERNETES_SERVICE_HOST_ENV in os.environ
|
||||
|
||||
|
||||
def host_id() -> str:
    """Return the host (VM) id; GKE nodes are GCE instances."""
    return _gce.host_id()
|
||||
|
||||
|
||||
def cluster_name() -> str:
    """Return the GKE cluster name from instance metadata attributes."""
    attributes = _metadata.get_metadata()["instance"]["attributes"]
    return attributes["cluster-name"]
|
||||
|
||||
|
||||
@dataclass
class ZoneOrRegion:
    """A GKE cluster location tagged as either zonal or regional."""

    # Discriminates whether value names a zone or a region.
    type: Literal["zone", "region"]
    # The location string, e.g. "us-central1-a" or "us-central1".
    value: str
|
||||
|
||||
|
||||
def availability_zone_or_region() -> ZoneOrRegion:
    """Classify the GKE cluster location as a zone or a region.

    A regional location like "us-central1" contains one hyphen; a zonal
    location like "us-central1-a" contains two.

    Raises:
        ValueError: if the cluster-location attribute has an unrecognized
            format.
    """
    cluster_location = _metadata.get_metadata()["instance"]["attributes"][
        "cluster-location"
    ]
    hyphen_count = cluster_location.count("-")
    if hyphen_count == 1:
        return ZoneOrRegion(type="region", value=cluster_location)
    if hyphen_count == 2:
        return ZoneOrRegion(type="zone", value=cluster_location)
    # ValueError is more precise than a bare Exception and remains backward
    # compatible (callers catching Exception still catch it).
    raise ValueError(
        f"unrecognized format for cluster location: {cluster_location}"
    )
|
||||
@@ -0,0 +1,222 @@
|
||||
# Copyright 2023 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# https://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import json
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, Mapping, Optional, Tuple
|
||||
|
||||
from opentelemetry.resourcedetector.gcp_resource_detector import _constants
|
||||
from opentelemetry.resourcedetector.gcp_resource_detector._constants import (
|
||||
ResourceAttributes,
|
||||
)
|
||||
from opentelemetry.sdk.resources import Attributes, Resource
|
||||
|
||||
|
||||
class MapConfig:
    """How one monitored-resource label is populated from an OTel resource."""

    # OTel resource keys to try, in order; the first non-empty value wins.
    otel_keys: Tuple[str, ...]
    # Literal value used when none of the otel_keys are present.
    fallback: str

    def __init__(self, *otel_keys: str, fallback: str = ""):
        self.otel_keys = otel_keys
        self.fallback = fallback
|
||||
|
||||
|
||||
# Mappings of GCM resource label keys onto mapping config from OTel resource for a given
# monitored resource type. Copied from Go impl:
# https://github.com/GoogleCloudPlatform/opentelemetry-operations-go/blob/v1.8.0/internal/resourcemapping/resourcemapping.go#L51
# Keys are monitored resource types; values map each required GCM label to
# the MapConfig describing which OTel attributes can populate it.
MAPPINGS = {
    _constants.GCE_INSTANCE: {
        _constants.ZONE: MapConfig(ResourceAttributes.CLOUD_AVAILABILITY_ZONE),
        _constants.INSTANCE_ID: MapConfig(ResourceAttributes.HOST_ID),
    },
    _constants.K8S_CONTAINER: {
        _constants.LOCATION: MapConfig(
            ResourceAttributes.CLOUD_AVAILABILITY_ZONE,
            ResourceAttributes.CLOUD_REGION,
        ),
        _constants.CLUSTER_NAME: MapConfig(
            ResourceAttributes.K8S_CLUSTER_NAME
        ),
        _constants.NAMESPACE_NAME: MapConfig(
            ResourceAttributes.K8S_NAMESPACE_NAME
        ),
        _constants.POD_NAME: MapConfig(ResourceAttributes.K8S_POD_NAME),
        _constants.CONTAINER_NAME: MapConfig(
            ResourceAttributes.K8S_CONTAINER_NAME
        ),
    },
    _constants.K8S_POD: {
        _constants.LOCATION: MapConfig(
            ResourceAttributes.CLOUD_AVAILABILITY_ZONE,
            ResourceAttributes.CLOUD_REGION,
        ),
        _constants.CLUSTER_NAME: MapConfig(
            ResourceAttributes.K8S_CLUSTER_NAME
        ),
        _constants.NAMESPACE_NAME: MapConfig(
            ResourceAttributes.K8S_NAMESPACE_NAME
        ),
        _constants.POD_NAME: MapConfig(ResourceAttributes.K8S_POD_NAME),
    },
    _constants.K8S_NODE: {
        _constants.LOCATION: MapConfig(
            ResourceAttributes.CLOUD_AVAILABILITY_ZONE,
            ResourceAttributes.CLOUD_REGION,
        ),
        _constants.CLUSTER_NAME: MapConfig(
            ResourceAttributes.K8S_CLUSTER_NAME
        ),
        _constants.NODE_NAME: MapConfig(ResourceAttributes.K8S_NODE_NAME),
    },
    _constants.K8S_CLUSTER: {
        _constants.LOCATION: MapConfig(
            ResourceAttributes.CLOUD_AVAILABILITY_ZONE,
            ResourceAttributes.CLOUD_REGION,
        ),
        _constants.CLUSTER_NAME: MapConfig(
            ResourceAttributes.K8S_CLUSTER_NAME
        ),
    },
    _constants.AWS_EC2_INSTANCE: {
        _constants.INSTANCE_ID: MapConfig(ResourceAttributes.HOST_ID),
        _constants.REGION: MapConfig(
            ResourceAttributes.CLOUD_AVAILABILITY_ZONE,
            ResourceAttributes.CLOUD_REGION,
        ),
        _constants.AWS_ACCOUNT: MapConfig(ResourceAttributes.CLOUD_ACCOUNT_ID),
    },
    _constants.GENERIC_TASK: {
        _constants.LOCATION: MapConfig(
            ResourceAttributes.CLOUD_AVAILABILITY_ZONE,
            ResourceAttributes.CLOUD_REGION,
            fallback="global",
        ),
        _constants.NAMESPACE: MapConfig(ResourceAttributes.SERVICE_NAMESPACE),
        _constants.JOB: MapConfig(
            ResourceAttributes.SERVICE_NAME,
            ResourceAttributes.FAAS_NAME,
        ),
        _constants.TASK_ID: MapConfig(
            ResourceAttributes.SERVICE_INSTANCE_ID,
            ResourceAttributes.FAAS_INSTANCE,
        ),
    },
    _constants.GENERIC_NODE: {
        _constants.LOCATION: MapConfig(
            ResourceAttributes.CLOUD_AVAILABILITY_ZONE,
            ResourceAttributes.CLOUD_REGION,
            fallback="global",
        ),
        _constants.NAMESPACE: MapConfig(ResourceAttributes.SERVICE_NAMESPACE),
        _constants.NODE_ID: MapConfig(
            ResourceAttributes.HOST_ID, ResourceAttributes.HOST_NAME
        ),
    },
}
|
||||
|
||||
|
||||
@dataclass
class MonitoredResourceData:
    """Dataclass representing a protobuf monitored resource. Make sure to convert to a protobuf
    if needed."""

    # Monitored resource type, e.g. "gce_instance" or "generic_task".
    type: str
    # Label key/value pairs required by that monitored resource type.
    labels: Mapping[str, str]
|
||||
|
||||
|
||||
def get_monitored_resource(
    resource: Resource,
) -> Optional[MonitoredResourceData]:
    """Add Google resource specific information (e.g. instance id, region).

    See
    https://cloud.google.com/monitoring/custom-metrics/creating-metrics#custom-metric-resources
    for supported types

    Args:
        resource: OTel resource
    """
    attrs = resource.attributes
    platform = attrs.get(ResourceAttributes.CLOUD_PLATFORM_KEY)

    if platform == ResourceAttributes.GCP_COMPUTE_ENGINE:
        return _create_monitored_resource(_constants.GCE_INSTANCE, attrs)

    if platform == ResourceAttributes.GCP_KUBERNETES_ENGINE:
        # Pick the most specific k8s type available in the attributes.
        if ResourceAttributes.K8S_CONTAINER_NAME in attrs:
            k8s_type = _constants.K8S_CONTAINER
        elif ResourceAttributes.K8S_POD_NAME in attrs:
            k8s_type = _constants.K8S_POD
        elif ResourceAttributes.K8S_NODE_NAME in attrs:
            k8s_type = _constants.K8S_NODE
        else:
            k8s_type = _constants.K8S_CLUSTER
        return _create_monitored_resource(k8s_type, attrs)

    if platform == ResourceAttributes.AWS_EC2:
        return _create_monitored_resource(_constants.AWS_EC2_INSTANCE, attrs)

    # Fallback: generic_task when the resource identifies both a job and a
    # task instance, otherwise generic_node.
    has_job = (
        ResourceAttributes.SERVICE_NAME in attrs
        or ResourceAttributes.FAAS_NAME in attrs
    )
    has_task_id = (
        ResourceAttributes.SERVICE_INSTANCE_ID in attrs
        or ResourceAttributes.FAAS_INSTANCE in attrs
    )
    if has_job and has_task_id:
        return _create_monitored_resource(_constants.GENERIC_TASK, attrs)
    return _create_monitored_resource(_constants.GENERIC_NODE, attrs)
|
||||
|
||||
|
||||
def _create_monitored_resource(
    monitored_resource_type: str, resource_attrs: Attributes
) -> MonitoredResourceData:
    """Build MonitoredResourceData by mapping OTel resource attributes onto
    the labels required for the given monitored resource type."""
    label_configs = MAPPINGS[monitored_resource_type]
    labels: Dict[str, str] = {}

    for label_key, map_config in label_configs.items():
        value = None
        # Coalesce the configured OTel keys in order; the first present value
        # that does not start with the unknown-service prefix wins.
        for otel_key in map_config.otel_keys:
            if otel_key not in resource_attrs:
                continue
            if str(resource_attrs[otel_key]).startswith(
                _constants.UNKNOWN_SERVICE_PREFIX
            ):
                continue
            value = resource_attrs[otel_key]
            break

        if (
            value is None
            and ResourceAttributes.SERVICE_NAME in map_config.otel_keys
        ):
            # The service name started with unknown_service and was skipped
            # above; use it anyway rather than the generic fallback.
            value = resource_attrs.get(ResourceAttributes.SERVICE_NAME)

        if value is None:
            value = map_config.fallback

        # OTel attribute values can be any of str, bool, int, float, or
        # Sequence of any of them. Encode any non-strings as compact JSON.
        if not isinstance(value, str):
            value = json.dumps(
                value, sort_keys=True, indent=None, separators=(",", ":")
            )
        labels[label_key] = value

    return MonitoredResourceData(type=monitored_resource_type, labels=labels)
|
||||
@@ -0,0 +1,96 @@
|
||||
# Copyright 2023 Google LLC
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# https://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import logging
|
||||
from functools import lru_cache
|
||||
from typing import Union
|
||||
|
||||
import requests
|
||||
|
||||
# TODO: remove when Python 3.7 is dropped
|
||||
from typing_extensions import TypedDict
|
||||
|
||||
# Base URL of the GCP metadata server's v1 API.
_GCP_METADATA_URL = "http://metadata.google.internal/computeMetadata/v1/"
_INSTANCE = "instance"
# Ask the server for the whole metadata tree in one response.
_RECURSIVE_PARAMS = {"recursive": "true"}
# Header sent with every metadata request.
_GCP_METADATA_URL_HEADER = {"Metadata-Flavor": "Google"}
# Use a shorter timeout for connection so we won't block much if it's unreachable
# (connect timeout, read timeout) in seconds.
_TIMEOUT = (2, 5)

_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Project(TypedDict):
    """Shape of the "project" subtree of the metadata response."""

    projectId: str
|
||||
|
||||
|
||||
# Functional TypedDict form because the keys contain hyphens; all keys
# optional (total=False) since they only exist on GKE instances.
Attributes = TypedDict(
    "Attributes", {"cluster-location": str, "cluster-name": str}, total=False
)
|
||||
|
||||
|
||||
class Instance(TypedDict):
    """Shape of the "instance" subtree of the metadata response."""

    attributes: Attributes
    # id can be an integer on GCE VMs or a string on other environments
    id: Union[int, str]
    machineType: str
    name: str
    region: str
    zone: str
|
||||
|
||||
|
||||
class Metadata(TypedDict):
    """Top-level shape of the recursive metadata server response."""

    instance: Instance
    project: Project
|
||||
|
||||
|
||||
class MetadataAccessException(Exception):
    """Raised when the metadata server request fails for any reason."""

    pass
|
||||
|
||||
|
||||
@lru_cache(maxsize=None)
def get_metadata() -> Metadata:
    """Get all instance and project metadata from the metadata server

    Cached for the lifetime of the process.
    """
    try:
        response = requests.get(
            _GCP_METADATA_URL,
            params=_RECURSIVE_PARAMS,
            headers=_GCP_METADATA_URL_HEADER,
            timeout=_TIMEOUT,
        )
        response.raise_for_status()
        all_metadata = response.json()
    except requests.RequestException as err:
        # Normalize any transport/HTTP failure into the package exception.
        raise MetadataAccessException() from err
    return all_metadata
|
||||
|
||||
|
||||
@lru_cache(maxsize=None)
def is_available() -> bool:
    """Whether the metadata server responds; cached for the process lifetime."""
    try:
        response = requests.get(
            f"{_GCP_METADATA_URL}{_INSTANCE}/",
            headers=_GCP_METADATA_URL_HEADER,
            timeout=_TIMEOUT,
        )
        response.raise_for_status()
    except requests.RequestException:
        _logger.debug(
            "Failed to make request to metadata server, assuming it's not available",
            exc_info=True,
        )
        return False
    return True
|
||||
@@ -0,0 +1,15 @@
|
||||
# Copyright 2021 The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Version of this package.
__version__ = "1.9.0a0"
|
||||
@@ -0,0 +1,18 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
The OpenTelemetry SDK package is an implementation of the OpenTelemetry
|
||||
API
|
||||
"""
|
||||
@@ -0,0 +1,481 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
#
|
||||
|
||||
"""
|
||||
OpenTelemetry SDK Configurator for Easy Instrumentation with Distros
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
import os
|
||||
from abc import ABC, abstractmethod
|
||||
from os import environ
|
||||
from typing import Callable, Sequence, Type, Union
|
||||
|
||||
from typing_extensions import Literal
|
||||
|
||||
from opentelemetry._events import set_event_logger_provider
|
||||
from opentelemetry._logs import set_logger_provider
|
||||
from opentelemetry.environment_variables import (
|
||||
OTEL_LOGS_EXPORTER,
|
||||
OTEL_METRICS_EXPORTER,
|
||||
OTEL_PYTHON_ID_GENERATOR,
|
||||
OTEL_TRACES_EXPORTER,
|
||||
)
|
||||
from opentelemetry.metrics import set_meter_provider
|
||||
from opentelemetry.sdk._events import EventLoggerProvider
|
||||
from opentelemetry.sdk._logs import LoggerProvider, LoggingHandler
|
||||
from opentelemetry.sdk._logs.export import BatchLogRecordProcessor, LogExporter
|
||||
from opentelemetry.sdk.environment_variables import (
|
||||
_OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED,
|
||||
OTEL_EXPORTER_OTLP_LOGS_PROTOCOL,
|
||||
OTEL_EXPORTER_OTLP_METRICS_PROTOCOL,
|
||||
OTEL_EXPORTER_OTLP_PROTOCOL,
|
||||
OTEL_EXPORTER_OTLP_TRACES_PROTOCOL,
|
||||
OTEL_TRACES_SAMPLER,
|
||||
OTEL_TRACES_SAMPLER_ARG,
|
||||
)
|
||||
from opentelemetry.sdk.metrics import MeterProvider
|
||||
from opentelemetry.sdk.metrics.export import (
|
||||
MetricExporter,
|
||||
MetricReader,
|
||||
PeriodicExportingMetricReader,
|
||||
)
|
||||
from opentelemetry.sdk.resources import Attributes, Resource
|
||||
from opentelemetry.sdk.trace import TracerProvider
|
||||
from opentelemetry.sdk.trace.export import BatchSpanProcessor, SpanExporter
|
||||
from opentelemetry.sdk.trace.id_generator import IdGenerator
|
||||
from opentelemetry.sdk.trace.sampling import Sampler
|
||||
from opentelemetry.semconv.resource import ResourceAttributes
|
||||
from opentelemetry.trace import set_tracer_provider
|
||||
from opentelemetry.util._importlib_metadata import entry_points
|
||||
|
||||
# Exporter entry-point names for OTLP. "otlp" is resolved to one of the
# protocol-specific entry points based on the configured OTLP protocol.
_EXPORTER_OTLP = "otlp"
_EXPORTER_OTLP_PROTO_GRPC = "otlp_proto_grpc"
_EXPORTER_OTLP_PROTO_HTTP = "otlp_proto_http"

# Maps an OTLP protocol value (from env) to the matching exporter entry point.
_EXPORTER_BY_OTLP_PROTOCOL = {
    "grpc": _EXPORTER_OTLP_PROTO_GRPC,
    "http/protobuf": _EXPORTER_OTLP_PROTO_HTTP,
}

# Per-signal env var selecting the exporter names.
_EXPORTER_ENV_BY_SIGNAL_TYPE = {
    "traces": OTEL_TRACES_EXPORTER,
    "metrics": OTEL_METRICS_EXPORTER,
    "logs": OTEL_LOGS_EXPORTER,
}

# Per-signal env var selecting the OTLP protocol.
_PROTOCOL_ENV_BY_SIGNAL_TYPE = {
    "traces": OTEL_EXPORTER_OTLP_TRACES_PROTOCOL,
    "metrics": OTEL_EXPORTER_OTLP_METRICS_PROTOCOL,
    "logs": OTEL_EXPORTER_OTLP_LOGS_PROTOCOL,
}

_RANDOM_ID_GENERATOR = "random"
_DEFAULT_ID_GENERATOR = _RANDOM_ID_GENERATOR

# Entry-point group used to look up configured samplers.
_OTEL_SAMPLER_ENTRY_POINT_GROUP = "opentelemetry_traces_sampler"

_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _import_config_components(
    selected_components: list[str], entry_point_name: str
) -> Sequence[tuple[str, object]]:
    """Load the named entry points from the given entry-point group.

    Args:
        selected_components: entry-point names to load.
        entry_point_name: the entry-point group to search.

    Returns:
        A sequence of (component name, loaded object) tuples, in input order.

    Raises:
        RuntimeError: if the group or a requested component is missing.
    """
    component_implementations = []

    for selected_component in selected_components:
        try:
            matches = entry_points(
                group=entry_point_name, name=selected_component
            )
            # next() raises StopIteration when no entry point matched.
            entry_point = next(iter(matches))
            component_implementations.append(
                (selected_component, entry_point.load())
            )
        except KeyError as err:
            # Chain the cause explicitly so the original failure is preserved.
            raise RuntimeError(
                f"Requested entry point '{entry_point_name}' not found"
            ) from err

        except StopIteration as err:
            raise RuntimeError(
                f"Requested component '{selected_component}' not found in "
                f"entry point '{entry_point_name}'"
            ) from err

    return component_implementations
|
||||
|
||||
|
||||
def _get_sampler() -> str | None:
    """Return the sampler name from OTEL_TRACES_SAMPLER, or None if unset."""
    return environ.get(OTEL_TRACES_SAMPLER)
|
||||
|
||||
|
||||
def _get_id_generator() -> str:
    """Return the configured ID generator name, defaulting to "random"."""
    return environ.get(OTEL_PYTHON_ID_GENERATOR, _DEFAULT_ID_GENERATOR)
|
||||
|
||||
|
||||
def _get_exporter_entry_point(
    exporter_name: str, signal_type: Literal["traces", "metrics", "logs"]
):
    """Resolve an exporter name to a concrete entry-point name.

    Non-OTLP names pass through unchanged. For OTLP names, the per-signal
    protocol env var (falling back to OTEL_EXPORTER_OTLP_PROTOCOL) selects
    between the grpc and http/protobuf entry points; the generic "otlp"
    defaults to grpc when no protocol is configured.

    Raises:
        RuntimeError: if "otlp" is requested with an unsupported protocol.
    """
    if exporter_name not in (
        _EXPORTER_OTLP,
        _EXPORTER_OTLP_PROTO_GRPC,
        _EXPORTER_OTLP_PROTO_HTTP,
    ):
        return exporter_name

    # Checking env vars for OTLP protocol (grpc/http).
    otlp_protocol = environ.get(
        _PROTOCOL_ENV_BY_SIGNAL_TYPE[signal_type]
    ) or environ.get(OTEL_EXPORTER_OTLP_PROTOCOL)

    if not otlp_protocol:
        if exporter_name == _EXPORTER_OTLP:
            return _EXPORTER_OTLP_PROTO_GRPC
        return exporter_name

    otlp_protocol = otlp_protocol.strip()

    if exporter_name == _EXPORTER_OTLP:
        if otlp_protocol not in _EXPORTER_BY_OTLP_PROTOCOL:
            # Invalid value was set by the env var
            raise RuntimeError(
                f"Unsupported OTLP protocol '{otlp_protocol}' is configured"
            )

        return _EXPORTER_BY_OTLP_PROTOCOL[otlp_protocol]

    # grpc/http already specified by exporter_name, only add a warning in case
    # of a conflict.
    exporter_name_by_env = _EXPORTER_BY_OTLP_PROTOCOL.get(otlp_protocol)
    if exporter_name_by_env and exporter_name != exporter_name_by_env:
        _logger.warning(
            "Conflicting values for %s OTLP exporter protocol, using '%s'",
            signal_type,
            exporter_name,
        )

    return exporter_name
|
||||
|
||||
|
||||
def _get_exporter_names(
    signal_type: Literal["traces", "metrics", "logs"],
) -> Sequence[str]:
    """Read the comma-separated exporter list for a signal from the environment.

    Returns an empty list when the variable is unset, empty, or "none".
    Each name is resolved via _get_exporter_entry_point.
    """
    names = environ.get(_EXPORTER_ENV_BY_SIGNAL_TYPE.get(signal_type, ""))

    if not names or names.lower().strip() == "none":
        return []

    return [
        _get_exporter_entry_point(name, signal_type)
        for name in (part.strip() for part in names.split(","))
        # Skip empty entries produced by stray/trailing commas instead of
        # passing "" on as an exporter name.
        if name
    ]
|
||||
|
||||
|
||||
def _init_tracing(
    exporters: dict[str, Type[SpanExporter]],
    id_generator: IdGenerator | None = None,
    sampler: Sampler | None = None,
    resource: Resource | None = None,
):
    """Create a TracerProvider, register it globally, and attach a
    BatchSpanProcessor for each configured exporter class.

    Args:
        exporters: exporter classes keyed by name; instantiated with no args.
        id_generator: optional custom trace/span ID generator.
        sampler: optional sampler for the provider.
        resource: optional resource describing this telemetry source.
    """
    provider = TracerProvider(
        id_generator=id_generator,
        sampler=sampler,
        resource=resource,
    )
    set_tracer_provider(provider)

    # Only the classes are needed, so iterate values() rather than items().
    for exporter_class in exporters.values():
        provider.add_span_processor(
            BatchSpanProcessor(exporter_class())
        )
|
||||
|
||||
|
||||
def _init_metrics(
    exporters_or_readers: dict[
        str, Union[Type[MetricExporter], Type[MetricReader]]
    ],
    resource: Resource | None = None,
):
    """Create a MeterProvider and register it globally.

    Pull-based components subclass MetricReader and are used directly;
    push-based MetricExporter classes are wrapped in a
    PeriodicExportingMetricReader.

    Args:
        exporters_or_readers: exporter/reader classes keyed by name;
            instantiated with no args.
        resource: optional resource describing this telemetry source.
    """
    metric_readers = []

    # Only the classes are needed, so iterate values() rather than items().
    for exporter_or_reader_class in exporters_or_readers.values():
        if issubclass(exporter_or_reader_class, MetricReader):
            metric_readers.append(exporter_or_reader_class())
        else:
            metric_readers.append(
                PeriodicExportingMetricReader(exporter_or_reader_class())
            )

    provider = MeterProvider(resource=resource, metric_readers=metric_readers)
    set_meter_provider(provider)
|
||||
|
||||
|
||||
def _init_logging(
    exporters: dict[str, Type[LogExporter]],
    resource: Resource | None = None,
    setup_logging_handler: bool = True,
):
    """Create a LoggerProvider and EventLoggerProvider, register them
    globally, and attach a BatchLogRecordProcessor per exporter.

    When setup_logging_handler is true, also patches logging.basicConfig
    and installs a root-level LoggingHandler so stdlib logging records are
    forwarded to OpenTelemetry.

    Args:
        exporters: log exporter classes keyed by name; instantiated with no args.
        resource: optional resource describing this telemetry source.
        setup_logging_handler: whether to hook the stdlib logging root logger.
    """
    provider = LoggerProvider(resource=resource)
    set_logger_provider(provider)

    # Only the classes are needed, so iterate values() rather than items().
    for exporter_class in exporters.values():
        provider.add_log_record_processor(
            BatchLogRecordProcessor(exporter_class())
        )

    event_logger_provider = EventLoggerProvider(logger_provider=provider)
    set_event_logger_provider(event_logger_provider)

    if setup_logging_handler:
        _patch_basic_config()

        # Add OTel handler
        handler = LoggingHandler(
            level=logging.NOTSET, logger_provider=provider
        )
        logging.getLogger().addHandler(handler)
|
||||
|
||||
|
||||
def _patch_basic_config():
    """Monkeypatch logging.basicConfig to cooperate with the OTel handler.

    Stdlib basicConfig does nothing when the root logger already has
    handlers; if the only handler is ours, temporarily remove it so user
    configuration still takes effect, then restore it.
    """
    original_basic_config = logging.basicConfig

    def patched_basic_config(*args, **kwargs):
        root = logging.getLogger()
        # True only when the sole root handler is the OTel LoggingHandler.
        has_only_otel = len(root.handlers) == 1 and isinstance(
            root.handlers[0], LoggingHandler
        )
        if has_only_otel:
            otel_handler = root.handlers.pop()
            original_basic_config(*args, **kwargs)
            root.addHandler(otel_handler)
        else:
            original_basic_config(*args, **kwargs)

    logging.basicConfig = patched_basic_config
|
||||
|
||||
|
||||
def _import_exporters(
    trace_exporter_names: Sequence[str],
    metric_exporter_names: Sequence[str],
    log_exporter_names: Sequence[str],
) -> tuple[
    dict[str, Type[SpanExporter]],
    dict[str, Union[Type[MetricExporter], Type[MetricReader]]],
    dict[str, Type[LogExporter]],
]:
    """Load exporter classes by entry-point name for each signal type.

    Raises:
        RuntimeError: if a loaded component is not of the expected type.
    """

    def _load_checked(names, group, expected_types, kind):
        # Shared loading loop: load each component and type-check it.
        loaded = {}
        for name, impl in _import_config_components(names, group):
            if not issubclass(impl, expected_types):
                raise RuntimeError(f"{name} is not a {kind}")
            loaded[name] = impl
        return loaded

    trace_exporters = _load_checked(
        trace_exporter_names,
        "opentelemetry_traces_exporter",
        SpanExporter,
        "trace exporter",
    )
    # The metric exporter components may be push MetricExporter or pull
    # exporters which subclass MetricReader directly.
    metric_exporters = _load_checked(
        metric_exporter_names,
        "opentelemetry_metrics_exporter",
        (MetricExporter, MetricReader),
        "metric exporter",
    )
    log_exporters = _load_checked(
        log_exporter_names,
        "opentelemetry_logs_exporter",
        LogExporter,
        "log exporter",
    )

    return trace_exporters, metric_exporters, log_exporters
|
||||
|
||||
|
||||
def _import_sampler_factory(sampler_name: str) -> Callable[[str], Sampler]:
    """Load the sampler factory registered under the given entry-point name."""
    _, sampler_impl = _import_config_components(
        [sampler_name.strip()], _OTEL_SAMPLER_ENTRY_POINT_GROUP
    )[0]
    return sampler_impl
|
||||
|
||||
|
||||
def _import_sampler(sampler_name: str) -> Sampler | None:
    """Create a Sampler from an entry-point name, or None on failure.

    For the ratio-based samplers, OTEL_TRACES_SAMPLER_ARG is parsed as a
    float rate (falling back to 1.0 on a missing or malformed value); for
    every other sampler its raw string value is passed to the factory.
    Any error results in a warning and a None return so the caller can
    fall back to the default sampler.
    """
    if not sampler_name:
        return None
    try:
        sampler_factory = _import_sampler_factory(sampler_name)
        if sampler_name in ("traceidratio", "parentbased_traceidratio"):
            try:
                rate = float(os.getenv(OTEL_TRACES_SAMPLER_ARG))
            except (ValueError, TypeError):
                # Bug fix: the message previously referenced
                # "TRACES_SAMPLER_ARG", which is not the actual env var name.
                _logger.warning(
                    "Could not convert OTEL_TRACES_SAMPLER_ARG to float. Using default value 1.0."
                )
                rate = 1.0
            arg = rate
        else:
            arg = os.getenv(OTEL_TRACES_SAMPLER_ARG)

        sampler = sampler_factory(arg)
        if not isinstance(sampler, Sampler):
            message = f"Sampler factory, {sampler_factory}, produced output, {sampler}, which is not a Sampler."
            _logger.warning(message)
            raise ValueError(message)
        return sampler
    except Exception as exc:  # pylint: disable=broad-exception-caught
        # Deliberate best-effort: never let sampler misconfiguration break
        # SDK startup.
        _logger.warning(
            "Using default sampler. Failed to initialize sampler, %s: %s",
            sampler_name,
            exc,
        )
        return None
|
||||
|
||||
|
||||
def _import_id_generator(id_generator_name: str) -> IdGenerator:
    """Instantiate the IdGenerator registered under *id_generator_name*.

    Raises RuntimeError when the resolved class is not an IdGenerator.
    """
    name, impl = _import_config_components(
        [id_generator_name.strip()], "opentelemetry_id_generator"
    )[0]

    if not issubclass(impl, IdGenerator):
        raise RuntimeError(f"{name} is not an IdGenerator")

    return impl()
|
||||
|
||||
|
||||
def _initialize_components(
    auto_instrumentation_version: str | None = None,
    trace_exporter_names: list[str] | None = None,
    metric_exporter_names: list[str] | None = None,
    log_exporter_names: list[str] | None = None,
    sampler: Sampler | None = None,
    resource_attributes: Attributes | None = None,
    id_generator: IdGenerator | None = None,
    setup_logging_handler: bool | None = None,
):
    """Wire up the tracing, metrics and logging SDK pipelines in one call.

    Every argument left as None is resolved through this module's
    environment-driven ``_get_*`` helpers before the three ``_init_*``
    functions are invoked.
    """
    if trace_exporter_names is None:
        trace_exporter_names = []
    if metric_exporter_names is None:
        metric_exporter_names = []
    if log_exporter_names is None:
        log_exporter_names = []
    # Explicitly passed exporter names are combined with environment-configured ones.
    span_exporters, metric_exporters, log_exporters = _import_exporters(
        trace_exporter_names + _get_exporter_names("traces"),
        metric_exporter_names + _get_exporter_names("metrics"),
        log_exporter_names + _get_exporter_names("logs"),
    )
    if sampler is None:
        sampler_name = _get_sampler()
        sampler = _import_sampler(sampler_name)
    if id_generator is None:
        id_generator_name = _get_id_generator()
        id_generator = _import_id_generator(id_generator_name)
    if resource_attributes is None:
        resource_attributes = {}
    # populate version if using auto-instrumentation
    if auto_instrumentation_version:
        resource_attributes[ResourceAttributes.TELEMETRY_AUTO_VERSION] = (
            auto_instrumentation_version
        )
    # if env var OTEL_RESOURCE_ATTRIBUTES is given, it will read the service_name
    # from the env variable else defaults to "unknown_service"
    resource = Resource.create(resource_attributes)

    _init_tracing(
        exporters=span_exporters,
        id_generator=id_generator,
        sampler=sampler,
        resource=resource,
    )
    _init_metrics(metric_exporters, resource)
    if setup_logging_handler is None:
        # Attaching the root-logger handler is opt-in via environment variable.
        setup_logging_handler = (
            os.getenv(
                _OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED, "false"
            )
            .strip()
            .lower()
            == "true"
        )
    _init_logging(log_exporters, resource, setup_logging_handler)
|
||||
|
||||
|
||||
class _BaseConfigurator(ABC):
|
||||
"""An ABC for configurators
|
||||
|
||||
Configurators are used to configure
|
||||
SDKs (i.e. TracerProvider, MeterProvider, Processors...)
|
||||
to reduce the amount of manual configuration required.
|
||||
"""
|
||||
|
||||
_instance = None
|
||||
_is_instrumented = False
|
||||
|
||||
def __new__(cls, *args, **kwargs):
|
||||
if cls._instance is None:
|
||||
cls._instance = object.__new__(cls, *args, **kwargs)
|
||||
|
||||
return cls._instance
|
||||
|
||||
@abstractmethod
|
||||
def _configure(self, **kwargs):
|
||||
"""Configure the SDK"""
|
||||
|
||||
def configure(self, **kwargs):
|
||||
"""Configure the SDK"""
|
||||
self._configure(**kwargs)
|
||||
|
||||
|
||||
class _OTelSDKConfigurator(_BaseConfigurator):
    """A basic Configurator by OTel Python for initializing OTel SDK components

    Initializes several crucial OTel SDK components (i.e. TracerProvider,
    MeterProvider, Processors...) according to a default implementation. Other
    Configurators can subclass and slightly alter this initialization.

    NOTE: This class should not be instantiated nor should it become an entry
    point on the `opentelemetry-sdk` package. Instead, distros should subclass
    this Configurator and enhance it as needed.
    """

    def _configure(self, **kwargs):
        # All keyword arguments are forwarded verbatim to
        # _initialize_components.
        _initialize_components(**kwargs)
|
||||
Binary file not shown.
@@ -0,0 +1,89 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import logging
|
||||
from time import time_ns
|
||||
from typing import Optional
|
||||
|
||||
from opentelemetry import trace
|
||||
from opentelemetry._events import Event
|
||||
from opentelemetry._events import EventLogger as APIEventLogger
|
||||
from opentelemetry._events import EventLoggerProvider as APIEventLoggerProvider
|
||||
from opentelemetry._logs import NoOpLogger, SeverityNumber, get_logger_provider
|
||||
from opentelemetry.sdk._logs import Logger, LoggerProvider, LogRecord
|
||||
from opentelemetry.util.types import Attributes
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class EventLogger(APIEventLogger):
    """SDK implementation of the API EventLogger.

    Translates emitted Events into SDK LogRecords and forwards them to
    the Logger obtained from *logger_provider*.
    """

    def __init__(
        self,
        logger_provider: LoggerProvider,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ):
        super().__init__(
            name=name,
            version=version,
            schema_url=schema_url,
            attributes=attributes,
        )
        self._logger: Logger = logger_provider.get_logger(
            name, version, schema_url, attributes
        )

    def emit(self, event: Event) -> None:
        """Convert *event* into a LogRecord and emit it via the SDK logger."""
        if isinstance(self._logger, NoOpLogger):
            # Do nothing if SDK is disabled
            return
        # Fall back to the active span's context for any correlation
        # fields the event does not carry itself.
        span_context = trace.get_current_span().get_span_context()
        self._logger.emit(
            LogRecord(
                timestamp=event.timestamp or time_ns(),
                observed_timestamp=None,
                trace_id=event.trace_id or span_context.trace_id,
                span_id=event.span_id or span_context.span_id,
                trace_flags=event.trace_flags or span_context.trace_flags,
                severity_text=None,
                severity_number=event.severity_number or SeverityNumber.INFO,
                body=event.body,
                resource=getattr(self._logger, "resource", None),
                attributes=event.attributes,
            )
        )
|
||||
|
||||
|
||||
class EventLoggerProvider(APIEventLoggerProvider):
    """SDK implementation of the API EventLoggerProvider.

    Delegates to an SDK LoggerProvider (the globally registered one by
    default) for logger creation, shutdown and flushing.
    """

    def __init__(self, logger_provider: Optional[LoggerProvider] = None):
        self._logger_provider = logger_provider or get_logger_provider()

    def get_event_logger(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> EventLogger:
        """Return a new EventLogger; warns (but proceeds) on a falsy name."""
        if not name:
            _logger.warning("EventLogger created with invalid name: %s", name)
        return EventLogger(
            self._logger_provider, name, version, schema_url, attributes
        )

    def shutdown(self):
        """Shut down the underlying logger provider."""
        self._logger_provider.shutdown()

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        """Flush the underlying provider and propagate its result.

        Bug fix: the delegate's return value was previously discarded, so
        this method always returned None despite its bool annotation.
        """
        return self._logger_provider.force_flush(timeout_millis)
|
||||
Binary file not shown.
@@ -0,0 +1,36 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
from opentelemetry.sdk._logs._internal import (
|
||||
LogData,
|
||||
LogDroppedAttributesWarning,
|
||||
Logger,
|
||||
LoggerProvider,
|
||||
LoggingHandler,
|
||||
LogLimits,
|
||||
LogRecord,
|
||||
LogRecordProcessor,
|
||||
)
|
||||
|
||||
# Public API of the logs SDK package; keep in sync with the re-exports
# from opentelemetry.sdk._logs._internal above.
__all__ = [
    "LogData",
    "Logger",
    "LoggerProvider",
    "LoggingHandler",
    "LogLimits",
    "LogRecord",
    "LogRecordProcessor",
    "LogDroppedAttributesWarning",
]
|
||||
Binary file not shown.
@@ -0,0 +1,715 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import abc
|
||||
import atexit
|
||||
import concurrent.futures
|
||||
import json
|
||||
import logging
|
||||
import threading
|
||||
import traceback
|
||||
import warnings
|
||||
from os import environ
|
||||
from threading import Lock
|
||||
from time import time_ns
|
||||
from typing import Any, Callable, Tuple, Union # noqa
|
||||
|
||||
from opentelemetry._logs import Logger as APILogger
|
||||
from opentelemetry._logs import LoggerProvider as APILoggerProvider
|
||||
from opentelemetry._logs import LogRecord as APILogRecord
|
||||
from opentelemetry._logs import (
|
||||
NoOpLogger,
|
||||
SeverityNumber,
|
||||
get_logger,
|
||||
get_logger_provider,
|
||||
std_to_otel,
|
||||
)
|
||||
from opentelemetry.attributes import _VALID_ANY_VALUE_TYPES, BoundedAttributes
|
||||
from opentelemetry.sdk.environment_variables import (
|
||||
OTEL_ATTRIBUTE_COUNT_LIMIT,
|
||||
OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT,
|
||||
OTEL_SDK_DISABLED,
|
||||
)
|
||||
from opentelemetry.sdk.resources import Resource
|
||||
from opentelemetry.sdk.util import ns_to_iso_str
|
||||
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
|
||||
from opentelemetry.semconv.trace import SpanAttributes
|
||||
from opentelemetry.trace import (
|
||||
format_span_id,
|
||||
format_trace_id,
|
||||
get_current_span,
|
||||
)
|
||||
from opentelemetry.trace.span import TraceFlags
|
||||
from opentelemetry.util.types import AnyValue, Attributes
|
||||
|
||||
_logger = logging.getLogger(__name__)
|
||||
|
||||
_DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT = 128
|
||||
_ENV_VALUE_UNSET = ""
|
||||
|
||||
|
||||
class LogDroppedAttributesWarning(UserWarning):
    """Warning raised when log attributes are dropped because of limits.

    Having a dedicated warning class lets these messages be filtered and
    handled separately from other warnings, so they can be shown only
    once without interfering with default user warnings.
    """


# Show a dropped-attributes warning at most once per location.
warnings.simplefilter("once", LogDroppedAttributesWarning)
|
||||
|
||||
|
||||
class LogLimits:
    """This class is based on a SpanLimits class in the Tracing module.

    This class represents the limits that should be enforced on recorded data such as events, links, attributes etc.

    This class does not enforce any limits itself. It only provides a way to read limits from env,
    default values and from user provided arguments.

    All limit arguments must be either a non-negative integer, ``None`` or ``LogLimits.UNSET``.

    - All limit arguments are optional.
    - If a limit argument is not set, the class will try to read its value from the corresponding
      environment variable.
    - If the environment variable is not set, the default value, if any, will be used.

    Limit precedence:

    - If a model specific limit is set, it will be used.
    - Else if the corresponding global limit is set, it will be used.
    - Else if the model specific limit has a default value, the default value will be used.
    - Else if the global limit has a default value, the default value will be used.

    Args:
        max_attributes: Maximum number of attributes that can be added to a span, event, and link.
            Environment variable: ``OTEL_ATTRIBUTE_COUNT_LIMIT``
            Default: {_DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT}
        max_attribute_length: Maximum length an attribute value can have. Values longer than
            the specified length will be truncated.
    """

    # Sentinel meaning "explicitly unlimited"; skips env/default lookup.
    UNSET = -1

    def __init__(
        self,
        max_attributes: int | None = None,
        max_attribute_length: int | None = None,
    ):
        # attribute count
        global_max_attributes = self._from_env_if_absent(
            max_attributes, OTEL_ATTRIBUTE_COUNT_LIMIT
        )
        self.max_attributes = (
            global_max_attributes
            if global_max_attributes is not None
            else _DEFAULT_OTEL_ATTRIBUTE_COUNT_LIMIT
        )

        # attribute length (no default: None means unlimited)
        self.max_attribute_length = self._from_env_if_absent(
            max_attribute_length,
            OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT,
        )

    def __repr__(self):
        return f"{type(self).__name__}(max_attributes={self.max_attributes}, max_attribute_length={self.max_attribute_length})"

    @classmethod
    def _from_env_if_absent(
        cls, value: int | None, env_var: str, default: int | None = None
    ) -> int | None:
        """Resolve one limit: explicit value > env var > *default*.

        ``cls.UNSET`` maps to None (unlimited). Raises ValueError for
        negative or non-integer values.
        """
        if value == cls.UNSET:
            return None

        err_msg = "{} must be a non-negative integer but got {}"

        # if no value is provided for the limit, try to load it from env
        if value is None:
            # return default value if env var is not set
            if env_var not in environ:
                return default

            str_value = environ.get(env_var, "").strip().lower()
            if str_value == _ENV_VALUE_UNSET:
                return None

            try:
                value = int(str_value)
            except ValueError:
                # Bug fix: suppress implicit exception chaining — the int()
                # traceback added a noisy "During handling of the above
                # exception" block with no extra information (flake8 B904).
                raise ValueError(
                    err_msg.format(env_var, str_value)
                ) from None

        if value < 0:
            raise ValueError(err_msg.format(env_var, value))
        return value
|
||||
|
||||
|
||||
# Sentinel LogLimits with both limits explicitly UNSET: _from_env_if_absent
# short-circuits on UNSET, so records built with these limits never consult
# the environment. Used as the default for LogRecord.
_UnsetLogLimits = LogLimits(
    max_attributes=LogLimits.UNSET,
    max_attribute_length=LogLimits.UNSET,
)
|
||||
|
||||
|
||||
class LogRecord(APILogRecord):
    """A LogRecord instance represents an event being logged.

    LogRecord instances are created and emitted via `Logger`
    every time something is logged. They contain all the information
    pertinent to the event being logged.
    """

    def __init__(
        self,
        timestamp: int | None = None,
        observed_timestamp: int | None = None,
        trace_id: int | None = None,
        span_id: int | None = None,
        trace_flags: TraceFlags | None = None,
        severity_text: str | None = None,
        severity_number: SeverityNumber | None = None,
        body: AnyValue | None = None,
        resource: Resource | None = None,
        attributes: Attributes | None = None,
        limits: LogLimits | None = _UnsetLogLimits,
    ):
        # Robustness fix: the annotation admits None for ``limits`` but the
        # code below dereferences it unconditionally; treat None the same
        # as "no limits" instead of raising AttributeError.
        if limits is None:
            limits = _UnsetLogLimits
        super().__init__(
            **{
                "timestamp": timestamp,
                "observed_timestamp": observed_timestamp,
                "trace_id": trace_id,
                "span_id": span_id,
                "trace_flags": trace_flags,
                "severity_text": severity_text,
                "severity_number": severity_number,
                "body": body,
                # Count/length limits are enforced here by BoundedAttributes;
                # empty mappings are normalized to None.
                "attributes": BoundedAttributes(
                    maxlen=limits.max_attributes,
                    attributes=attributes if bool(attributes) else None,
                    immutable=False,
                    max_value_len=limits.max_attribute_length,
                ),
            }
        )
        # Fall back to the default Resource so consumers can rely on
        # self.resource always being a Resource instance.
        self.resource = (
            resource if isinstance(resource, Resource) else Resource.create({})
        )
        if self.dropped_attributes > 0:
            warnings.warn(
                "Log record attributes were dropped due to limits",
                LogDroppedAttributesWarning,
                stacklevel=2,
            )

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, LogRecord):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def to_json(self, indent: int | None = 4) -> str:
        """Serialize this record as a JSON string (pretty-printed by default)."""
        return json.dumps(
            {
                "body": self.body,
                "severity_number": self.severity_number.value
                if self.severity_number is not None
                else None,
                "severity_text": self.severity_text,
                "attributes": (
                    dict(self.attributes) if bool(self.attributes) else None
                ),
                "dropped_attributes": self.dropped_attributes,
                "timestamp": ns_to_iso_str(self.timestamp),
                "observed_timestamp": ns_to_iso_str(self.observed_timestamp),
                "trace_id": (
                    f"0x{format_trace_id(self.trace_id)}"
                    if self.trace_id is not None
                    else ""
                ),
                "span_id": (
                    f"0x{format_span_id(self.span_id)}"
                    if self.span_id is not None
                    else ""
                ),
                "trace_flags": self.trace_flags,
                "resource": json.loads(self.resource.to_json()),
            },
            indent=indent,
        )

    @property
    def dropped_attributes(self) -> int:
        # BoundedAttributes tracks how many entries were evicted.
        if self.attributes:
            return self.attributes.dropped
        return 0
|
||||
|
||||
|
||||
class LogData:
    """Readable LogRecord data plus associated InstrumentationLibrary."""

    def __init__(
        self,
        log_record: LogRecord,
        instrumentation_scope: InstrumentationScope,
    ):
        # Simply pair the record with the scope that produced it.
        self.instrumentation_scope = instrumentation_scope
        self.log_record = log_record
|
||||
|
||||
|
||||
class LogRecordProcessor(abc.ABC):
    """Abstract hook invoked for every emitted log record.

    Concrete processors are registered via
    :func:`LoggerProvider.add_log_record_processor` and are called in
    registration order.
    """

    @abc.abstractmethod
    def emit(self, log_data: LogData):
        """Handle one emitted `LogData`."""

    @abc.abstractmethod
    def shutdown(self):
        """Release resources when the owning :class:`opentelemetry.sdk._logs.Logger` is shutdown."""

    @abc.abstractmethod
    def force_flush(self, timeout_millis: int = 30000):
        """Export any logs received but not yet exported.

        Args:
            timeout_millis: The maximum amount of time to wait for logs to be
                exported.

        Returns:
            False if the timeout is exceeded, True otherwise.
        """
|
||||
|
||||
|
||||
# Temporary fix until https://github.com/PyCQA/pylint/issues/4098 is resolved
|
||||
# pylint:disable=no-member
|
||||
# Temporary fix until https://github.com/PyCQA/pylint/issues/4098 is resolved
# pylint:disable=no-member
class SynchronousMultiLogRecordProcessor(LogRecordProcessor):
    """A :class:`LogRecordProcessor` that fans out sequentially.

    Every received log record is forwarded to the registered processors
    one by one, in the order they were added.
    """

    def __init__(self):
        # An immutable tuple snapshot avoids races between
        # add_log_record_processor and iteration in emit().
        self._log_record_processors = ()  # type: Tuple[LogRecordProcessor, ...]
        self._lock = threading.Lock()

    def add_log_record_processor(
        self, log_record_processor: LogRecordProcessor
    ) -> None:
        """Adds a Logprocessor to the list of log processors handled by this instance"""
        with self._lock:
            self._log_record_processors = self._log_record_processors + (
                log_record_processor,
            )

    def emit(self, log_data: LogData) -> None:
        for processor in self._log_record_processors:
            processor.emit(log_data)

    def shutdown(self) -> None:
        """Shutdown the log processors one by one"""
        for processor in self._log_record_processors:
            processor.shutdown()

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        """Flush each processor in turn, sharing one overall deadline.

        Args:
            timeout_millis: The maximum amount of time to wait for logs to be
                exported. If the first n log processors exceeded the timeout
                then remaining log processors will not be flushed.

        Returns:
            True if all the log processors flushes the logs within timeout,
            False otherwise.
        """
        deadline_ns = time_ns() + timeout_millis * 1000000
        for processor in self._log_record_processors:
            now_ns = time_ns()
            if now_ns >= deadline_ns:
                return False
            # Give each processor only the time remaining until the deadline.
            if not processor.force_flush((deadline_ns - now_ns) // 1000000):
                return False

        return True
|
||||
|
||||
|
||||
class ConcurrentMultiLogRecordProcessor(LogRecordProcessor):
    """A :class:`LogRecordProcessor` that fans out in parallel.

    Each call is submitted to a thread pool executor, one task per
    registered processor, and this class waits for all of them before
    returning.

    Args:
        max_workers: The number of threads managed by the thread pool executor
            and thus defining how many log processors can work in parallel.
    """

    def __init__(self, max_workers: int = 2):
        # An immutable tuple snapshot avoids races between
        # add_log_record_processor and iteration in emit().
        self._log_record_processors = ()  # type: Tuple[LogRecordProcessor, ...]
        self._lock = threading.Lock()
        self._executor = concurrent.futures.ThreadPoolExecutor(
            max_workers=max_workers
        )

    def add_log_record_processor(
        self, log_record_processor: LogRecordProcessor
    ):
        with self._lock:
            self._log_record_processors = self._log_record_processors + (
                log_record_processor,
            )

    def _submit_and_wait(
        self,
        func: Callable[[LogRecordProcessor], Callable[..., None]],
        *args: Any,
        **kwargs: Any,
    ):
        # Fan out one task per processor, then block until all complete.
        pending = [
            self._executor.submit(func(processor), *args, **kwargs)
            for processor in self._log_record_processors
        ]
        for task in pending:
            task.result()

    def emit(self, log_data: LogData):
        self._submit_and_wait(lambda processor: processor.emit, log_data)

    def shutdown(self):
        self._submit_and_wait(lambda processor: processor.shutdown)

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        """Force flush the log processors in parallel.

        Args:
            timeout_millis: The maximum amount of time to wait for logs to be
                exported.

        Returns:
            True if all the log processors flushes the logs within timeout,
            False otherwise.
        """
        tasks = [
            self._executor.submit(processor.force_flush, timeout_millis)
            for processor in self._log_record_processors
        ]

        done, not_done = concurrent.futures.wait(
            tasks, timeout_millis / 1e3
        )

        if not_done:
            return False

        # Every task finished; succeed only if every flush reported success.
        return all(task.result() for task in done)
|
||||
|
||||
|
||||
# skip natural LogRecord attributes
|
||||
# http://docs.python.org/library/logging.html#logrecord-attributes
|
||||
# skip natural LogRecord attributes
# http://docs.python.org/library/logging.html#logrecord-attributes
# Anything on a stdlib record that is NOT in this set (i.e. values passed
# via ``extra=``) is copied into the OTel log record's attributes by
# LoggingHandler._get_attributes below.
_RESERVED_ATTRS = frozenset(
    (
        "asctime",
        "args",
        "created",
        "exc_info",
        "exc_text",
        "filename",
        "funcName",
        "getMessage",
        "message",
        "levelname",
        "levelno",
        "lineno",
        "module",
        "msecs",
        "msg",
        "name",
        "pathname",
        "process",
        "processName",
        "relativeCreated",
        "stack_info",
        "thread",
        "threadName",
        "taskName",
    )
)
|
||||
|
||||
|
||||
class LoggingHandler(logging.Handler):
    """A handler class which writes logging records, in OTLP format, to
    a network destination or file. Supports signals from the `logging` module.
    https://docs.python.org/3/library/logging.html
    """

    def __init__(
        self,
        level=logging.NOTSET,
        logger_provider=None,
    ) -> None:
        # logger_provider: SDK LoggerProvider used to look up OTel loggers;
        # falls back to the globally registered provider when None.
        super().__init__(level=level)
        self._logger_provider = logger_provider or get_logger_provider()

    @staticmethod
    def _get_attributes(record: logging.LogRecord) -> Attributes:
        """Build OTel attributes from a stdlib record.

        Copies every non-reserved attribute (i.e. values supplied via
        ``extra=``), then adds code-location attributes and, when present,
        exception type/message/stacktrace.
        """
        attributes = {
            k: v for k, v in vars(record).items() if k not in _RESERVED_ATTRS
        }

        # Add standard code attributes for logs.
        attributes[SpanAttributes.CODE_FILEPATH] = record.pathname
        attributes[SpanAttributes.CODE_FUNCTION] = record.funcName
        attributes[SpanAttributes.CODE_LINENO] = record.lineno

        if record.exc_info:
            exctype, value, tb = record.exc_info
            if exctype is not None:
                attributes[SpanAttributes.EXCEPTION_TYPE] = exctype.__name__
            if value is not None and value.args:
                attributes[SpanAttributes.EXCEPTION_MESSAGE] = str(
                    value.args[0]
                )
            if tb is not None:
                # https://github.com/open-telemetry/opentelemetry-specification/blob/9fa7c656b26647b27e485a6af7e38dc716eba98a/specification/trace/semantic_conventions/exceptions.md#stacktrace-representation
                attributes[SpanAttributes.EXCEPTION_STACKTRACE] = "".join(
                    traceback.format_exception(*record.exc_info)
                )
        return attributes

    def _translate(self, record: logging.LogRecord) -> LogRecord:
        """Convert a stdlib LogRecord into an OTel SDK LogRecord."""
        timestamp = int(record.created * 1e9)
        observered_timestamp = time_ns()
        # Correlate the log with the currently active span, if any.
        span_context = get_current_span().get_span_context()
        attributes = self._get_attributes(record)
        severity_number = std_to_otel(record.levelno)
        if self.formatter:
            body = self.format(record)
        else:
            # `record.getMessage()` uses `record.msg` as a template to format
            # `record.args` into. There is a special case in `record.getMessage()`
            # where it will only attempt formatting if args are provided,
            # otherwise, it just stringifies `record.msg`.
            #
            # Since the OTLP body field has a type of 'any' and the logging module
            # is sometimes used in such a way that objects incorrectly end up
            # set as record.msg, in those cases we would like to bypass
            # `record.getMessage()` completely and set the body to the object
            # itself instead of its string representation.
            # For more background, see: https://github.com/open-telemetry/opentelemetry-python/pull/4216
            if not record.args and not isinstance(record.msg, str):
                # if record.msg is not a value we can export, cast it to string
                if not isinstance(record.msg, _VALID_ANY_VALUE_TYPES):
                    body = str(record.msg)
                else:
                    body = record.msg
            else:
                body = record.getMessage()

        # related to https://github.com/open-telemetry/opentelemetry-python/issues/3548
        # Severity Text = WARN as defined in https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/logs/data-model.md#displaying-severity.
        level_name = (
            "WARN" if record.levelname == "WARNING" else record.levelname
        )

        logger = get_logger(record.name, logger_provider=self._logger_provider)
        return LogRecord(
            timestamp=timestamp,
            observed_timestamp=observered_timestamp,
            trace_id=span_context.trace_id,
            span_id=span_context.span_id,
            trace_flags=span_context.trace_flags,
            severity_text=level_name,
            severity_number=severity_number,
            body=body,
            resource=logger.resource,
            attributes=attributes,
        )

    def emit(self, record: logging.LogRecord) -> None:
        """
        Emit a record. Skip emitting if logger is NoOp.

        The record is translated to OTel format, and then sent across the pipeline.
        """
        logger = get_logger(record.name, logger_provider=self._logger_provider)
        if not isinstance(logger, NoOpLogger):
            logger.emit(self._translate(record))

    def flush(self) -> None:
        """
        Flushes the logging output. Skip flushing if logging_provider has no force_flush method.
        """
        if hasattr(self._logger_provider, "force_flush") and callable(
            self._logger_provider.force_flush
        ):
            self._logger_provider.force_flush()
|
||||
|
||||
|
||||
class Logger(APILogger):
    """SDK Logger bound to a resource and an instrumentation scope.

    Emitting a record wraps it in a :class:`LogData` and hands it to the
    provider's multi log record processor.
    """

    def __init__(
        self,
        resource: Resource,
        multi_log_record_processor: Union[
            SynchronousMultiLogRecordProcessor,
            ConcurrentMultiLogRecordProcessor,
        ],
        instrumentation_scope: InstrumentationScope,
    ):
        super().__init__(
            instrumentation_scope.name,
            instrumentation_scope.version,
            instrumentation_scope.schema_url,
            instrumentation_scope.attributes,
        )
        self._instrumentation_scope = instrumentation_scope
        self._multi_log_record_processor = multi_log_record_processor
        self._resource = resource

    @property
    def resource(self):
        # Fixed at construction time.
        return self._resource

    def emit(self, record: LogRecord):
        """Wrap *record* with this logger's scope and forward it."""
        self._multi_log_record_processor.emit(
            LogData(record, self._instrumentation_scope)
        )
|
||||
|
||||
|
||||
class LoggerProvider(APILoggerProvider):
    """SDK implementation of the API ``LoggerProvider``.

    Owns the SDK :class:`Resource`, the registered log record processors
    and a cache of :class:`Logger` instances handed out by `get_logger`.
    """

    def __init__(
        self,
        resource: Resource | None = None,
        shutdown_on_exit: bool = True,
        multi_log_record_processor: SynchronousMultiLogRecordProcessor
        | ConcurrentMultiLogRecordProcessor
        | None = None,
    ):
        """
        Args:
            resource: Resource attached to every logger; defaults to
                ``Resource.create({})`` when omitted.
            shutdown_on_exit: When True, register :meth:`shutdown` with
                ``atexit`` so processors are shut down at interpreter exit.
            multi_log_record_processor: Container that fans records out to
                the registered processors; a synchronous one is created by
                default.
        """
        if resource is None:
            self._resource = Resource.create({})
        else:
            self._resource = resource
        self._multi_log_record_processor = (
            multi_log_record_processor or SynchronousMultiLogRecordProcessor()
        )
        # OTEL_SDK_DISABLED="true" (case-insensitive, whitespace-tolerant)
        # makes get_logger() return no-op loggers; any other value leaves
        # the SDK enabled.
        disabled = environ.get(OTEL_SDK_DISABLED, "")
        self._disabled = disabled.lower().strip() == "true"
        self._at_exit_handler = None
        if shutdown_on_exit:
            self._at_exit_handler = atexit.register(self.shutdown)
        # Loggers are cached by (name, version, schema_url); the lock makes
        # lookup-or-create atomic across threads.
        self._logger_cache = {}
        self._logger_cache_lock = Lock()

    @property
    def resource(self):
        """The :class:`Resource` associated with this provider."""
        return self._resource

    def _get_logger_no_cache(
        self,
        name: str,
        version: str | None = None,
        schema_url: str | None = None,
        attributes: Attributes | None = None,
    ) -> Logger:
        # Always builds a fresh Logger; used directly when per-call
        # attributes make caching by (name, version, schema_url) unsound.
        return Logger(
            self._resource,
            self._multi_log_record_processor,
            InstrumentationScope(
                name,
                version,
                schema_url,
                attributes,
            ),
        )

    def _get_logger_cached(
        self,
        name: str,
        version: str | None = None,
        schema_url: str | None = None,
    ) -> Logger:
        # Return the cached Logger for this identity, creating it on first
        # use; the whole lookup-or-create runs under the cache lock.
        with self._logger_cache_lock:
            key = (name, version, schema_url)
            if key in self._logger_cache:
                return self._logger_cache[key]

            self._logger_cache[key] = self._get_logger_no_cache(
                name, version, schema_url
            )
            return self._logger_cache[key]

    def get_logger(
        self,
        name: str,
        version: str | None = None,
        schema_url: str | None = None,
        attributes: Attributes | None = None,
    ) -> Logger:
        """Return a `Logger` for the given instrumentation scope.

        When the SDK is disabled a `NoOpLogger` is returned. Calls without
        attributes are served from the cache; calls that pass attributes
        always create a fresh logger.
        """
        if self._disabled:
            return NoOpLogger(
                name,
                version=version,
                schema_url=schema_url,
                attributes=attributes,
            )
        if attributes is None:
            return self._get_logger_cached(name, version, schema_url)
        return self._get_logger_no_cache(name, version, schema_url, attributes)

    def add_log_record_processor(
        self, log_record_processor: LogRecordProcessor
    ):
        """Registers a new :class:`LogRecordProcessor` for this `LoggerProvider` instance.

        The log processors are invoked in the same order they are registered.
        """
        self._multi_log_record_processor.add_log_record_processor(
            log_record_processor
        )

    def shutdown(self):
        """Shuts down the log processors."""
        self._multi_log_record_processor.shutdown()
        # After an explicit shutdown, drop the atexit hook so shutdown does
        # not run a second time at interpreter exit.
        if self._at_exit_handler is not None:
            atexit.unregister(self._at_exit_handler)
            self._at_exit_handler = None

    def force_flush(self, timeout_millis: int = 30000) -> bool:
        """Force flush the log processors.

        Args:
            timeout_millis: The maximum amount of time to wait for logs to be
                exported.

        Returns:
            True if all the log processors flushes the logs within timeout,
            False otherwise.
        """
        return self._multi_log_record_processor.force_flush(timeout_millis)
|
||||
Binary file not shown.
@@ -0,0 +1,464 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from __future__ import annotations
|
||||
|
||||
import abc
|
||||
import collections
|
||||
import enum
|
||||
import logging
|
||||
import os
|
||||
import sys
|
||||
import threading
|
||||
import weakref
|
||||
from os import environ, linesep
|
||||
from time import time_ns
|
||||
from typing import IO, Callable, Deque, List, Optional, Sequence
|
||||
|
||||
from opentelemetry.context import (
|
||||
_SUPPRESS_INSTRUMENTATION_KEY,
|
||||
attach,
|
||||
detach,
|
||||
set_value,
|
||||
)
|
||||
from opentelemetry.sdk._logs import LogData, LogRecord, LogRecordProcessor
|
||||
from opentelemetry.sdk.environment_variables import (
|
||||
OTEL_BLRP_EXPORT_TIMEOUT,
|
||||
OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
|
||||
OTEL_BLRP_MAX_QUEUE_SIZE,
|
||||
OTEL_BLRP_SCHEDULE_DELAY,
|
||||
)
|
||||
from opentelemetry.util._once import Once
|
||||
|
||||
# Built-in defaults for BatchLogRecordProcessor, used when neither the
# constructor argument nor the corresponding OTEL_BLRP_* environment
# variable provides a value.
_DEFAULT_SCHEDULE_DELAY_MILLIS = 5000
_DEFAULT_MAX_EXPORT_BATCH_SIZE = 512
_DEFAULT_EXPORT_TIMEOUT_MILLIS = 30000
_DEFAULT_MAX_QUEUE_SIZE = 2048
# Logged (lazy %-style args) when an OTEL_BLRP_* variable cannot be parsed
# as an integer.
_ENV_VAR_INT_VALUE_ERROR_MESSAGE = (
    "Unable to parse value for %s as integer. Defaulting to %s."
)

_logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LogExportResult(enum.Enum):
    """Outcome of a :meth:`LogExporter.export` call."""

    SUCCESS = 0
    FAILURE = 1
|
||||
|
||||
|
||||
class LogExporter(abc.ABC):
    """Interface for exporting logs.

    Interface to be implemented by services that want to export logs received
    in their own format.

    To export data this MUST be registered to the :class:`opentelemetry.sdk._logs.Logger` using a
    log processor.
    """

    @abc.abstractmethod
    def export(self, batch: Sequence[LogData]):
        """Exports a batch of logs.

        Args:
            batch: The list of `LogData` objects to be exported

        Returns:
            The result of the export
        """

    @abc.abstractmethod
    def shutdown(self):
        """Shuts down the exporter.

        Called when the SDK is shut down.
        """
|
||||
|
||||
|
||||
class ConsoleLogExporter(LogExporter):
    """Implementation of :class:`LogExporter` that prints log records to the
    console.

    This class can be used for diagnostic purposes. It prints the exported
    log records to the console STDOUT.
    """

    def __init__(
        self,
        out: IO = sys.stdout,
        formatter: Callable[[LogRecord], str] = lambda record: record.to_json()
        + linesep,
    ):
        self.out = out
        self.formatter = formatter

    def export(self, batch: Sequence[LogData]):
        """Render each record in *batch* with the configured formatter,
        write it to the output stream and flush once at the end.
        """
        # Bind attribute lookups once; each record is still written
        # individually, exactly as before.
        write = self.out.write
        render = self.formatter
        for entry in batch:
            write(render(entry.log_record))
        self.out.flush()
        return LogExportResult.SUCCESS

    def shutdown(self):
        """Nothing to release; the output stream is not owned by us."""
|
||||
|
||||
|
||||
class SimpleLogRecordProcessor(LogRecordProcessor):
    """This is an implementation of LogRecordProcessor which passes
    received logs in the export-friendly LogData representation to the
    configured LogExporter, as soon as they are emitted.
    """

    def __init__(self, exporter: LogExporter):
        """
        Args:
            exporter: Destination that receives every emitted record
                synchronously, one at a time.
        """
        self._exporter = exporter
        self._shutdown = False

    def emit(self, log_data: LogData):
        """Export *log_data* synchronously on the caller's thread."""
        if self._shutdown:
            _logger.warning("Processor is already shutdown, ignoring call")
            return
        # Suppress instrumentation while exporting so the exporter's own
        # I/O does not recursively generate telemetry.
        token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
        try:
            self._exporter.export((log_data,))
        except Exception:  # pylint: disable=broad-exception-caught
            _logger.exception("Exception while exporting logs.")
        finally:
            # Restore the context unconditionally: without the finally the
            # suppression flag would leak if export raised a BaseException
            # (e.g. KeyboardInterrupt) that the except clause does not catch.
            detach(token)

    def shutdown(self):
        """Mark the processor shut down and shut down the exporter."""
        self._shutdown = True
        self._exporter.shutdown()

    def force_flush(self, timeout_millis: int = 30000) -> bool:  # pylint: disable=no-self-use
        """Nothing is buffered, so a flush always trivially succeeds."""
        return True
|
||||
|
||||
|
||||
class _FlushRequest:
|
||||
__slots__ = ["event", "num_log_records"]
|
||||
|
||||
def __init__(self):
|
||||
self.event = threading.Event()
|
||||
self.num_log_records = 0
|
||||
|
||||
|
||||
_BSP_RESET_ONCE = Once()
|
||||
|
||||
|
||||
class BatchLogRecordProcessor(LogRecordProcessor):
    """This is an implementation of LogRecordProcessor which creates batches of
    received logs in the export-friendly LogData representation and
    send to the configured LogExporter, as soon as they are emitted.

    `BatchLogRecordProcessor` is configurable with the following environment
    variables which correspond to constructor parameters:

    - :envvar:`OTEL_BLRP_SCHEDULE_DELAY`
    - :envvar:`OTEL_BLRP_MAX_QUEUE_SIZE`
    - :envvar:`OTEL_BLRP_MAX_EXPORT_BATCH_SIZE`
    - :envvar:`OTEL_BLRP_EXPORT_TIMEOUT`
    """

    # Bounded buffer of pending records (maxlen = max_queue_size).
    _queue: Deque[LogData]
    # Flush request shared between caller threads and the worker thread;
    # guarded by self._condition.
    _flush_request: _FlushRequest | None
    # Reusable batch buffer sized to max_export_batch_size.
    _log_records: List[LogData | None]

    def __init__(
        self,
        exporter: LogExporter,
        schedule_delay_millis: float | None = None,
        max_export_batch_size: int | None = None,
        export_timeout_millis: float | None = None,
        max_queue_size: int | None = None,
    ):
        """
        Args:
            exporter: Destination for the batched records.
            schedule_delay_millis: Delay between two consecutive exports.
            max_export_batch_size: Maximum number of records per export call.
            export_timeout_millis: Maximum time force_flush waits by default.
            max_queue_size: Maximum number of buffered records.

        Any parameter left as None falls back to its OTEL_BLRP_* environment
        variable, then to the module default.
        """
        if max_queue_size is None:
            max_queue_size = BatchLogRecordProcessor._default_max_queue_size()

        if schedule_delay_millis is None:
            schedule_delay_millis = (
                BatchLogRecordProcessor._default_schedule_delay_millis()
            )

        if max_export_batch_size is None:
            max_export_batch_size = (
                BatchLogRecordProcessor._default_max_export_batch_size()
            )

        if export_timeout_millis is None:
            export_timeout_millis = (
                BatchLogRecordProcessor._default_export_timeout_millis()
            )

        BatchLogRecordProcessor._validate_arguments(
            max_queue_size, schedule_delay_millis, max_export_batch_size
        )

        self._exporter = exporter
        self._max_queue_size = max_queue_size
        self._schedule_delay_millis = schedule_delay_millis
        self._max_export_batch_size = max_export_batch_size
        self._export_timeout_millis = export_timeout_millis
        # maxlen makes the deque silently drop the oldest entries when full.
        self._queue = collections.deque([], max_queue_size)
        self._worker_thread = threading.Thread(
            name="OtelBatchLogRecordProcessor",
            target=self.worker,
            daemon=True,
        )
        self._condition = threading.Condition(threading.Lock())
        self._shutdown = False
        self._flush_request = None
        self._log_records = [None] * self._max_export_batch_size
        self._worker_thread.start()
        # Restart the worker in forked children. The WeakMethod keeps this
        # processor collectable despite the interpreter-global fork hook.
        if hasattr(os, "register_at_fork"):
            weak_reinit = weakref.WeakMethod(self._at_fork_reinit)
            os.register_at_fork(after_in_child=lambda: weak_reinit()())  # pylint: disable=unnecessary-lambda
        self._pid = os.getpid()

    def _at_fork_reinit(self):
        # The forked child inherits a possibly-held lock and a dead worker
        # thread; recreate both and drop inherited (parent) records.
        self._condition = threading.Condition(threading.Lock())
        self._queue.clear()
        self._worker_thread = threading.Thread(
            name="OtelBatchLogRecordProcessor",
            target=self.worker,
            daemon=True,
        )
        self._worker_thread.start()
        self._pid = os.getpid()

    def worker(self):
        """Worker-thread main loop: wait for a full batch, a flush request
        or the schedule delay, export, and repeat until shutdown."""
        timeout = self._schedule_delay_millis / 1e3
        flush_request: Optional[_FlushRequest] = None
        while not self._shutdown:
            with self._condition:
                if self._shutdown:
                    # shutdown may have been called, avoid further processing
                    break
                flush_request = self._get_and_unset_flush_request()
                if (
                    len(self._queue) < self._max_export_batch_size
                    and flush_request is None
                ):
                    self._condition.wait(timeout)

                    flush_request = self._get_and_unset_flush_request()
                    if not self._queue:
                        # Spurious wakeup / timer with nothing to do: reset
                        # the timer and satisfy any (empty) flush request.
                        timeout = self._schedule_delay_millis / 1e3
                        self._notify_flush_request_finished(flush_request)
                        flush_request = None
                        continue
                    if self._shutdown:
                        break

            start_ns = time_ns()
            self._export(flush_request)
            end_ns = time_ns()
            # subtract the duration of this export call to the next timeout
            timeout = self._schedule_delay_millis / 1e3 - (
                (end_ns - start_ns) / 1e9
            )

            self._notify_flush_request_finished(flush_request)
            flush_request = None

        # there might have been a new flush request while export was running
        # and before the done flag switched to true
        with self._condition:
            shutdown_flush_request = self._get_and_unset_flush_request()

        # flush the remaining logs
        self._drain_queue()
        self._notify_flush_request_finished(flush_request)
        self._notify_flush_request_finished(shutdown_flush_request)

    def _export(self, flush_request: Optional[_FlushRequest] = None):
        """Exports logs considering the given flush_request.

        If flush_request is not None then logs are exported in batches
        until the number of exported logs reached or exceeded the num of logs in
        flush_request, otherwise exports at max max_export_batch_size logs.
        """
        if flush_request is None:
            self._export_batch()
            return

        num_log_records = flush_request.num_log_records
        while self._queue:
            exported = self._export_batch()
            num_log_records -= exported

            if num_log_records <= 0:
                break

    def _export_batch(self) -> int:
        """Exports at most max_export_batch_size logs and returns the number of
        exported logs.
        """
        idx = 0
        # Fill the reusable buffer from the queue (pop takes the oldest
        # entries, since emit appends on the left).
        while idx < self._max_export_batch_size and self._queue:
            record = self._queue.pop()
            self._log_records[idx] = record
            idx += 1
        # Suppress instrumentation so the exporter's own I/O does not
        # recursively generate telemetry.
        token = attach(set_value(_SUPPRESS_INSTRUMENTATION_KEY, True))
        try:
            self._exporter.export(self._log_records[:idx])  # type: ignore
        except Exception:  # pylint: disable=broad-exception-caught
            _logger.exception("Exception while exporting logs.")
        detach(token)

        # Clear buffer slots so exported records can be garbage collected.
        for index in range(idx):
            self._log_records[index] = None
        return idx

    def _drain_queue(self):
        """Export all elements until queue is empty.

        Can only be called from the worker thread context because it invokes
        `export` that is not thread safe.
        """
        while self._queue:
            self._export_batch()

    def _get_and_unset_flush_request(self) -> Optional[_FlushRequest]:
        # Take ownership of the pending flush request (if any) and record
        # how many logs are queued at pickup time; caller holds _condition.
        flush_request = self._flush_request
        self._flush_request = None
        if flush_request is not None:
            flush_request.num_log_records = len(self._queue)
        return flush_request

    @staticmethod
    def _notify_flush_request_finished(
        flush_request: Optional[_FlushRequest] = None,
    ):
        # Wake the thread blocked in force_flush(); a None request is a no-op.
        if flush_request is not None:
            flush_request.event.set()

    def _get_or_create_flush_request(self) -> _FlushRequest:
        # Concurrent force_flush callers share a single request; caller
        # holds _condition.
        if self._flush_request is None:
            self._flush_request = _FlushRequest()
        return self._flush_request

    def emit(self, log_data: LogData) -> None:
        """Adds the `LogData` to queue and notifies the waiting threads
        when size of queue reaches max_export_batch_size.
        """
        if self._shutdown:
            return
        # Detect that we are in a forked child and reinitialise (once).
        if self._pid != os.getpid():
            _BSP_RESET_ONCE.do_once(self._at_fork_reinit)

        self._queue.appendleft(log_data)
        if len(self._queue) >= self._max_export_batch_size:
            with self._condition:
                self._condition.notify()

    def shutdown(self):
        """Stop the worker thread, drain the queue and shut the exporter down."""
        self._shutdown = True
        with self._condition:
            self._condition.notify_all()
        self._worker_thread.join()
        self._exporter.shutdown()

    def force_flush(self, timeout_millis: Optional[int] = None) -> bool:
        """Block until queued logs are exported or *timeout_millis* elapses.

        Returns True on success, False if the timeout was exceeded.
        """
        if timeout_millis is None:
            timeout_millis = self._export_timeout_millis
        if self._shutdown:
            return True

        with self._condition:
            flush_request = self._get_or_create_flush_request()
            self._condition.notify_all()

        ret = flush_request.event.wait(timeout_millis / 1e3)
        if not ret:
            _logger.warning("Timeout was exceeded in force_flush().")
        return ret

    @staticmethod
    def _default_max_queue_size():
        # Env override with safe fallback on unparsable values.
        try:
            return int(
                environ.get(OTEL_BLRP_MAX_QUEUE_SIZE, _DEFAULT_MAX_QUEUE_SIZE)
            )
        except ValueError:
            _logger.exception(
                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
                OTEL_BLRP_MAX_QUEUE_SIZE,
                _DEFAULT_MAX_QUEUE_SIZE,
            )
            return _DEFAULT_MAX_QUEUE_SIZE

    @staticmethod
    def _default_schedule_delay_millis():
        # Env override with safe fallback on unparsable values.
        try:
            return int(
                environ.get(
                    OTEL_BLRP_SCHEDULE_DELAY, _DEFAULT_SCHEDULE_DELAY_MILLIS
                )
            )
        except ValueError:
            _logger.exception(
                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
                OTEL_BLRP_SCHEDULE_DELAY,
                _DEFAULT_SCHEDULE_DELAY_MILLIS,
            )
            return _DEFAULT_SCHEDULE_DELAY_MILLIS

    @staticmethod
    def _default_max_export_batch_size():
        # Env override with safe fallback on unparsable values.
        try:
            return int(
                environ.get(
                    OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
                    _DEFAULT_MAX_EXPORT_BATCH_SIZE,
                )
            )
        except ValueError:
            _logger.exception(
                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
                OTEL_BLRP_MAX_EXPORT_BATCH_SIZE,
                _DEFAULT_MAX_EXPORT_BATCH_SIZE,
            )
            return _DEFAULT_MAX_EXPORT_BATCH_SIZE

    @staticmethod
    def _default_export_timeout_millis():
        # Env override with safe fallback on unparsable values.
        try:
            return int(
                environ.get(
                    OTEL_BLRP_EXPORT_TIMEOUT, _DEFAULT_EXPORT_TIMEOUT_MILLIS
                )
            )
        except ValueError:
            _logger.exception(
                _ENV_VAR_INT_VALUE_ERROR_MESSAGE,
                OTEL_BLRP_EXPORT_TIMEOUT,
                _DEFAULT_EXPORT_TIMEOUT_MILLIS,
            )
            return _DEFAULT_EXPORT_TIMEOUT_MILLIS

    @staticmethod
    def _validate_arguments(
        max_queue_size, schedule_delay_millis, max_export_batch_size
    ):
        """Raise ValueError for non-positive or inconsistent sizes/delays."""
        if max_queue_size <= 0:
            raise ValueError("max_queue_size must be a positive integer.")

        if schedule_delay_millis <= 0:
            raise ValueError("schedule_delay_millis must be positive.")

        if max_export_batch_size <= 0:
            raise ValueError(
                "max_export_batch_size must be a positive integer."
            )

        if max_export_batch_size > max_queue_size:
            raise ValueError(
                "max_export_batch_size must be less than or equal to max_queue_size."
            )
|
||||
Binary file not shown.
Binary file not shown.
@@ -0,0 +1,51 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import threading
|
||||
import typing
|
||||
|
||||
from opentelemetry.sdk._logs import LogData
|
||||
from opentelemetry.sdk._logs.export import LogExporter, LogExportResult
|
||||
|
||||
|
||||
class InMemoryLogExporter(LogExporter):
    """Implementation of :class:`.LogExporter` that stores logs in memory.

    This class can be used for testing purposes. It stores the exported logs
    in a list in memory that can be retrieved using the
    :func:`.get_finished_logs` method.
    """

    def __init__(self):
        self._stopped = False
        self._lock = threading.Lock()
        self._logs = []

    def clear(self) -> None:
        """Discard every log collected so far."""
        with self._lock:
            self._logs.clear()

    def get_finished_logs(self) -> typing.Tuple[LogData, ...]:
        """Return a snapshot of the collected logs, in export order."""
        with self._lock:
            return tuple(self._logs)

    def export(self, batch: typing.Sequence[LogData]) -> LogExportResult:
        """Append *batch* to the in-memory store; fails after shutdown."""
        if self._stopped:
            return LogExportResult.FAILURE
        with self._lock:
            for item in batch:
                self._logs.append(item)
        return LogExportResult.SUCCESS

    def shutdown(self) -> None:
        """Stop accepting logs; already-collected ones remain readable."""
        self._stopped = True
|
||||
@@ -0,0 +1,35 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from opentelemetry.sdk._logs._internal.export import (
|
||||
BatchLogRecordProcessor,
|
||||
ConsoleLogExporter,
|
||||
LogExporter,
|
||||
LogExportResult,
|
||||
SimpleLogRecordProcessor,
|
||||
)
|
||||
|
||||
# The point module is not in the export directory to avoid a circular import.
|
||||
from opentelemetry.sdk._logs._internal.export.in_memory_log_exporter import (
|
||||
InMemoryLogExporter,
|
||||
)
|
||||
|
||||
# Public API of the export package, re-exported from the internal modules.
__all__ = [
    "BatchLogRecordProcessor",
    "ConsoleLogExporter",
    "LogExporter",
    "LogExportResult",
    "SimpleLogRecordProcessor",
    "InMemoryLogExporter",
]
|
||||
Binary file not shown.
@@ -0,0 +1,721 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# --- SDK-wide configuration -------------------------------------------------

OTEL_SDK_DISABLED = "OTEL_SDK_DISABLED"
"""
.. envvar:: OTEL_SDK_DISABLED

The :envvar:`OTEL_SDK_DISABLED` environment variable disables the SDK for all signals
Default: "false"
"""

OTEL_RESOURCE_ATTRIBUTES = "OTEL_RESOURCE_ATTRIBUTES"
"""
.. envvar:: OTEL_RESOURCE_ATTRIBUTES

The :envvar:`OTEL_RESOURCE_ATTRIBUTES` environment variable allows resource
attributes to be passed to the SDK at process invocation. The attributes from
:envvar:`OTEL_RESOURCE_ATTRIBUTES` are merged with those passed to
`Resource.create`, meaning :envvar:`OTEL_RESOURCE_ATTRIBUTES` takes *lower*
priority. Attributes should be in the format ``key1=value1,key2=value2``.
Additional details are available `in the specification
<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/resource/sdk.md#specifying-resource-information-via-an-environment-variable>`__.

.. code-block:: console

    $ OTEL_RESOURCE_ATTRIBUTES="service.name=shoppingcard,will_be_overridden=foo" python - <<EOF
    import pprint
    from opentelemetry.sdk.resources import Resource
    pprint.pprint(Resource.create({"will_be_overridden": "bar"}).attributes)
    EOF
    {'service.name': 'shoppingcard',
     'telemetry.sdk.language': 'python',
     'telemetry.sdk.name': 'opentelemetry',
     'telemetry.sdk.version': '0.13.dev0',
     'will_be_overridden': 'bar'}
"""

OTEL_LOG_LEVEL = "OTEL_LOG_LEVEL"
"""
.. envvar:: OTEL_LOG_LEVEL

The :envvar:`OTEL_LOG_LEVEL` environment variable sets the log level used by the SDK logger
Default: "info"
"""

# --- Trace sampling ---------------------------------------------------------

OTEL_TRACES_SAMPLER = "OTEL_TRACES_SAMPLER"
"""
.. envvar:: OTEL_TRACES_SAMPLER

The :envvar:`OTEL_TRACES_SAMPLER` environment variable sets the sampler to be used for traces.
Sampling is a mechanism to control the noise introduced by OpenTelemetry by reducing the number
of traces collected and sent to the backend
Default: "parentbased_always_on"
"""

OTEL_TRACES_SAMPLER_ARG = "OTEL_TRACES_SAMPLER_ARG"
"""
.. envvar:: OTEL_TRACES_SAMPLER_ARG

The :envvar:`OTEL_TRACES_SAMPLER_ARG` environment variable will only be used if OTEL_TRACES_SAMPLER is set.
Each Sampler type defines its own expected input, if any.
Invalid or unrecognized input is ignored,
i.e. the SDK behaves as if OTEL_TRACES_SAMPLER_ARG is not set.
"""

# --- Batch log record processor (BLRP) --------------------------------------

OTEL_BLRP_SCHEDULE_DELAY = "OTEL_BLRP_SCHEDULE_DELAY"
"""
.. envvar:: OTEL_BLRP_SCHEDULE_DELAY

The :envvar:`OTEL_BLRP_SCHEDULE_DELAY` represents the delay interval between two consecutive exports of the BatchLogRecordProcessor.
Default: 5000
"""

OTEL_BLRP_EXPORT_TIMEOUT = "OTEL_BLRP_EXPORT_TIMEOUT"
"""
.. envvar:: OTEL_BLRP_EXPORT_TIMEOUT

The :envvar:`OTEL_BLRP_EXPORT_TIMEOUT` represents the maximum allowed time to export data from the BatchLogRecordProcessor.
Default: 30000
"""

OTEL_BLRP_MAX_QUEUE_SIZE = "OTEL_BLRP_MAX_QUEUE_SIZE"
"""
.. envvar:: OTEL_BLRP_MAX_QUEUE_SIZE

The :envvar:`OTEL_BLRP_MAX_QUEUE_SIZE` represents the maximum queue size for the data export of the BatchLogRecordProcessor.
Default: 2048
"""

OTEL_BLRP_MAX_EXPORT_BATCH_SIZE = "OTEL_BLRP_MAX_EXPORT_BATCH_SIZE"
"""
.. envvar:: OTEL_BLRP_MAX_EXPORT_BATCH_SIZE

The :envvar:`OTEL_BLRP_MAX_EXPORT_BATCH_SIZE` represents the maximum batch size for the data export of the BatchLogRecordProcessor.
Default: 512
"""

# --- Batch span processor (BSP) ---------------------------------------------

OTEL_BSP_SCHEDULE_DELAY = "OTEL_BSP_SCHEDULE_DELAY"
"""
.. envvar:: OTEL_BSP_SCHEDULE_DELAY

The :envvar:`OTEL_BSP_SCHEDULE_DELAY` represents the delay interval between two consecutive exports of the BatchSpanProcessor.
Default: 5000
"""

OTEL_BSP_EXPORT_TIMEOUT = "OTEL_BSP_EXPORT_TIMEOUT"
"""
.. envvar:: OTEL_BSP_EXPORT_TIMEOUT

The :envvar:`OTEL_BSP_EXPORT_TIMEOUT` represents the maximum allowed time to export data from the BatchSpanProcessor.
Default: 30000
"""

OTEL_BSP_MAX_QUEUE_SIZE = "OTEL_BSP_MAX_QUEUE_SIZE"
"""
.. envvar:: OTEL_BSP_MAX_QUEUE_SIZE

The :envvar:`OTEL_BSP_MAX_QUEUE_SIZE` represents the maximum queue size for the data export of the BatchSpanProcessor.
Default: 2048
"""

OTEL_BSP_MAX_EXPORT_BATCH_SIZE = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE"
"""
.. envvar:: OTEL_BSP_MAX_EXPORT_BATCH_SIZE

The :envvar:`OTEL_BSP_MAX_EXPORT_BATCH_SIZE` represents the maximum batch size for the data export of the BatchSpanProcessor.
Default: 512
"""

# --- Attribute / span limits ------------------------------------------------

OTEL_ATTRIBUTE_COUNT_LIMIT = "OTEL_ATTRIBUTE_COUNT_LIMIT"
"""
.. envvar:: OTEL_ATTRIBUTE_COUNT_LIMIT

The :envvar:`OTEL_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed attribute count for spans, events and links.
This limit is overridden by model specific limits such as OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT.
Default: 128
"""

OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT = "OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT"
"""
.. envvar:: OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT

The :envvar:`OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT` represents the maximum allowed attribute length.
"""

OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT = "OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT"
"""
.. envvar:: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT

The :envvar:`OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed event attribute count.
Default: 128
"""

OTEL_LINK_ATTRIBUTE_COUNT_LIMIT = "OTEL_LINK_ATTRIBUTE_COUNT_LIMIT"
"""
.. envvar:: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT

The :envvar:`OTEL_LINK_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed link attribute count.
Default: 128
"""

OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT = "OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT"
"""
.. envvar:: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT

The :envvar:`OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT` represents the maximum allowed span attribute count.
Default: 128
"""

OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT = (
    "OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT"
)
"""
.. envvar:: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT

The :envvar:`OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT` represents the maximum allowed length
span attribute values can have. This takes precedence over :envvar:`OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT`.
"""

OTEL_SPAN_EVENT_COUNT_LIMIT = "OTEL_SPAN_EVENT_COUNT_LIMIT"
"""
.. envvar:: OTEL_SPAN_EVENT_COUNT_LIMIT

The :envvar:`OTEL_SPAN_EVENT_COUNT_LIMIT` represents the maximum allowed span event count.
Default: 128
"""

OTEL_SPAN_LINK_COUNT_LIMIT = "OTEL_SPAN_LINK_COUNT_LIMIT"
"""
.. envvar:: OTEL_SPAN_LINK_COUNT_LIMIT

The :envvar:`OTEL_SPAN_LINK_COUNT_LIMIT` represents the maximum allowed span link count.
Default: 128
"""

# --- Exporter configuration (Jaeger / Zipkin / OTLP) ------------------------

OTEL_EXPORTER_JAEGER_AGENT_HOST = "OTEL_EXPORTER_JAEGER_AGENT_HOST"
"""
.. envvar:: OTEL_EXPORTER_JAEGER_AGENT_HOST

The :envvar:`OTEL_EXPORTER_JAEGER_AGENT_HOST` represents the hostname for the Jaeger agent.
Default: "localhost"
"""

OTEL_EXPORTER_JAEGER_AGENT_PORT = "OTEL_EXPORTER_JAEGER_AGENT_PORT"
"""
.. envvar:: OTEL_EXPORTER_JAEGER_AGENT_PORT

The :envvar:`OTEL_EXPORTER_JAEGER_AGENT_PORT` represents the port for the Jaeger agent.
Default: 6831
"""

OTEL_EXPORTER_JAEGER_ENDPOINT = "OTEL_EXPORTER_JAEGER_ENDPOINT"
"""
.. envvar:: OTEL_EXPORTER_JAEGER_ENDPOINT

The :envvar:`OTEL_EXPORTER_JAEGER_ENDPOINT` represents the HTTP endpoint for Jaeger traces.
Default: "http://localhost:14250"
"""

OTEL_EXPORTER_JAEGER_USER = "OTEL_EXPORTER_JAEGER_USER"
"""
.. envvar:: OTEL_EXPORTER_JAEGER_USER

The :envvar:`OTEL_EXPORTER_JAEGER_USER` represents the username to be used for HTTP basic authentication.
"""

OTEL_EXPORTER_JAEGER_PASSWORD = "OTEL_EXPORTER_JAEGER_PASSWORD"
"""
.. envvar:: OTEL_EXPORTER_JAEGER_PASSWORD

The :envvar:`OTEL_EXPORTER_JAEGER_PASSWORD` represents the password to be used for HTTP basic authentication.
"""

OTEL_EXPORTER_JAEGER_TIMEOUT = "OTEL_EXPORTER_JAEGER_TIMEOUT"
"""
.. envvar:: OTEL_EXPORTER_JAEGER_TIMEOUT

Maximum time the Jaeger exporter will wait for each batch export.
Default: 10
"""

OTEL_EXPORTER_ZIPKIN_ENDPOINT = "OTEL_EXPORTER_ZIPKIN_ENDPOINT"
"""
.. envvar:: OTEL_EXPORTER_ZIPKIN_ENDPOINT

Zipkin collector endpoint to which the exporter will send data. This may
include a path (e.g. ``http://example.com:9411/api/v2/spans``).
"""

OTEL_EXPORTER_ZIPKIN_TIMEOUT = "OTEL_EXPORTER_ZIPKIN_TIMEOUT"
"""
.. envvar:: OTEL_EXPORTER_ZIPKIN_TIMEOUT

Maximum time (in seconds) the Zipkin exporter will wait for each batch export.
Default: 10
"""

OTEL_EXPORTER_OTLP_PROTOCOL = "OTEL_EXPORTER_OTLP_PROTOCOL"
"""
.. envvar:: OTEL_EXPORTER_OTLP_PROTOCOL

The :envvar:`OTEL_EXPORTER_OTLP_PROTOCOL` represents the transport protocol for the
OTLP exporter.
"""

OTEL_EXPORTER_OTLP_TRACES_PROTOCOL = "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL"
"""
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_PROTOCOL

The :envvar:`OTEL_EXPORTER_OTLP_TRACES_PROTOCOL` represents the transport protocol for spans.
"""

OTEL_EXPORTER_OTLP_METRICS_PROTOCOL = "OTEL_EXPORTER_OTLP_METRICS_PROTOCOL"
"""
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_PROTOCOL

The :envvar:`OTEL_EXPORTER_OTLP_METRICS_PROTOCOL` represents the transport protocol for metrics.
"""

OTEL_EXPORTER_OTLP_LOGS_PROTOCOL = "OTEL_EXPORTER_OTLP_LOGS_PROTOCOL"
"""
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_PROTOCOL

The :envvar:`OTEL_EXPORTER_OTLP_LOGS_PROTOCOL` represents the transport protocol for logs.
"""

OTEL_EXPORTER_OTLP_CERTIFICATE = "OTEL_EXPORTER_OTLP_CERTIFICATE"
"""
.. envvar:: OTEL_EXPORTER_OTLP_CERTIFICATE

The :envvar:`OTEL_EXPORTER_OTLP_CERTIFICATE` stores the path to the certificate file for
TLS credentials of gRPC client. Should only be used for a secure connection.
"""

OTEL_EXPORTER_OTLP_HEADERS = "OTEL_EXPORTER_OTLP_HEADERS"
"""
.. envvar:: OTEL_EXPORTER_OTLP_HEADERS

The :envvar:`OTEL_EXPORTER_OTLP_HEADERS` contains the key-value pairs to be used as headers
associated with gRPC or HTTP requests.
"""
|
||||
|
||||
|
||||
OTEL_EXPORTER_OTLP_COMPRESSION = "OTEL_EXPORTER_OTLP_COMPRESSION"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_COMPRESSION
|
||||
|
||||
Specifies a gRPC compression method to be used in the OTLP exporters.
|
||||
Possible values are:
|
||||
|
||||
- ``gzip`` corresponding to `grpc.Compression.Gzip`.
|
||||
- ``deflate`` corresponding to `grpc.Compression.Deflate`.
|
||||
|
||||
If no ``OTEL_EXPORTER_OTLP_*COMPRESSION`` environment variable is present or
|
||||
``compression`` argument passed to the exporter, the default
|
||||
`grpc.Compression.NoCompression` will be used. Additional details are
|
||||
available `in the specification
|
||||
<https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#opentelemetry-protocol-exporter>`__.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_TIMEOUT = "OTEL_EXPORTER_OTLP_TIMEOUT"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_TIMEOUT
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_TIMEOUT` is the maximum time the OTLP exporter will wait for each batch export.
|
||||
Default: 10
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_ENDPOINT = "OTEL_EXPORTER_OTLP_ENDPOINT"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_ENDPOINT
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_ENDPOINT` target to which the exporter is going to send spans or metrics.
|
||||
The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path.
|
||||
A scheme of https indicates a secure connection and takes precedence over the insecure configuration setting.
|
||||
Default: "http://localhost:4317"
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_INSECURE = "OTEL_EXPORTER_OTLP_INSECURE"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_INSECURE
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_INSECURE` represents whether to enable client transport security for gRPC requests.
|
||||
A scheme of https takes precedence over this configuration setting.
|
||||
Default: False
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_TRACES_INSECURE = "OTEL_EXPORTER_OTLP_TRACES_INSECURE"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_INSECURE
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_TRACES_INSECURE` represents whether to enable client transport security
|
||||
for gRPC requests for spans. A scheme of https takes precedence over this configuration setting.
|
||||
Default: False
|
||||
"""
|
||||
|
||||
|
||||
OTEL_EXPORTER_OTLP_TRACES_ENDPOINT = "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_ENDPOINT
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` target to which the span exporter is going to send spans.
|
||||
The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path.
|
||||
A scheme of https indicates a secure connection and takes precedence over this configuration setting.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_METRICS_ENDPOINT = "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_ENDPOINT
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` target to which the metrics exporter is going to send metrics.
|
||||
The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path.
|
||||
A scheme of https indicates a secure connection and takes precedence over this configuration setting.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_LOGS_ENDPOINT = "OTEL_EXPORTER_OTLP_LOGS_ENDPOINT"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_ENDPOINT
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_LOGS_ENDPOINT` target to which the log exporter is going to send logs.
|
||||
The endpoint MUST be a valid URL host, and MAY contain a scheme (http or https), port and path.
|
||||
A scheme of https indicates a secure connection and takes precedence over this configuration setting.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE = "OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` stores the path to the certificate file for
|
||||
TLS credentials of gRPC client for traces. Should only be used for a secure connection for tracing.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE = (
|
||||
"OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE"
|
||||
)
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE` stores the path to the certificate file for
|
||||
TLS credentials of gRPC client for metrics. Should only be used for a secure connection for exporting metrics.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_CLIENT_KEY = "OTEL_EXPORTER_OTLP_CLIENT_KEY"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_CLIENT_KEY
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_CLIENT_KEY` stores the path to the client private key to use
|
||||
in mTLS communication in PEM format.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY = "OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY` stores the path to the client private key to use
|
||||
in mTLS communication in PEM format for traces.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY = "OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY` stores the path to the client private key to use
|
||||
in mTLS communication in PEM format for metrics.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY = "OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY` stores the path to the client private key to use
|
||||
in mTLS communication in PEM format for logs.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE = "OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for
|
||||
clients private key to use in mTLS communication in PEM format.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE = (
|
||||
"OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE"
|
||||
)
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for
|
||||
clients private key to use in mTLS communication in PEM format for traces.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE = (
|
||||
"OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE"
|
||||
)
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for
|
||||
clients private key to use in mTLS communication in PEM format for metrics.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE = (
|
||||
"OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE"
|
||||
)
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE` stores the path to the client certificate/chain trust for
|
||||
clients private key to use in mTLS communication in PEM format for logs.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_TRACES_HEADERS = "OTEL_EXPORTER_OTLP_TRACES_HEADERS"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_HEADERS
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_TRACES_HEADERS` contains the key-value pairs to be used as headers for spans
|
||||
associated with gRPC or HTTP requests.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_METRICS_HEADERS = "OTEL_EXPORTER_OTLP_METRICS_HEADERS"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_HEADERS
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_METRICS_HEADERS` contains the key-value pairs to be used as headers for metrics
|
||||
associated with gRPC or HTTP requests.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_LOGS_HEADERS = "OTEL_EXPORTER_OTLP_LOGS_HEADERS"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_HEADERS
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_LOGS_HEADERS` contains the key-value pairs to be used as headers for logs
|
||||
associated with gRPC or HTTP requests.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_TRACES_COMPRESSION = "OTEL_EXPORTER_OTLP_TRACES_COMPRESSION"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_COMPRESSION
|
||||
|
||||
Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the span
|
||||
exporter. If both are present, this takes higher precedence.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_METRICS_COMPRESSION = (
|
||||
"OTEL_EXPORTER_OTLP_METRICS_COMPRESSION"
|
||||
)
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_COMPRESSION
|
||||
|
||||
Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the metric
|
||||
exporter. If both are present, this takes higher precedence.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_LOGS_COMPRESSION = "OTEL_EXPORTER_OTLP_LOGS_COMPRESSION"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_COMPRESSION
|
||||
|
||||
Same as :envvar:`OTEL_EXPORTER_OTLP_COMPRESSION` but only for the log
|
||||
exporter. If both are present, this takes higher precedence.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_TRACES_TIMEOUT = "OTEL_EXPORTER_OTLP_TRACES_TIMEOUT"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_TRACES_TIMEOUT
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` is the maximum time the OTLP exporter will
|
||||
wait for each batch export for spans.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_METRICS_TIMEOUT = "OTEL_EXPORTER_OTLP_METRICS_TIMEOUT"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_TIMEOUT
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` is the maximum time the OTLP exporter will
|
||||
wait for each batch export for metrics.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_METRICS_INSECURE = "OTEL_EXPORTER_OTLP_METRICS_INSECURE"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_INSECURE
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_METRICS_INSECURE` represents whether to enable client transport security
|
||||
for gRPC requests for metrics. A scheme of https takes precedence over this configuration setting.
|
||||
Default: False
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_LOGS_INSECURE = "OTEL_EXPORTER_OTLP_LOGS_INSECURE"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_INSECURE
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_LOGS_INSECURE` represents whether to enable client transport security
|
||||
for gRPC requests for logs. A scheme of https takes precedence over this configuration setting.
|
||||
Default: False
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE = "OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE` stores the path to the certificate file for
|
||||
TLS credentials of gRPC client for logs. Should only be used for a secure connection for logs.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_LOGS_TIMEOUT = "OTEL_EXPORTER_OTLP_LOGS_TIMEOUT"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_LOGS_TIMEOUT
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_LOGS_TIMEOUT` is the maximum time the OTLP exporter will
|
||||
wait for each batch export for logs.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_JAEGER_CERTIFICATE = "OTEL_EXPORTER_JAEGER_CERTIFICATE"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_JAEGER_CERTIFICATE
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_JAEGER_CERTIFICATE` stores the path to the certificate file for
|
||||
TLS credentials of gRPC client for Jaeger. Should only be used for a secure connection with Jaeger.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES = (
|
||||
"OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES"
|
||||
)
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_JAEGER_AGENT_SPLIT_OVERSIZED_BATCHES` is a boolean flag to determine whether
|
||||
to split an oversized span batch so that it adheres to the UDP packet size limit.
|
||||
"""
|
||||
|
||||
OTEL_SERVICE_NAME = "OTEL_SERVICE_NAME"
|
||||
"""
|
||||
.. envvar:: OTEL_SERVICE_NAME
|
||||
|
||||
Convenience environment variable for setting the service name resource attribute.
|
||||
The following two environment variables have the same effect
|
||||
|
||||
.. code-block:: console
|
||||
|
||||
OTEL_SERVICE_NAME=my-python-service
|
||||
|
||||
OTEL_RESOURCE_ATTRIBUTES=service.name=my-python-service
|
||||
|
||||
|
||||
If both are set, :envvar:`OTEL_SERVICE_NAME` takes precedence.
|
||||
"""
|
||||
|
||||
|
||||
_OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED = (
|
||||
"OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED"
|
||||
)
|
||||
"""
|
||||
.. envvar:: OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED
|
||||
|
||||
The :envvar:`OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED` environment variable allows users to
|
||||
enable/disable the auto instrumentation for the python logging module.
|
||||
Default: False
|
||||
|
||||
Note: Logs SDK and its related settings are experimental.
|
||||
"""
|
||||
|
||||
|
||||
OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE = (
|
||||
"OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE"
|
||||
)
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE` environment
|
||||
variable allows users to set the default aggregation temporality policy to use
|
||||
on the basis of instrument kind. The valid (case-insensitive) values are:
|
||||
|
||||
``CUMULATIVE``: Use ``CUMULATIVE`` aggregation temporality for all instrument kinds.
|
||||
``DELTA``: Use ``DELTA`` aggregation temporality for ``Counter``, ``Asynchronous Counter`` and ``Histogram``.
|
||||
Use ``CUMULATIVE`` aggregation temporality for ``UpDownCounter`` and ``Asynchronous UpDownCounter``.
|
||||
``LOWMEMORY``: Use ``DELTA`` aggregation temporality for ``Counter`` and ``Histogram``.
|
||||
Use ``CUMULATIVE`` aggregation temporality for ``UpDownCounter``, ``AsynchronousCounter`` and ``Asynchronous UpDownCounter``.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_JAEGER_GRPC_INSECURE = "OTEL_EXPORTER_JAEGER_GRPC_INSECURE"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_JAEGER_GRPC_INSECURE
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_JAEGER_GRPC_INSECURE` is a boolean flag to be set to True if the collector has no encryption or authentication.
|
||||
"""
|
||||
|
||||
OTEL_METRIC_EXPORT_INTERVAL = "OTEL_METRIC_EXPORT_INTERVAL"
|
||||
"""
|
||||
.. envvar:: OTEL_METRIC_EXPORT_INTERVAL
|
||||
|
||||
The :envvar:`OTEL_METRIC_EXPORT_INTERVAL` is the time interval (in milliseconds) between the start of two export attempts.
|
||||
"""
|
||||
|
||||
OTEL_METRIC_EXPORT_TIMEOUT = "OTEL_METRIC_EXPORT_TIMEOUT"
|
||||
"""
|
||||
.. envvar:: OTEL_METRIC_EXPORT_TIMEOUT
|
||||
|
||||
The :envvar:`OTEL_METRIC_EXPORT_TIMEOUT` is the maximum allowed time (in milliseconds) to export data.
|
||||
"""
|
||||
|
||||
OTEL_METRICS_EXEMPLAR_FILTER = "OTEL_METRICS_EXEMPLAR_FILTER"
|
||||
"""
|
||||
.. envvar:: OTEL_METRICS_EXEMPLAR_FILTER
|
||||
|
||||
The :envvar:`OTEL_METRICS_EXEMPLAR_FILTER` is the filter for which measurements can become Exemplars.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION = (
|
||||
"OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION"
|
||||
)
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION` is the default aggregation to use for histogram instruments.
|
||||
"""
|
||||
|
||||
OTEL_EXPERIMENTAL_RESOURCE_DETECTORS = "OTEL_EXPERIMENTAL_RESOURCE_DETECTORS"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPERIMENTAL_RESOURCE_DETECTORS
|
||||
|
||||
The :envvar:`OTEL_EXPERIMENTAL_RESOURCE_DETECTORS` is a comma-separated string
|
||||
of names of resource detectors. These names must be the same as the names of
|
||||
entry points in the ``opentelemetry_resource_detector`` entry point group. This is an
|
||||
experimental feature and the name of this variable and its behavior can change
|
||||
in a non-backwards compatible way.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_PROMETHEUS_HOST = "OTEL_EXPORTER_PROMETHEUS_HOST"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_PROMETHEUS_HOST
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_PROMETHEUS_HOST` environment variable configures the host used by
|
||||
the Prometheus exporter.
|
||||
Default: "localhost"
|
||||
|
||||
This is an experimental environment variable and the name of this variable and its behavior can
|
||||
change in a non-backwards compatible way.
|
||||
"""
|
||||
|
||||
OTEL_EXPORTER_PROMETHEUS_PORT = "OTEL_EXPORTER_PROMETHEUS_PORT"
|
||||
"""
|
||||
.. envvar:: OTEL_EXPORTER_PROMETHEUS_PORT
|
||||
|
||||
The :envvar:`OTEL_EXPORTER_PROMETHEUS_PORT` environment variable configures the port used by
|
||||
the Prometheus exporter.
|
||||
Default: 9464
|
||||
|
||||
This is an experimental environment variable and the name of this variable and its behavior can
|
||||
change in a non-backwards compatible way.
|
||||
"""
|
||||
Binary file not shown.
@@ -0,0 +1,142 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Global Error Handler
|
||||
|
||||
This module provides a global error handler and an interface that allows
|
||||
error handlers to be registered with the global error handler via entry points.
|
||||
A default error handler is also provided.
|
||||
|
||||
To use this feature, users can create an error handler that is registered
|
||||
using the ``opentelemetry_error_handler`` entry point. A class is to be
|
||||
registered in this entry point, this class must inherit from the
|
||||
``opentelemetry.sdk.error_handler.ErrorHandler`` class and implement the
|
||||
corresponding ``handle`` method. This method will receive the exception object
|
||||
that is to be handled. The error handler class should also inherit from the
|
||||
exception classes it wants to handle. For example, this would be an error
|
||||
handler that handles ``ZeroDivisionError``:
|
||||
|
||||
.. code:: python
|
||||
|
||||
from opentelemetry.sdk.error_handler import ErrorHandler
|
||||
from logging import getLogger
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
||||
|
||||
class ErrorHandler0(ErrorHandler, ZeroDivisionError):
|
||||
|
||||
def _handle(self, error: Exception, *args, **kwargs):
|
||||
|
||||
logger.exception("ErrorHandler0 handling a ZeroDivisionError")
|
||||
|
||||
To use the global error handler, just instantiate it as a context manager where
|
||||
you want exceptions to be handled:
|
||||
|
||||
|
||||
.. code:: python
|
||||
|
||||
from opentelemetry.sdk.error_handler import GlobalErrorHandler
|
||||
|
||||
with GlobalErrorHandler():
|
||||
1 / 0
|
||||
|
||||
If the class of the exception raised in the scope of the ``GlobalErrorHandler``
|
||||
object is not parent of any registered error handler, then the default error
|
||||
handler will handle the exception. This default error handler will only log the
|
||||
exception to standard logging, the exception won't be raised any further.
|
||||
"""
|
||||
|
||||
from abc import ABC, abstractmethod
|
||||
from logging import getLogger
|
||||
|
||||
from opentelemetry.util._importlib_metadata import entry_points
|
||||
|
||||
logger = getLogger(__name__)
|
||||
|
||||
|
||||
class ErrorHandler(ABC):
    """Interface for error handlers registered with the global error handler.

    Concrete handlers are registered via the ``opentelemetry_error_handler``
    entry point. A handler class should also inherit from the exception
    classes it wants to handle, so that ``GlobalErrorHandler`` can select it
    with an ``issubclass`` check against the raised exception's class.
    """

    @abstractmethod
    def _handle(self, error: Exception, *args, **kwargs):
        """
        Handle an exception
        """
|
||||
|
||||
|
||||
class _DefaultErrorHandler(ErrorHandler):
    """Fallback error handler.

    Used when no registered error handler matches the raised exception; it
    only records the exception (with traceback) via standard logging.
    """

    def _handle(self, error: Exception, *args, **kwargs):
        # logger.exception picks up the active exception and its traceback.
        logger.exception("Error handled by default error handler: ")
|
||||
|
||||
|
||||
class GlobalErrorHandler:
    """Singleton context manager that dispatches exceptions to error handlers.

    Instantiate it anywhere to obtain the one global instance. When an
    exception escapes the ``with`` block, every error handler registered
    under the ``opentelemetry_error_handler`` entry point whose class is a
    parent of the exception's class gets to handle it; if none matches, the
    default error handler logs it. The exception is never re-raised.
    """

    _instance = None

    def __new__(cls) -> "GlobalErrorHandler":
        # Lazily create the single shared instance.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
        return cls._instance

    def __enter__(self):
        pass

    # pylint: disable=no-self-use
    def __exit__(self, exc_type, exc_value, traceback):
        # Nothing raised inside the with block: nothing to do.
        if exc_value is None:
            return None

        handled_by_plugin = False

        for handler_entry_point in entry_points(
            group="opentelemetry_error_handler"
        ):
            handler_class = handler_entry_point.load()

            # Only handlers that are parents of the raised exception's
            # class participate.
            if not issubclass(handler_class, exc_value.__class__):
                continue

            try:
                handler_class()._handle(exc_value)
                handled_by_plugin = True

            # pylint: disable=broad-exception-caught
            except Exception as error_handling_error:
                # A failing handler must not break error handling itself.
                logger.exception(
                    "%s error while handling error %s by error handler %s",
                    error_handling_error.__class__.__name__,
                    exc_value.__class__.__name__,
                    handler_class.__name__,
                )

        if not handled_by_plugin:
            _DefaultErrorHandler()._handle(exc_value)

        # Returning True suppresses the exception.
        return True
|
||||
Binary file not shown.
@@ -0,0 +1,57 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
from opentelemetry.sdk.metrics._internal import Meter, MeterProvider
|
||||
from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
|
||||
from opentelemetry.sdk.metrics._internal.exemplar import (
|
||||
AlignedHistogramBucketExemplarReservoir,
|
||||
AlwaysOffExemplarFilter,
|
||||
AlwaysOnExemplarFilter,
|
||||
Exemplar,
|
||||
ExemplarFilter,
|
||||
ExemplarReservoir,
|
||||
SimpleFixedSizeExemplarReservoir,
|
||||
TraceBasedExemplarFilter,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.instrument import (
|
||||
Counter,
|
||||
Histogram,
|
||||
ObservableCounter,
|
||||
ObservableGauge,
|
||||
ObservableUpDownCounter,
|
||||
UpDownCounter,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.instrument import Gauge as _Gauge
|
||||
|
||||
# Public API of the metrics SDK package; keep in sync with the imports above.
__all__ = [
    "AlignedHistogramBucketExemplarReservoir",
    "AlwaysOnExemplarFilter",
    "AlwaysOffExemplarFilter",
    "Exemplar",
    "ExemplarFilter",
    "ExemplarReservoir",
    "Meter",
    "MeterProvider",
    "MetricsTimeoutError",
    "Counter",
    "Histogram",
    "_Gauge",
    "ObservableCounter",
    "ObservableGauge",
    "ObservableUpDownCounter",
    "SimpleFixedSizeExemplarReservoir",
    "UpDownCounter",
    "TraceBasedExemplarFilter",
]
|
||||
Binary file not shown.
@@ -0,0 +1,582 @@
|
||||
# Copyright The OpenTelemetry Authors
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import weakref
|
||||
from atexit import register, unregister
|
||||
from logging import getLogger
|
||||
from os import environ
|
||||
from threading import Lock
|
||||
from time import time_ns
|
||||
from typing import Optional, Sequence
|
||||
|
||||
# This kind of import is needed to avoid Sphinx errors.
|
||||
import opentelemetry.sdk.metrics
|
||||
from opentelemetry.metrics import Counter as APICounter
|
||||
from opentelemetry.metrics import Histogram as APIHistogram
|
||||
from opentelemetry.metrics import Meter as APIMeter
|
||||
from opentelemetry.metrics import MeterProvider as APIMeterProvider
|
||||
from opentelemetry.metrics import NoOpMeter
|
||||
from opentelemetry.metrics import ObservableCounter as APIObservableCounter
|
||||
from opentelemetry.metrics import ObservableGauge as APIObservableGauge
|
||||
from opentelemetry.metrics import (
|
||||
ObservableUpDownCounter as APIObservableUpDownCounter,
|
||||
)
|
||||
from opentelemetry.metrics import UpDownCounter as APIUpDownCounter
|
||||
from opentelemetry.metrics import _Gauge as APIGauge
|
||||
from opentelemetry.sdk.environment_variables import (
|
||||
OTEL_METRICS_EXEMPLAR_FILTER,
|
||||
OTEL_SDK_DISABLED,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.exceptions import MetricsTimeoutError
|
||||
from opentelemetry.sdk.metrics._internal.exemplar import (
|
||||
AlwaysOffExemplarFilter,
|
||||
AlwaysOnExemplarFilter,
|
||||
ExemplarFilter,
|
||||
TraceBasedExemplarFilter,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.instrument import (
|
||||
_Counter,
|
||||
_Gauge,
|
||||
_Histogram,
|
||||
_ObservableCounter,
|
||||
_ObservableGauge,
|
||||
_ObservableUpDownCounter,
|
||||
_UpDownCounter,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.measurement_consumer import (
|
||||
MeasurementConsumer,
|
||||
SynchronousMeasurementConsumer,
|
||||
)
|
||||
from opentelemetry.sdk.metrics._internal.sdk_configuration import (
|
||||
SdkConfiguration,
|
||||
)
|
||||
from opentelemetry.sdk.resources import Resource
|
||||
from opentelemetry.sdk.util.instrumentation import InstrumentationScope
|
||||
from opentelemetry.util._once import Once
|
||||
from opentelemetry.util.types import (
|
||||
Attributes,
|
||||
)
|
||||
|
||||
_logger = getLogger(__name__)
|
||||
|
||||
|
||||
class Meter(APIMeter):
|
||||
"""See `opentelemetry.metrics.Meter`."""
|
||||
|
||||
    def __init__(
        self,
        instrumentation_scope: InstrumentationScope,
        measurement_consumer: MeasurementConsumer,
    ):
        """Initialize the SDK meter for *instrumentation_scope*.

        Measurements produced by this meter's instruments are forwarded to
        *measurement_consumer*.
        """
        # Expose the scope's identity through the API base class.
        super().__init__(
            name=instrumentation_scope.name,
            version=instrumentation_scope.version,
            schema_url=instrumentation_scope.schema_url,
        )
        self._instrumentation_scope = instrumentation_scope
        self._measurement_consumer = measurement_consumer
        # Cache of instrument id -> instrument so that re-registering an
        # identical instrument returns the existing object.
        self._instrument_id_instrument = {}
        # Guards concurrent access to the cache above.
        self._instrument_id_instrument_lock = Lock()
|
||||
|
||||
def create_counter(self, name, unit="", description="") -> APICounter:
|
||||
status = self._register_instrument(name, _Counter, unit, description)
|
||||
|
||||
if status.conflict:
|
||||
# FIXME #2558 go through all views here and check if this
|
||||
# instrument registration conflict can be fixed. If it can be, do
|
||||
# not log the following warning.
|
||||
self._log_instrument_registration_conflict(
|
||||
name,
|
||||
APICounter.__name__,
|
||||
unit,
|
||||
description,
|
||||
status,
|
||||
)
|
||||
if status.already_registered:
|
||||
with self._instrument_id_instrument_lock:
|
||||
return self._instrument_id_instrument[status.instrument_id]
|
||||
|
||||
instrument = _Counter(
|
||||
name,
|
||||
self._instrumentation_scope,
|
||||
self._measurement_consumer,
|
||||
unit,
|
||||
description,
|
||||
)
|
||||
|
||||
with self._instrument_id_instrument_lock:
|
||||
self._instrument_id_instrument[status.instrument_id] = instrument
|
||||
return instrument
|
||||
|
||||
    def create_up_down_counter(
        self, name, unit="", description=""
    ) -> APIUpDownCounter:
        """Create an up-down counter instrument, reusing an identical one if
        it was already registered on this meter.

        See `opentelemetry.metrics.Meter.create_up_down_counter`.
        """
        status = self._register_instrument(
            name, _UpDownCounter, unit, description
        )

        if status.conflict:
            # FIXME #2558 go through all views here and check if this
            # instrument registration conflict can be fixed. If it can be, do
            # not log the following warning.
            self._log_instrument_registration_conflict(
                name,
                APIUpDownCounter.__name__,
                unit,
                description,
                status,
            )
        if status.already_registered:
            # Return the instrument created by the earlier registration.
            with self._instrument_id_instrument_lock:
                return self._instrument_id_instrument[status.instrument_id]

        instrument = _UpDownCounter(
            name,
            self._instrumentation_scope,
            self._measurement_consumer,
            unit,
            description,
        )

        with self._instrument_id_instrument_lock:
            self._instrument_id_instrument[status.instrument_id] = instrument
            return instrument
|
||||
|
||||
def create_observable_counter(
    self,
    name,
    callbacks=None,
    unit="",
    description="",
) -> APIObservableCounter:
    """Return an `ObservableCounter` whose *callbacks* are polled at
    collection time, reusing any identical earlier registration."""
    reg = self._register_instrument(
        name, _ObservableCounter, unit, description
    )

    if reg.conflict:
        # FIXME #2558 go through all views here and check if this
        # instrument registration conflict can be fixed. If it can be, do
        # not log the following warning.
        self._log_instrument_registration_conflict(
            name, APIObservableCounter.__name__, unit, description, reg
        )
    if reg.already_registered:
        # An identical instrument already exists; hand that one back.
        with self._instrument_id_instrument_lock:
            return self._instrument_id_instrument[reg.instrument_id]

    observable = _ObservableCounter(
        name,
        self._instrumentation_scope,
        self._measurement_consumer,
        callbacks,
        unit,
        description,
    )

    # Asynchronous instruments are pulled during collection, so the
    # consumer must be told about this one up front.
    self._measurement_consumer.register_asynchronous_instrument(observable)

    with self._instrument_id_instrument_lock:
        self._instrument_id_instrument[reg.instrument_id] = observable
        return observable
|
||||
|
||||
def create_histogram(
    self,
    name: str,
    unit: str = "",
    description: str = "",
    *,
    explicit_bucket_boundaries_advisory: Optional[Sequence[float]] = None,
) -> APIHistogram:
    """Return a `Histogram`, reusing any identical earlier registration.

    A malformed ``explicit_bucket_boundaries_advisory`` (not a sequence
    of numbers) is dropped with a warning rather than raising.
    """
    if explicit_bucket_boundaries_advisory is not None:

        def _advisory_is_valid(candidate) -> bool:
            # Valid only when it is a sequence whose members are all
            # ints or floats; iteration errors count as invalid.
            if not isinstance(candidate, Sequence):
                return False
            try:
                return all(
                    isinstance(item, (float, int)) for item in candidate
                )
            except (KeyError, TypeError):
                return False

        if not _advisory_is_valid(explicit_bucket_boundaries_advisory):
            explicit_bucket_boundaries_advisory = None
            _logger.warning(
                "explicit_bucket_boundaries_advisory must be a sequence of numbers"
            )

    reg_status = self._register_instrument(
        name,
        _Histogram,
        unit,
        description,
        explicit_bucket_boundaries_advisory,
    )

    if reg_status.conflict:
        # FIXME #2558 go through all views here and check if this
        # instrument registration conflict can be fixed. If it can be, do
        # not log the following warning.
        self._log_instrument_registration_conflict(
            name, APIHistogram.__name__, unit, description, reg_status
        )
    if reg_status.already_registered:
        # An identical instrument already exists; hand that one back.
        with self._instrument_id_instrument_lock:
            return self._instrument_id_instrument[reg_status.instrument_id]

    histogram = _Histogram(
        name,
        self._instrumentation_scope,
        self._measurement_consumer,
        unit,
        description,
        explicit_bucket_boundaries_advisory,
    )
    with self._instrument_id_instrument_lock:
        self._instrument_id_instrument[reg_status.instrument_id] = histogram
        return histogram
|
||||
|
||||
def create_gauge(self, name, unit="", description="") -> APIGauge:
    """Return a synchronous `Gauge`, reusing the instrument from any
    identical earlier registration on this meter."""
    registration = self._register_instrument(name, _Gauge, unit, description)

    if registration.conflict:
        # FIXME #2558 go through all views here and check if this
        # instrument registration conflict can be fixed. If it can be, do
        # not log the following warning.
        self._log_instrument_registration_conflict(
            name, APIGauge.__name__, unit, description, registration
        )
    if registration.already_registered:
        # An identical instrument already exists; hand that one back.
        with self._instrument_id_instrument_lock:
            return self._instrument_id_instrument[
                registration.instrument_id
            ]

    gauge = _Gauge(
        name,
        self._instrumentation_scope,
        self._measurement_consumer,
        unit,
        description,
    )

    with self._instrument_id_instrument_lock:
        self._instrument_id_instrument[registration.instrument_id] = gauge
        return gauge
|
||||
|
||||
def create_observable_gauge(
    self, name, callbacks=None, unit="", description=""
) -> APIObservableGauge:
    """Return an `ObservableGauge` whose *callbacks* are polled at
    collection time, reusing any identical earlier registration."""
    reg = self._register_instrument(
        name, _ObservableGauge, unit, description
    )

    if reg.conflict:
        # FIXME #2558 go through all views here and check if this
        # instrument registration conflict can be fixed. If it can be, do
        # not log the following warning.
        self._log_instrument_registration_conflict(
            name, APIObservableGauge.__name__, unit, description, reg
        )
    if reg.already_registered:
        # An identical instrument already exists; hand that one back.
        with self._instrument_id_instrument_lock:
            return self._instrument_id_instrument[reg.instrument_id]

    observable = _ObservableGauge(
        name,
        self._instrumentation_scope,
        self._measurement_consumer,
        callbacks,
        unit,
        description,
    )

    # Asynchronous instruments are pulled during collection, so the
    # consumer must be told about this one up front.
    self._measurement_consumer.register_asynchronous_instrument(observable)

    with self._instrument_id_instrument_lock:
        self._instrument_id_instrument[reg.instrument_id] = observable
        return observable
|
||||
|
||||
def create_observable_up_down_counter(
    self, name, callbacks=None, unit="", description=""
) -> APIObservableUpDownCounter:
    """Return an `ObservableUpDownCounter` whose *callbacks* are polled
    at collection time, reusing any identical earlier registration."""
    reg = self._register_instrument(
        name, _ObservableUpDownCounter, unit, description
    )

    if reg.conflict:
        # FIXME #2558 go through all views here and check if this
        # instrument registration conflict can be fixed. If it can be, do
        # not log the following warning.
        self._log_instrument_registration_conflict(
            name,
            APIObservableUpDownCounter.__name__,
            unit,
            description,
            reg,
        )
    if reg.already_registered:
        # An identical instrument already exists; hand that one back.
        with self._instrument_id_instrument_lock:
            return self._instrument_id_instrument[reg.instrument_id]

    observable = _ObservableUpDownCounter(
        name,
        self._instrumentation_scope,
        self._measurement_consumer,
        callbacks,
        unit,
        description,
    )

    # Asynchronous instruments are pulled during collection, so the
    # consumer must be told about this one up front.
    self._measurement_consumer.register_asynchronous_instrument(observable)

    with self._instrument_id_instrument_lock:
        self._instrument_id_instrument[reg.instrument_id] = observable
        return observable
|
||||
|
||||
|
||||
def _get_exemplar_filter(exemplar_filter: str) -> ExemplarFilter:
    """Instantiate the `ExemplarFilter` named by *exemplar_filter*.

    Accepts the values of the ``OTEL_METRICS_EXEMPLAR_FILTER``
    environment variable; raises ``ValueError`` for anything else.
    """
    factories = {
        "trace_based": TraceBasedExemplarFilter,
        "always_on": AlwaysOnExemplarFilter,
        "always_off": AlwaysOffExemplarFilter,
    }
    factory = factories.get(exemplar_filter)
    if factory is None:
        raise ValueError(f"Unknown exemplar filter '{exemplar_filter}'.")
    return factory()
|
||||
|
||||
|
||||
class MeterProvider(APIMeterProvider):
    r"""See `opentelemetry.metrics.MeterProvider`.

    Args:
        metric_readers: Register metric readers to collect metrics from the SDK
            on demand. Each :class:`opentelemetry.sdk.metrics.export.MetricReader` is
            completely independent and will collect separate streams of
            metrics. TODO: reference ``PeriodicExportingMetricReader`` usage with push
            exporters here.
        resource: The resource representing what the metrics emitted from the SDK pertain to.
        shutdown_on_exit: If true, registers an `atexit` handler to call
            `MeterProvider.shutdown`
        views: The views to configure the metric output the SDK

    By default, instruments which do not match any :class:`opentelemetry.sdk.metrics.view.View` (or if no :class:`opentelemetry.sdk.metrics.view.View`\ s
    are provided) will report metrics with the default aggregation for the
    instrument's kind. To disable instruments by default, configure a match-all
    :class:`opentelemetry.sdk.metrics.view.View` with `DropAggregation` and then create :class:`opentelemetry.sdk.metrics.view.View`\ s to re-enable
    individual instruments:

    .. code-block:: python
        :caption: Disable default views

        MeterProvider(
            views=[
                View(instrument_name="*", aggregation=DropAggregation()),
                View(instrument_name="mycounter"),
            ],
            # ...
        )
    """

    # Process-wide registry of every MetricReader attached to any
    # MeterProvider instance, used to reject attaching the same reader to
    # two providers. A WeakSet so readers can still be garbage-collected.
    _all_metric_readers_lock = Lock()
    _all_metric_readers = weakref.WeakSet()

    def __init__(
        self,
        metric_readers: Sequence[
            "opentelemetry.sdk.metrics.export.MetricReader"
        ] = (),
        resource: Optional[Resource] = None,
        exemplar_filter: Optional[ExemplarFilter] = None,
        shutdown_on_exit: bool = True,
        views: Sequence["opentelemetry.sdk.metrics.view.View"] = (),
    ):
        self._lock = Lock()
        self._meter_lock = Lock()
        self._atexit_handler = None
        if resource is None:
            resource = Resource.create({})
        # The exemplar filter falls back to the OTEL_METRICS_EXEMPLAR_FILTER
        # environment variable, defaulting to "trace_based".
        self._sdk_config = SdkConfiguration(
            exemplar_filter=(
                exemplar_filter
                or _get_exemplar_filter(
                    environ.get(OTEL_METRICS_EXEMPLAR_FILTER, "trace_based")
                )
            ),
            resource=resource,
            metric_readers=metric_readers,
            views=views,
        )
        self._measurement_consumer = SynchronousMeasurementConsumer(
            sdk_config=self._sdk_config
        )
        # OTEL_SDK_DISABLED="true" (case/whitespace-insensitive) makes
        # get_meter() hand out no-op meters.
        disabled = environ.get(OTEL_SDK_DISABLED, "")
        self._disabled = disabled.lower().strip() == "true"

        if shutdown_on_exit:
            # Keep the handle so shutdown() can unregister it later.
            self._atexit_handler = register(self.shutdown)

        self._meters = {}
        self._shutdown_once = Once()
        self._shutdown = False

        for metric_reader in self._sdk_config.metric_readers:
            with self._all_metric_readers_lock:
                # A reader may belong to at most one provider process-wide.
                if metric_reader in self._all_metric_readers:
                    # pylint: disable=broad-exception-raised
                    raise Exception(
                        f"MetricReader {metric_reader} has been registered "
                        "already in other MeterProvider instance"
                    )

                self._all_metric_readers.add(metric_reader)

            # Point the reader's collect entry point at this provider's
            # measurement consumer.
            metric_reader._set_collect_callback(
                self._measurement_consumer.collect
            )

    def force_flush(self, timeout_millis: float = 10_000) -> bool:
        """Flush every registered metric reader, sharing a single deadline.

        Raises if the deadline expires or any reader fails; returns True
        when all readers flushed successfully.
        """
        # Single absolute deadline shared by all readers, in nanoseconds.
        deadline_ns = time_ns() + timeout_millis * 10**6

        # Collect per-reader failures so one bad reader does not prevent
        # the others from flushing.
        metric_reader_error = {}

        for metric_reader in self._sdk_config.metric_readers:
            current_ts = time_ns()
            try:
                if current_ts >= deadline_ns:
                    raise MetricsTimeoutError(
                        "Timed out while flushing metric readers"
                    )
                # Each reader gets whatever time remains until the deadline.
                metric_reader.force_flush(
                    timeout_millis=(deadline_ns - current_ts) / 10**6
                )

            # pylint: disable=broad-exception-caught
            except Exception as error:
                metric_reader_error[metric_reader] = error

        if metric_reader_error:
            metric_reader_error_string = "\n".join(
                [
                    f"{metric_reader.__class__.__name__}: {repr(error)}"
                    for metric_reader, error in metric_reader_error.items()
                ]
            )

            # pylint: disable=broad-exception-raised
            raise Exception(
                "MeterProvider.force_flush failed because the following "
                "metric readers failed during collect:\n"
                f"{metric_reader_error_string}"
            )
        return True

    def shutdown(self, timeout_millis: float = 30_000):
        """Shut down every registered metric reader exactly once.

        Subsequent calls only log a warning. Raises if any reader fails
        to shut down within the shared deadline.
        """
        deadline_ns = time_ns() + timeout_millis * 10**6

        def _shutdown():
            # Runs at most once, guarded by self._shutdown_once below.
            self._shutdown = True

        did_shutdown = self._shutdown_once.do_once(_shutdown)

        if not did_shutdown:
            _logger.warning("shutdown can only be called once")
            return

        # Collect per-reader failures so every reader gets a chance to
        # shut down before any error is reported.
        metric_reader_error = {}

        for metric_reader in self._sdk_config.metric_readers:
            current_ts = time_ns()
            try:
                if current_ts >= deadline_ns:
                    # pylint: disable=broad-exception-raised
                    raise Exception(
                        "Didn't get to execute, deadline already exceeded"
                    )
                metric_reader.shutdown(
                    timeout_millis=(deadline_ns - current_ts) / 10**6
                )

            # pylint: disable=broad-exception-caught
            except Exception as error:
                metric_reader_error[metric_reader] = error

        # Drop the atexit hook registered in __init__ so shutdown is not
        # attempted again at interpreter exit.
        if self._atexit_handler is not None:
            unregister(self._atexit_handler)
            self._atexit_handler = None

        if metric_reader_error:
            metric_reader_error_string = "\n".join(
                [
                    f"{metric_reader.__class__.__name__}: {repr(error)}"
                    for metric_reader, error in metric_reader_error.items()
                ]
            )

            # pylint: disable=broad-exception-raised
            raise Exception(
                (
                    "MeterProvider.shutdown failed because the following "
                    "metric readers failed during shutdown:\n"
                    f"{metric_reader_error_string}"
                )
            )

    def get_meter(
        self,
        name: str,
        version: Optional[str] = None,
        schema_url: Optional[str] = None,
        attributes: Optional[Attributes] = None,
    ) -> Meter:
        """Return the `Meter` for the given instrumentation scope,
        creating and caching it on first request.

        Returns a no-op meter when the SDK is disabled, the provider has
        been shut down, or *name* is empty.
        """
        if self._disabled:
            return NoOpMeter(name, version=version, schema_url=schema_url)

        if self._shutdown:
            _logger.warning(
                "A shutdown `MeterProvider` can not provide a `Meter`"
            )
            return NoOpMeter(name, version=version, schema_url=schema_url)

        if not name:
            _logger.warning("Meter name cannot be None or empty.")
            return NoOpMeter(name, version=version, schema_url=schema_url)

        # The scope acts as the cache key: identical (name, version,
        # schema_url, attributes) requests share one Meter instance.
        info = InstrumentationScope(name, version, schema_url, attributes)
        with self._meter_lock:
            if not self._meters.get(info):
                # FIXME #2558 pass SDKConfig object to meter so that the meter
                # has access to views.
                self._meters[info] = Meter(
                    info,
                    self._measurement_consumer,
                )
            return self._meters[info]
|
||||
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user