diff --git a/docs-requirements.txt b/docs-requirements.txt index 6342594af3..43e2265f7c 100644 --- a/docs-requirements.txt +++ b/docs-requirements.txt @@ -33,9 +33,11 @@ elasticsearch>=6.0,<9.0 flask~=2.0 falcon~=2.0 grpcio~=1.27 +httpx>=0.18.0 kafka-python>=2.0,<3.0 mysql-connector-python~=8.0 mysqlclient~=2.1.1 +openai >= 1.26.0 psutil>=5 psycopg~=3.1.17 pika>=0.12.0 @@ -47,7 +49,6 @@ remoulade>=0.50 sqlalchemy>=1.0 tornado>=5.1.1 tortoise-orm>=0.17.0 -httpx>=0.18.0 # indirect dependency pins markupsafe==2.0.1 diff --git a/docs/conf.py b/docs/conf.py index 4b2bda04a8..8233fccb15 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -40,6 +40,13 @@ if isdir(join(instr, f)) ] +instr_genai = "../instrumentation-genai" +instr_genai_dirs = [ + os.path.abspath("/".join(["../instrumentation-genai", f, "src"])) + for f in listdir(instr_genai) + if isdir(join(instr_genai, f)) +] + prop = "../propagator" prop_dirs = [ os.path.abspath("/".join([prop, f, "src"])) @@ -60,7 +67,14 @@ for f in listdir(resource) if isdir(join(resource, f)) ] -sys.path[:0] = exp_dirs + instr_dirs + sdk_ext_dirs + prop_dirs + resource_dirs +sys.path[:0] = ( + exp_dirs + + instr_dirs + + instr_genai_dirs + + sdk_ext_dirs + + prop_dirs + + resource_dirs +) # -- Project information ----------------------------------------------------- diff --git a/docs/index.rst b/docs/index.rst index e2bf32e12e..f9144c5209 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -24,7 +24,7 @@ installed separately via pip: pip install opentelemetry-instrumentation-{instrumentation} pip install opentelemetry-sdk-extension-{sdk-extension} -A complete list of packages can be found at the +A complete list of packages can be found at the `Contrib repo instrumentation `_ and `Contrib repo exporter `_ directories. @@ -50,6 +50,7 @@ install cd opentelemetry-python-contrib pip install -e ./instrumentation/opentelemetry-instrumentation-flask pip install -e ./instrumentation/opentelemetry-instrumentation-botocore + pip install -e ./instrumentation-genai/opentelemetry-instrumentation-openai-v2 pip install -e ./sdk-extension/opentelemetry-sdk-extension-aws pip install -e ./resource/opentelemetry-resource-detector-container @@ -62,6 +63,14 @@ install instrumentation/** +.. toctree:: + :maxdepth: 2 + :caption: OpenTelemetry Generative AI Instrumentations + :name: Generative AI Instrumentations + :glob: + + instrumentation-genai/** + .. toctree:: :maxdepth: 2 :caption: OpenTelemetry Propagators diff --git a/docs/instrumentation-genai/openai.rst b/docs/instrumentation-genai/openai.rst new file mode 100644 index 0000000000..71f464c533 --- /dev/null +++ b/docs/instrumentation-genai/openai.rst @@ -0,0 +1,7 @@ +OpenTelemetry Python - OpenAI Instrumentation +============================================= + +.. 
automodule:: opentelemetry.instrumentation.openai_v2 + :members: + :undoc-members: + :show-inheritance: diff --git a/docs/nitpick-exceptions.ini b/docs/nitpick-exceptions.ini index 9dc379cf93..845883fdd8 100644 --- a/docs/nitpick-exceptions.ini +++ b/docs/nitpick-exceptions.ini @@ -24,6 +24,7 @@ py-class= httpx.Client httpx.AsyncClient httpx.BaseTransport + openai.BaseTransport httpx.AsyncBaseTransport httpx.SyncByteStream httpx.AsyncByteStream diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/README.rst b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/README.rst index bbd142a97e..d2cb0b5724 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/README.rst +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/README.rst @@ -19,8 +19,60 @@ package to your requirements. pip install opentelemetry-instrumentation-openai-v2 -If you don't have an OpenAI application, yet, try our `example `_ -which only needs a valid OpenAI API key. +If you don't have an OpenAI application, yet, try our `examples `_ +which only need a valid OpenAI API key. + +Check out `zero-code example `_ for a quick start. + +Usage +----- + +This section describes how to set up OpenAI instrumentation if you're setting OpenTelemetry up manually. +Check out the `manual example `_ for more details. + +Instrumenting all clients +************************* + +When using the instrumentor, all clients will automatically trace OpenAI chat completion operations. +You can also optionally capture prompts and completions as log events. + +Make sure to configure OpenTelemetry tracing, logging, and events to capture all telemetry emitted by the instrumentation. + +.. code-block:: python + + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + + OpenAIInstrumentor().instrument() + + client = OpenAI() + response = client.chat.completions.create( + model="gpt-4o-mini", + messages=[ + {"role": "user", "content": "Write a short poem on open telemetry."}, + ], + ) + +Enabling message content +************************* + +Message content such as the contents of the prompt, completion, function arguments and return values +are not captured by default. To capture message content as log events, set the environment variable +`OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT` to `true`. + +Uninstrument +************ + +To uninstrument clients, call the uninstrument method: + +.. code-block:: python + + from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor + + OpenAIInstrumentor().instrument() + # ... 
+ + # Uninstrument all clients + OpenAIInstrumentor().uninstrument() References ---------- diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/.env b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/.env new file mode 100644 index 0000000000..1e77ee78c0 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/.env @@ -0,0 +1,16 @@ +# Update this with your real OpenAI API key +OPENAI_API_KEY=sk-YOUR_API_KEY + +# Uncomment to use Ollama instead of OpenAI +# OPENAI_BASE_URL=http://localhost:11434/v1 +# OPENAI_API_KEY=unused +# CHAT_MODEL=qwen2.5:0.5b + +# Uncomment and change to your OTLP endpoint +# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317 +# OTEL_EXPORTER_OTLP_PROTOCOL=grpc + +OTEL_SERVICE_NAME=opentelemetry-python-openai + +# Change to 'false' to hide prompt and completion content +OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/example/README.rst b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/README.rst similarity index 67% rename from instrumentation-genai/opentelemetry-instrumentation-openai-v2/example/README.rst rename to instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/README.rst index 019e141c70..73e4d1bb0c 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/example/README.rst +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/README.rst @@ -1,8 +1,7 @@ OpenTelemetry OpenAI Instrumentation Example ============================================ -This is an example of how to instrument OpenAI calls with zero code changes, -using `opentelemetry-instrument`. +This is an example of how to instrument OpenAI calls when configuring OpenTelemetry SDK and Instrumentations manually. When `main.py `_ is run, it exports traces and logs to an OTLP compatible endpoint. Traces include details such as the model used and the @@ -10,12 +9,18 @@ duration of the chat request. Logs capture the chat request and the generated response, providing a comprehensive view of the performance and behavior of your OpenAI requests. +Note: `.env <.env>`_ file configures additional environment variables: + +- `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true` configures +OpenAI instrumentation to capture prompt and completion contents on +events. + Setup ----- Minimally, update the `.env <.env>`_ file with your "OPENAI_API_KEY". An OTLP compatible endpoint should be listening for traces and logs on -http://localhost:4318. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well. +http://localhost:4317. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well. Next, set up a virtual environment like this: @@ -33,7 +38,7 @@ Run the example like this: :: - dotenv run -- opentelemetry-instrument python main.py + dotenv run -- python main.py You should see a poem generated by OpenAI while traces and logs export to your configured observability tool. 
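
The manual example above expects an OTLP endpoint listening on http://localhost:4317. If no collector is running, one low-friction alternative is to print telemetry to stdout instead; below is a minimal sketch of the exporter wiring from main.py with the OTLP exporters swapped for the SDK's console exporters (the console exporter class names are assumptions from the OpenTelemetry SDK and are not part of this change):

.. code-block:: python

    from opentelemetry import _events, _logs, trace
    from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor
    from opentelemetry.sdk._events import EventLoggerProvider
    from opentelemetry.sdk._logs import LoggerProvider
    from opentelemetry.sdk._logs.export import (
        ConsoleLogExporter,
        SimpleLogRecordProcessor,
    )
    from opentelemetry.sdk.trace import TracerProvider
    from opentelemetry.sdk.trace.export import (
        ConsoleSpanExporter,
        SimpleSpanProcessor,
    )

    # spans are printed to stdout instead of being sent to an OTLP endpoint
    trace.set_tracer_provider(TracerProvider())
    trace.get_tracer_provider().add_span_processor(
        SimpleSpanProcessor(ConsoleSpanExporter())
    )

    # log records (which carry the gen_ai prompt/completion events) also go to stdout
    _logs.set_logger_provider(LoggerProvider())
    _logs.get_logger_provider().add_log_record_processor(
        SimpleLogRecordProcessor(ConsoleLogExporter())
    )
    _events.set_event_logger_provider(EventLoggerProvider())

    # instrument OpenAI exactly as in the OTLP version of main.py
    OpenAIInstrumentor().instrument()

The OpenAI client call in main.py stays the same; only the exporter setup changes.
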
diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/main.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/main.py new file mode 100644 index 0000000000..4b0c121b7a --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/main.py @@ -0,0 +1,53 @@ +# pylint: skip-file +import os + +from openai import OpenAI + +# NOTE: OpenTelemetry Python Logs and Events APIs are in beta +from opentelemetry import _events, _logs, trace +from opentelemetry.exporter.otlp.proto.grpc._log_exporter import ( + OTLPLogExporter, +) +from opentelemetry.exporter.otlp.proto.grpc.trace_exporter import ( + OTLPSpanExporter, +) +from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor +from opentelemetry.sdk._events import EventLoggerProvider +from opentelemetry.sdk._logs import LoggerProvider +from opentelemetry.sdk._logs.export import BatchLogRecordProcessor +from opentelemetry.sdk.trace import TracerProvider +from opentelemetry.sdk.trace.export import BatchSpanProcessor + +# configure tracing +trace.set_tracer_provider(TracerProvider()) +trace.get_tracer_provider().add_span_processor( + BatchSpanProcessor(OTLPSpanExporter()) +) + +# configure logging and events +_logs.set_logger_provider(LoggerProvider()) +_logs.get_logger_provider().add_log_record_processor( + BatchLogRecordProcessor(OTLPLogExporter()) +) +_events.set_event_logger_provider(EventLoggerProvider()) + +# instrument OpenAI +OpenAIInstrumentor().instrument() + + +def main(): + client = OpenAI() + chat_completion = client.chat.completions.create( + model=os.getenv("CHAT_MODEL", "gpt-4o-mini"), + messages=[ + { + "role": "user", + "content": "Write a short poem on OpenTelemetry.", + }, + ], + ) + print(chat_completion.choices[0].message.content) + + +if __name__ == "__main__": + main() diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/requirements.txt b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/requirements.txt new file mode 100644 index 0000000000..436f63e1d5 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/manual/requirements.txt @@ -0,0 +1,5 @@ +openai~=1.54.4 + +opentelemetry-sdk~=1.28.2 +opentelemetry-exporter-otlp-proto-grpc~=1.28.2 +opentelemetry-instrumentation-openai-v2~=2.0b0 diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/example/.env b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/.env similarity index 65% rename from instrumentation-genai/opentelemetry-instrumentation-openai-v2/example/.env rename to instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/.env index d6afa66723..7dfa745e3b 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/example/.env +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/.env @@ -6,13 +6,16 @@ OPENAI_API_KEY=sk-YOUR_API_KEY # OPENAI_API_KEY=unused # CHAT_MODEL=qwen2.5:0.5b -OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318 -OTEL_EXPORTER_OTLP_PROTOCOL=http/protobuf +# Uncomment and change to your OTLP endpoint +# OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4317 +# OTEL_EXPORTER_OTLP_PROTOCOL=grpc + OTEL_SERVICE_NAME=opentelemetry-python-openai # Change to 'false' to disable logging OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true # Change to 'console' if your OTLP endpoint doesn't support logs 
-OTEL_LOGS_EXPORTER=otlp_proto_http +# TODO: this should not be necessary once https://github.com/open-telemetry/opentelemetry-python-contrib/pull/3042 is released +OTEL_LOGS_EXPORTER=otlp # Change to 'false' to hide prompt and completion content OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/README.rst b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/README.rst new file mode 100644 index 0000000000..441c6a612e --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/README.rst @@ -0,0 +1,48 @@ +OpenTelemetry OpenAI Zero-Code Instrumentation Example +====================================================== + +This is an example of how to instrument OpenAI calls with zero code changes, +using `opentelemetry-instrument`. + +When `main.py `_ is run, it exports traces and logs to an OTLP +compatible endpoint. Traces include details such as the model used and the +duration of the chat request. Logs capture the chat request and the generated +response, providing a comprehensive view of the performance and behavior of +your OpenAI requests. + +Note: `.env <.env>`_ file configures additional environment variables: + +- `OTEL_PYTHON_LOGGING_AUTO_INSTRUMENTATION_ENABLED=true` configures +OpenTelemetry SDK to export logs and events. +- `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT=true` configures +OpenAI instrumentation to capture prompt and completion contents on +events. +- `OTEL_LOGS_EXPORTER=otlp` to specify exporter type. + +Setup +----- + +Minimally, update the `.env <.env>`_ file with your "OPENAI_API_KEY". An +OTLP compatible endpoint should be listening for traces and logs on +http://localhost:4317. If not, update "OTEL_EXPORTER_OTLP_ENDPOINT" as well. + +Next, set up a virtual environment like this: + +:: + + python3 -m venv .venv + source .venv/bin/activate + pip install "python-dotenv[cli]" + pip install -r requirements.txt + +Run +--- + +Run the example like this: + +:: + + dotenv run -- opentelemetry-instrument python main.py + +You should see a poem generated by OpenAI while traces and logs export to your +configured observability tool. 
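
Because the zero-code example relies on `opentelemetry-instrument` to configure the SDK and activate the instrumentation at startup, its main.py (renamed below with no content changes, so its body is not visible in this diff) needs no OpenTelemetry imports at all. A sketch of what such a zero-code main.py likely looks like, assuming it mirrors the manual example's main.py with the SDK setup removed:

.. code-block:: python

    # a zero-code main.py: no OpenTelemetry imports or setup here;
    # opentelemetry-instrument wires up tracing, logging, and events at startup
    import os

    from openai import OpenAI


    def main():
        client = OpenAI()
        chat_completion = client.chat.completions.create(
            model=os.getenv("CHAT_MODEL", "gpt-4o-mini"),
            messages=[
                {
                    "role": "user",
                    "content": "Write a short poem on OpenTelemetry.",
                },
            ],
        )
        print(chat_completion.choices[0].message.content)


    if __name__ == "__main__":
        main()
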
diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/example/main.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/main.py similarity index 100% rename from instrumentation-genai/opentelemetry-instrumentation-openai-v2/example/main.py rename to instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/main.py diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/example/requirements.txt b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/requirements.txt similarity index 71% rename from instrumentation-genai/opentelemetry-instrumentation-openai-v2/example/requirements.txt rename to instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/requirements.txt index 9ec9bff320..77ed112ebb 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/example/requirements.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/examples/zero-code/requirements.txt @@ -1,6 +1,6 @@ openai~=1.54.4 opentelemetry-sdk~=1.28.2 -opentelemetry-exporter-otlp-proto-http~=1.28.2 +opentelemetry-exporter-otlp-proto-grpc~=1.28.2 opentelemetry-distro~=0.49b2 opentelemetry-instrumentation-openai-v2~=2.0b0 diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/pyproject.toml b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/pyproject.toml index e28611d0c5..f37b6915e3 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/pyproject.toml +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/pyproject.toml @@ -39,7 +39,7 @@ instruments = [ openai = "opentelemetry.instrumentation.openai_v2:OpenAIInstrumentor" [project.urls] -Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation/opentelemetry-instrumentation-openai-v2" +Homepage = "https://github.com/open-telemetry/opentelemetry-python-contrib/tree/main/instrumentation-genai/opentelemetry-instrumentation-openai-v2" [tool.hatch.version] path = "src/opentelemetry/instrumentation/openai_v2/version.py" diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/test-requirements-0.txt b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/test-requirements-0.txt index a68dbbf744..bd22b7a870 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/test-requirements-0.txt +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/test-requirements-0.txt @@ -8,6 +8,7 @@ pytest==7.4.4 pytest-vcr==1.0.2 pytest-asyncio==0.21.0 wrapt==1.16.0 +opentelemetry-exporter-otlp-proto-http~=1.28 opentelemetry-api==1.28 # when updating, also update in pyproject.toml opentelemetry-sdk==1.28 # when updating, also update in pyproject.toml opentelemetry-semantic-conventions==0.49b0 # when updating, also update in pyproject.toml diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_chat_completion_no_content.yaml b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_chat_completion_no_content.yaml new file mode 100644 index 0000000000..61ec4a646e --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_async_chat_completion_no_content.yaml @@ -0,0 +1,132 @@ +interactions: +- request: + body: |- + { + "messages": [ + { + "role": "user", + "content": "Say this is a test" + } + ], + "model": "gpt-4o-mini", + 
"stream": false + } + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '106' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - AsyncOpenAI/Python 1.26.0 + x-stainless-arch: + - arm64 + x-stainless-async: + - async:asyncio + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.26.0 + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.5 + method: POST + uri: https://api.openai.com/v1/chat/completions + response: + body: + string: |- + { + "id": "chatcmpl-ASv9R2E7Yhb2e7bj4Xl0qm9s3J42Y", + "object": "chat.completion", + "created": 1731456237, + "model": "gpt-4o-mini-2024-07-18", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "This is a test. How can I assist you further?", + "refusal": null + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 12, + "completion_tokens": 12, + "total_tokens": 24, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "system_fingerprint": "fp_0ba0d124f1" + } + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8e1a80679a8311a6-MRS + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Wed, 13 Nov 2024 00:03:58 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + content-length: + - '796' + openai-organization: test_openai_org_id + openai-processing-ms: + - '359' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '30000' + x-ratelimit-limit-tokens: + - '150000000' + x-ratelimit-remaining-requests: + - '29999' + x-ratelimit-remaining-tokens: + - '149999978' + x-ratelimit-reset-requests: + - 2ms + x-ratelimit-reset-tokens: + - 0s + x-request-id: + - req_41ea134c1fc450d4ca4cf8d0c6a7c53a + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion_no_content.yaml b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion_no_content.yaml new file mode 100644 index 0000000000..2abb443fe3 --- /dev/null +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/cassettes/test_chat_completion_no_content.yaml @@ -0,0 +1,134 @@ +interactions: +- request: + body: |- + { + "messages": [ + { + "role": "user", + "content": "Say this is a test" + } + ], + "model": "gpt-4o-mini", + "stream": false + } + headers: + accept: + - application/json + accept-encoding: + - gzip, deflate + authorization: + - Bearer test_openai_api_key + connection: + - keep-alive + content-length: + - '106' + content-type: + - application/json + host: + - api.openai.com + user-agent: + - OpenAI/Python 1.54.3 + x-stainless-arch: + - arm64 + x-stainless-async: + - 'false' + x-stainless-lang: + - python + x-stainless-os: + - MacOS + x-stainless-package-version: + - 1.54.3 + x-stainless-retry-count: + - '0' + x-stainless-runtime: + - CPython + x-stainless-runtime-version: + - 3.12.6 + method: POST + uri: 
https://api.openai.com/v1/chat/completions + response: + body: + string: |- + { + "id": "chatcmpl-ASYMQRl3A3DXL9FWCK9tnGRcKIO7q", + "object": "chat.completion", + "created": 1731368630, + "model": "gpt-4o-mini-2024-07-18", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "This is a test.", + "refusal": null + }, + "logprobs": null, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 12, + "completion_tokens": 5, + "total_tokens": 17, + "prompt_tokens_details": { + "cached_tokens": 0, + "audio_tokens": 0 + }, + "completion_tokens_details": { + "reasoning_tokens": 0, + "audio_tokens": 0, + "accepted_prediction_tokens": 0, + "rejected_prediction_tokens": 0 + } + }, + "system_fingerprint": "fp_0ba0d124f1" + } + headers: + CF-Cache-Status: + - DYNAMIC + CF-RAY: + - 8e122593ff368bc8-SIN + Connection: + - keep-alive + Content-Type: + - application/json + Date: + - Mon, 11 Nov 2024 23:43:50 GMT + Server: + - cloudflare + Set-Cookie: test_set_cookie + Transfer-Encoding: + - chunked + X-Content-Type-Options: + - nosniff + access-control-expose-headers: + - X-Request-ID + alt-svc: + - h3=":443"; ma=86400 + content-length: + - '765' + openai-organization: test_openai_org_id + openai-processing-ms: + - '287' + openai-version: + - '2020-10-01' + strict-transport-security: + - max-age=31536000; includeSubDomains; preload + x-ratelimit-limit-requests: + - '10000' + x-ratelimit-limit-tokens: + - '200000' + x-ratelimit-remaining-requests: + - '9999' + x-ratelimit-remaining-tokens: + - '199977' + x-ratelimit-reset-requests: + - 8.64s + x-ratelimit-reset-tokens: + - 6ms + x-request-id: + - req_58cff97afd0e7c0bba910ccf0b044a6f + status: + code: 200 + message: OK +version: 1 diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/conftest.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/conftest.py index 7ff7e46777..18e6582dff 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/conftest.py +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/conftest.py @@ -84,6 +84,10 @@ def vcr_config(): @pytest.fixture(scope="function") def instrument_no_content(tracer_provider, event_logger_provider): + os.environ.update( + {OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT: "False"} + ) + instrumentor = OpenAIInstrumentor() instrumentor.instrument( tracer_provider=tracer_provider, @@ -91,6 +95,7 @@ def instrument_no_content(tracer_provider, event_logger_provider): ) yield instrumentor + os.environ.pop(OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, None) instrumentor.uninstrument() diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py index 1c4b3cb7dd..e19bc7c311 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_async_chat_completions.py @@ -68,6 +68,34 @@ async def test_async_chat_completion_with_content( assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0]) +@pytest.mark.vcr() +@pytest.mark.asyncio() +async def test_async_chat_completion_no_content( + span_exporter, log_exporter, async_openai_client, instrument_no_content +): + llm_model_value = "gpt-4o-mini" + messages_value = [{"role": "user", "content": "Say this is a test"}] + + response = await 
async_openai_client.chat.completions.create( + messages=messages_value, model=llm_model_value, stream=False + ) + + spans = span_exporter.get_finished_spans() + assert_completion_attributes(spans[0], llm_model_value, response) + + logs = log_exporter.get_finished_logs() + assert len(logs) == 2 + + assert_message_in_logs(logs[0], "gen_ai.user.message", None, spans[0]) + + choice_event = { + "index": 0, + "finish_reason": "stop", + "message": {"role": "assistant"}, + } + assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0]) + + @pytest.mark.asyncio() async def test_async_chat_completion_bad_endpoint( span_exporter, instrument_no_content diff --git a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py index c6cb19aa8d..4f732290c0 100644 --- a/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py +++ b/instrumentation-genai/opentelemetry-instrumentation-openai-v2/tests/test_chat_completions.py @@ -67,6 +67,33 @@ def test_chat_completion_with_content( assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0]) +@pytest.mark.vcr() +def test_chat_completion_no_content( + span_exporter, log_exporter, openai_client, instrument_no_content +): + llm_model_value = "gpt-4o-mini" + messages_value = [{"role": "user", "content": "Say this is a test"}] + + response = openai_client.chat.completions.create( + messages=messages_value, model=llm_model_value, stream=False + ) + + spans = span_exporter.get_finished_spans() + assert_completion_attributes(spans[0], llm_model_value, response) + + logs = log_exporter.get_finished_logs() + assert len(logs) == 2 + + assert_message_in_logs(logs[0], "gen_ai.user.message", None, spans[0]) + + choice_event = { + "index": 0, + "finish_reason": "stop", + "message": {"role": "assistant"}, + } + assert_message_in_logs(logs[1], "gen_ai.choice", choice_event, spans[0]) + + def test_chat_completion_bad_endpoint(span_exporter, instrument_no_content): llm_model_value = "gpt-4o-mini" messages_value = [{"role": "user", "content": "Say this is a test"}]
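
The new no-content tests depend on the `instrument_no_content` fixture shown in conftest.py, which sets `OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT` to "False" before instrumenting. The existing with-content tests presumably use a mirror-image fixture; a hypothetical sketch of what that counterpart could look like (the fixture name and the constant definition are assumptions, and the actual fixture is not part of this diff):

.. code-block:: python

    import os

    import pytest

    from opentelemetry.instrumentation.openai_v2 import OpenAIInstrumentor

    # assumed to match the constant imported by conftest.py
    OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT = (
        "OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT"
    )


    @pytest.fixture(scope="function")
    def instrument_with_content(tracer_provider, event_logger_provider):
        # tracer_provider and event_logger_provider come from the same conftest.py
        os.environ.update(
            {OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT: "True"}
        )

        instrumentor = OpenAIInstrumentor()
        instrumentor.instrument(
            tracer_provider=tracer_provider,
            event_logger_provider=event_logger_provider,
        )

        yield instrumentor
        os.environ.pop(
            OTEL_INSTRUMENTATION_GENAI_CAPTURE_MESSAGE_CONTENT, None
        )
        instrumentor.uninstrument()

With content capture enabled, the `gen_ai.user.message` and `gen_ai.choice` event bodies carry the prompt and completion text; with it disabled, as asserted above, the user message body is empty and the choice event only records the index, finish reason, and role.
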