feat: add completion_tokens_details to the response
default-anton committed Jan 2, 2025
1 parent f2e807d commit fd84c91
Showing 3 changed files with 150 additions and 2 deletions.
25 changes: 23 additions & 2 deletions lib/onellm/response.rb
@@ -162,6 +162,25 @@ def to_h
end
end

# Represents completion tokens details
class CompletionTokensDetails
attr_reader :reasoning_tokens, :accepted_prediction_tokens, :rejected_prediction_tokens

def initialize(attributes = {})
@reasoning_tokens = attributes[:reasoning_tokens]
@accepted_prediction_tokens = attributes[:accepted_prediction_tokens]
@rejected_prediction_tokens = attributes[:rejected_prediction_tokens]
end

def to_h
{
reasoning_tokens: reasoning_tokens,
accepted_prediction_tokens: accepted_prediction_tokens,
rejected_prediction_tokens: rejected_prediction_tokens
}
end
end
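
Outside the diff, a minimal usage sketch of the new value object (illustrative numbers; the Onellm:: namespace is taken from the spec below):

details = Onellm::CompletionTokensDetails.new(
  reasoning_tokens: 128,        # illustrative values, not from a real response
  accepted_prediction_tokens: 4,
  rejected_prediction_tokens: 1
)
details.reasoning_tokens #=> 128
details.to_h             #=> { reasoning_tokens: 128, accepted_prediction_tokens: 4, rejected_prediction_tokens: 1 }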

# Represents the token usage information
class Usage
attr_reader :completion_tokens, :prompt_tokens, :total_tokens,
@@ -172,7 +191,9 @@ def initialize(attributes = {})
@completion_tokens = attributes[:completion_tokens]
@prompt_tokens = attributes[:prompt_tokens]
@total_tokens = attributes[:total_tokens]
-@completion_tokens_details = attributes[:completion_tokens_details]
+@completion_tokens_details = if attributes[:completion_tokens_details]
+  CompletionTokensDetails.new(attributes[:completion_tokens_details])
+end
@prompt_tokens_details = attributes[:prompt_tokens_details] || {}
@cache_creation_input_tokens = attributes[:cache_creation_input_tokens]
@cache_read_input_tokens = attributes[:cache_read_input_tokens]
@@ -183,7 +204,7 @@ def to_h
completion_tokens: completion_tokens,
prompt_tokens: prompt_tokens,
total_tokens: total_tokens,
-completion_tokens_details: completion_tokens_details,
+completion_tokens_details: completion_tokens_details&.to_h,
prompt_tokens_details: prompt_tokens_details,
cache_creation_input_tokens: cache_creation_input_tokens,
cache_read_input_tokens: cache_read_input_tokens
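
Outside the diff, a sketch of the resulting Usage behavior, assuming Usage sits in the same Onellm namespace and the collapsed attr_reader list exposes completion_tokens_details. The safe navigation in to_h (&.to_h) means a payload without the field serializes to nil instead of raising:

usage = Onellm::Usage.new(
  completion_tokens: 42,   # illustrative values
  prompt_tokens: 10,
  total_tokens: 52,
  completion_tokens_details: { reasoning_tokens: 12, accepted_prediction_tokens: 0, rejected_prediction_tokens: 0 }
)
usage.completion_tokens_details.reasoning_tokens #=> 12
usage.to_h[:completion_tokens_details]           #=> { reasoning_tokens: 12, accepted_prediction_tokens: 0, rejected_prediction_tokens: 0 }

# When the provider omits the field, the reader and the serialized value are both nil:
Onellm::Usage.new(completion_tokens: 42).completion_tokens_details #=> nil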

Some generated files are not rendered by default.

13 changes: 13 additions & 0 deletions spec/providers/openai_provider_spec.rb
@@ -391,6 +391,19 @@
expect(response.choices.first.logprobs.content.first.top_logprobs).to be_an(Array)
end

it 'returns completion tokens details', :vcr do
response = provider.complete(
model: valid_model,
messages: valid_messages
)

expect(response).to be_a(Onellm::Response)
expect(response.usage.completion_tokens_details).to be_a(Onellm::CompletionTokensDetails)
expect(response.usage.completion_tokens_details.reasoning_tokens).to be_a(Integer)
expect(response.usage.completion_tokens_details.accepted_prediction_tokens).to be_a(Integer)
expect(response.usage.completion_tokens_details.rejected_prediction_tokens).to be_a(Integer)
end
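
For context, the counters asserted above come from OpenAI's usage.completion_tokens_details object in the chat completions response. A representative shape, written as a Ruby hash with illustrative values (OpenAI also reports audio_tokens, which the new class does not map):

{
  usage: {
    prompt_tokens: 10,
    completion_tokens: 42,
    total_tokens: 52,
    completion_tokens_details: {
      reasoning_tokens: 0,
      audio_tokens: 0,
      accepted_prediction_tokens: 0,
      rejected_prediction_tokens: 0
    }
  }
}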

it 'raises error for invalid top_p' do
expect do
provider.complete(
