Commit 013bcba

100% documentation coverage.
1 parent: 0be029f

File tree: 9 files changed, +103 −13 lines

gems.rb

Lines changed: 2 additions & 0 deletions
@@ -7,6 +7,8 @@
 
 gemspec
 
+gem "agent-context"
+
 group :maintenance, optional: true do
 	gem "bake-gem"
 	gem "bake-modernize"

lib/async/ollama.rb

Lines changed: 7 additions & 0 deletions
@@ -7,3 +7,10 @@
 require_relative "ollama/client"
 
 require_relative "ollama/conversation"
+
+# @namespace
+module Async
+	# @namespace
+	module Ollama
+	end
+end

lib/async/ollama/chat.rb

Lines changed: 12 additions & 8 deletions
@@ -8,57 +8,61 @@
 
 module Async
 	module Ollama
+		# Represents a chat response from the Ollama API, including message content, model, and timing information.
 		class Chat < Async::REST::Representation[ChatWrapper]
-			# The response message.
+			# @returns [Hash | nil] The message content, or nil if not present.
 			def message
 				self.value[:message]
 			end
 			
+			# @returns [String | nil] The error message, or nil if not present.
 			def error
 				self.value[:error]
 			end
 			
+			# @returns [Array(Hash) | nil] The tool calls, or nil if not present.
 			def tool_calls
 				if message = self.message
 					message[:tool_calls]
 				end
 			end
 			
-			# The model used to generate the response.
+			# @returns [String] The model name used to generate this response.
 			def model
 				self.value[:model]
 			end
 			
-			# @return [Integer] The time spent generating the response, in nanoseconds.
+			# @return [Integer | nil] The time spent generating the response, in nanoseconds.
 			def total_duration
 				self.value[:total_duration]
 			end
 			
-			# @return [Integer] The time spent loading the model, in nanoseconds.
+			# @return [Integer | nil] The time spent loading the model, in nanoseconds.
 			def load_duration
 				self.value[:load_duration]
 			end
 			
-			# @return [Integer] The number of tokens in the prompt (the token count).
+			# @return [Integer | nil] The number of tokens in the prompt (the token count).
 			def prompt_eval_count
 				self.value[:prompt_eval_count]
 			end
 			
-			# @return [Integer] The time spent evaluating the prompt, in nanoseconds.
+			# @return [Integer | nil] The time spent evaluating the prompt, in nanoseconds.
 			def prompt_eval_duration
 				self.value[:prompt_eval_duration]
 			end
 			
-			# @return [Integer] The number of tokens in the response.
+			# @return [Integer | nil] The number of tokens in the response.
 			def eval_count
 				self.value[:eval_count]
 			end
 			
-			# @return [Integer] The time spent generating the response, in nanoseconds.
+			# @return [Integer | nil] The time spent generating the response, in nanoseconds.
 			def eval_duration
 				self.value[:eval_duration]
 			end
 			
+			# @return [Integer] The sum of prompt and response token counts.
 			def token_count
 				count = 0
 				
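
For orientation (not part of the commit), a short sketch of how these accessors might be read. The `report` helper is hypothetical, and `chat` is assumed to be a Chat representation returned by Client#chat, documented in client.rb below.

# Hypothetical helper (not from this commit): summarize a Chat response.
# Assumes `chat` is an Async::Ollama::Chat returned by Client#chat.
def report(chat)
	if error = chat.error
		raise "Chat failed: #{error}"
	end
	
	puts chat.message[:content] if chat.message
	puts "Requested #{chat.tool_calls.size} tool call(s)." if chat.tool_calls
	
	# Durations are reported in nanoseconds:
	seconds = (chat.total_duration || 0) / 1_000_000_000.0
	puts "#{chat.model}: #{chat.token_count} tokens in #{seconds.round(2)}s"
end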

lib/async/ollama/client.rb

Lines changed: 10 additions & 2 deletions
@@ -13,13 +13,15 @@ module Async
 	module Ollama
 		MODEL = ENV.fetch("ASYNC_OLLAMA_MODEL", "llama3.1:latest")
 		
-		# Represents a connection to the Ollama service.
+		# Represents a connection to the Ollama service, providing methods to generate completions, chat, and list models.
 		class Client < Async::REST::Resource
 			# The default endpoint to connect to.
 			ENDPOINT = Async::HTTP::Endpoint.parse("http://localhost:11434")
 			
-			# Generate a response from the given prompt.
+			# Generates a response from the given prompt using Ollama.
 			# @parameter prompt [String] The prompt to generate a response from.
+			# @parameter options [Hash] Additional options for the request.
+			# @returns [Generate] The generated response representation.
 			def generate(prompt, **options, &block)
 				options[:prompt] = prompt
 				options[:model] ||= MODEL
@@ -33,6 +35,10 @@ def generate(prompt, **options, &block)
 				end
 			end
 			
+			# Sends a chat request with the given messages to Ollama.
+			# @parameter messages [Array(Hash)] The chat messages to send.
+			# @parameter options [Hash] Additional options for the request.
+			# @returns [Chat] The chat response representation.
 			def chat(messages, **options, &block)
 				options[:model] ||= MODEL
 				options[:messages] = messages
@@ -46,6 +52,8 @@ def chat(messages, **options, &block)
 				end
 			end
 			
+			# Retrieves the list of available models from Ollama.
+			# @returns [Models] The models response representation.
 			def models
 				Models.get(self.with(path: "/api/tags"))
 			end
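
A minimal usage sketch (not part of the commit). It assumes `Async::Ollama::Client.open` connects to the default ENDPOINT like the usual async-rest resource helper, and that `close` releases the underlying connection; the prompts are illustrative.

require "async"
require "async/ollama"

Async do
	client = Async::Ollama::Client.open
	
	# One-shot completion via /api/generate:
	generate = client.generate("Write a haiku about Ruby.")
	puts generate.response
	
	# Chat-style request via /api/chat:
	chat = client.chat([{role: "user", content: "Why is the sky blue?"}])
	puts chat.message[:content]
ensure
	client&.close
end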

lib/async/ollama/conversation.rb

Lines changed: 18 additions & 0 deletions
@@ -8,10 +8,17 @@
 
 module Async
 	module Ollama
+		# Represents a conversation with the Ollama API, managing messages, tool calls, and summarization.
 		class Conversation
+			# Raised when a chat error occurs during the conversation.
 			class ChatError < StandardError
 			end
 			
+			# Initializes a new conversation.
+			# @parameter client [Client] The Ollama client instance.
+			# @parameter model [String] The model to use for the conversation.
+			# @parameter messages [Array(Hash)] The initial messages for the conversation.
+			# @parameter options [Hash] Additional options for the conversation.
 			def initialize(client, model: MODEL, messages: [], **options)
 				@client = client
 				@model = model
@@ -23,18 +30,25 @@ def initialize(client, model: MODEL, messages: [], **options)
 				@last_response = nil
 			end
 			
+			# @attribute [Toolbox] The toolbox for this conversation.
 			attr :toolbox
 			
+			# @attribute [Array(Hash)] The messages in the conversation.
 			attr :messages
 			
+			# @returns [Integer] The number of messages in the conversation.
 			def size
 				@messages.size
 			end
 			
+			# @returns [Integer] The token count of the last response, or 0 if none.
 			def token_count
 				@last_response&.token_count || 0
 			end
 			
+			# Sends a prompt to the conversation and processes the response, including tool calls.
+			# @parameter prompt [String | Hash] The prompt to send (as a string or message hash).
+			# @returns [Chat] The final chat response.
 			def call(prompt, &block)
 				if prompt.is_a?(String)
 					@messages << {
@@ -68,6 +82,10 @@ def call(prompt, &block)
 			
 			SUMMARIZE_MESSAGE = "Please summarize the conversation so far for your future reference. Do not introduce new information or questions. Refer to both user and assistant messages. Please keep the summary concise and relevant to the conversation and use it to continue the conversation."
 			
+			# Summarizes the conversation and truncates messages to reduce context usage.
+			# @parameter retain [Integer] The number of messages to retain after summarization.
+			# @parameter role [String] The role to use for the summarization message.
+			# @returns [void]
 			def summarize!(retain = -1, role: "user")
 				current_size = @messages.size
 				
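
A sketch of the conversation loop (not part of the commit); the `Client.open`/`close` pattern follows the client sketch above, and the size threshold for calling `summarize!` is an arbitrary illustration.

require "async"
require "async/ollama"

Async do
	client = Async::Ollama::Client.open
	conversation = Async::Ollama::Conversation.new(client)
	
	response = conversation.call("What is the capital of France?")
	puts response.message[:content]
	
	# Illustrative policy: compact the history once it grows large.
	conversation.summarize!(4) if conversation.size > 20
ensure
	client&.close
end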

lib/async/ollama/generate.rb

Lines changed: 3 additions & 2 deletions
@@ -8,13 +8,14 @@
 
 module Async
 	module Ollama
+		# Represents a generated response from the Ollama API.
 		class Generate < Async::REST::Representation[Wrapper]
-			# The response to the prompt.
+			# @returns [String | nil] The generated response, or nil if not present.
 			def response
 				self.value[:response]
 			end
 			
-			# The model used to generate the response.
+			# @returns [String] The model name used to generate the response.
 			def model
 				self.value[:model]
 			end

lib/async/ollama/models.rb

Lines changed: 3 additions & 1 deletion
@@ -8,9 +8,11 @@
 
 module Async
 	module Ollama
+		# Represents the available models returned by the Ollama API.
 		class Models < Async::REST::Representation[Wrapper]
+			# @returns [Array(String)] The list of model names.
 			def names
-				self.value[:models].map{|model| model[:name]}
+				self.value[:models].map {|model| model[:name]}
 			end
 		end
 	end
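
For example (a sketch, not from the commit), the locally installed model names can be listed via Client#models, assuming the same `Client.open`/`close` pattern as above:

require "async"
require "async/ollama"

Async do
	client = Async::Ollama::Client.open
	
	names = client.models.names
	puts "#{names.size} models available: #{names.sort.join(", ")}"
ensure
	client&.close
end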

lib/async/ollama/toolbox.rb

Lines changed: 23 additions & 0 deletions
@@ -5,20 +5,32 @@
 
 module Async
 	module Ollama
+		# Represents a tool that can be registered and called by the Toolbox.
 		class Tool
+			# Initializes a new tool with the given name, schema, and block.
+			# @parameter name [String] The name of the tool.
+			# @parameter schema [Hash] The schema describing the tool's function.
+			# @parameter block [Proc] The implementation of the tool.
 			def initialize(name, schema, &block)
 				@name = name
 				@schema = schema
 				@block = block
 			end
 			
+			# @attribute [String] The name of the tool.
 			attr :name
+
+			# @attribute [Hash] The schema for the tool.
 			attr :schema
 			
+			# Calls the tool with the given message.
+			# @parameter message [Hash] The message to process.
+			# @returns [Object] The result of the tool's block.
 			def call(message)
 				@block.call(message)
 			end
 			
+			# @returns [Hash] The explanation of the tool's function for API usage.
 			def explain
 				{
 					type: "function",
@@ -27,17 +39,27 @@ def explain
 			end
 		end
 		
+		# Manages a collection of tools and dispatches calls to them.
 		class Toolbox
+			# Initializes a new, empty toolbox.
 			def initialize
 				@tools = {}
 			end
 			
+			# @attribute [Hash] The registered tools by name.
 			attr :tools
 			
+			# Registers a new tool with the given name, schema, and block.
+			# @parameter name [String] The name of the tool.
+			# @parameter schema [Hash] The schema describing the tool's function.
+			# @parameter block [Proc] The implementation of the tool.
 			def register(name, schema, &block)
 				@tools[name] = Tool.new(name, schema, &block)
 			end
 			
+			# Calls a registered tool with the given message.
+			# @parameter message [Hash] The message containing the function call.
+			# @returns [Hash] The tool's response message.
 			def call(message)
 				function = message[:function]
 				name = function[:name]
@@ -62,6 +84,7 @@ def call(message)
 				}
 			end
 			
+			# @returns [Array(Hash)] The explanations for all registered tools.
 			def explain
 				@tools.values.map(&:explain)
 			end
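
A sketch of tool registration (not part of the commit). The schema hash mirrors Ollama's function-calling format, wiring it through `Conversation#toolbox` is an assumption based on the attribute documented above, and the `current_time` tool is purely illustrative.

# Assumes `conversation` is an Async::Ollama::Conversation (see sketch above).
toolbox = conversation.toolbox

# Illustrative tool: the schema passed here is the `function` portion that
# Tool#explain wraps as {type: "function", ...} for the API.
toolbox.register("current_time", {
	name: "current_time",
	description: "Returns the current local time as a string.",
	parameters: {type: "object", properties: {}, required: []}
}) do |message|
	Time.now.to_s
end

# When the model requests a tool call, it is dispatched via Toolbox#call,
# which looks the tool up by message[:function][:name].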

lib/async/ollama/wrapper.rb

Lines changed: 25 additions & 0 deletions
@@ -13,7 +13,9 @@
 
 module Async
 	module Ollama
+		# Parses streaming HTTP responses for Ollama, buffering and extracting JSON lines.
 		class StreamingParser < ::Protocol::HTTP::Body::Wrapper
+			# @parameter args [Array] Arguments for the parent initializer.
 			def initialize(...)
 				super
 				
@@ -23,6 +25,8 @@ def initialize(...)
 				@value = {}
 			end
 			
+			# Reads the next JSON object from the stream.
+			# @returns [Hash | nil] The next parsed object, or nil if the stream is empty.
 			def read
 				return if @buffer.nil?
 				
@@ -49,21 +53,27 @@ def read
 				end
 			end
 			
+			# Joins the stream, reading all objects and returning the final value.
+			# @returns [Hash] The final parsed value.
 			def join
 				self.each{}
 				
 				return @value
 			end
 		end
 		
+		# Parses streaming responses for the Ollama API, collecting the response string.
 		class StreamingResponseParser < StreamingParser
+			# Initializes the parser with an empty response string.
 			def initialize(...)
 				super
 				
 				@response = String.new
 				@value[:response] = @response
 			end
 			
+			# Iterates over each response line, yielding the response string.
+			# @returns [Enumerator] Yields each response string.
 			def each
 				super do |line|
 					response = line.delete(:response)
@@ -75,7 +85,9 @@ def each
 				end
 			end
 			
+		# Parses streaming message responses for the Ollama API, collecting message content.
 		class StreamingMessageParser < StreamingParser
+			# Initializes the parser with an empty message content.
 			def initialize(...)
 				super
 				
@@ -84,6 +96,8 @@ def initialize(...)
 				@value[:message] = @message
 			end
 			
+			# Iterates over each message line, yielding the message content.
+			# @returns [Enumerator] Yields each message content string.
 			def each
 				super do |line|
 					message = line.delete(:message)
@@ -97,10 +111,14 @@ def each
 			end
 		end
 		
+		# Wraps HTTP requests and responses for the Ollama API, handling content negotiation and parsing.
 		class Wrapper < Async::REST::Wrapper::Generic
 			APPLICATION_JSON = "application/json"
 			APPLICATION_JSON_STREAM = "application/x-ndjson"
 			
+			# Prepares the HTTP request with appropriate headers and body.
+			# @parameter request [Protocol::HTTP::Request] The HTTP request object.
+			# @parameter payload [Protocol::HTTP::Response] The request payload.
 			def prepare_request(request, payload)
 				request.headers.add("accept", APPLICATION_JSON)
 				request.headers.add("accept", APPLICATION_JSON_STREAM)
@@ -114,6 +132,9 @@ def prepare_request(request, payload)
 				end
 			end
 			
+			# Selects the appropriate parser for the HTTP response.
+			# @parameter response [Protocol::HTTP::Response] The HTTP response object.
+			# @returns [Class] The parser class to use.
 			def parser_for(response)
 				content_type = response.headers["content-type"]
 				media_type = content_type.split(";").first
@@ -127,7 +148,11 @@ def parser_for(response)
 			end
 		end
 		
+		# Wraps chat-specific HTTP responses for the Ollama API, selecting the appropriate parser.
 		class ChatWrapper < Wrapper
+			# Selects the appropriate parser for the chat HTTP response.
+			# @parameter response [Protocol::HTTP::Response] The HTTP response object.
+			# @returns [Class] The parser class to use.
 			def parser_for(response)
 				content_type = response.headers["content-type"]
 				media_type = content_type.split(";").first
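
Conceptually, these parsers fold an application/x-ndjson body into a single value: each line is parsed as JSON, its response (or message content) fragment is appended to a running string, and join returns the merged hash. A self-contained illustration of that accumulation in plain Ruby (not using the library classes; the sample payload is made up):

require "json"

# Illustrative only: mimics the accumulation a StreamingResponseParser performs
# over an application/x-ndjson body.
ndjson = <<~BODY
	{"model":"llama3.1:latest","response":"Hello","done":false}
	{"model":"llama3.1:latest","response":", world!","done":true,"eval_count":4}
BODY

value = {response: String.new}

ndjson.each_line do |line|
	chunk = JSON.parse(line, symbolize_names: true)
	value[:response] << chunk.delete(:response).to_s
	value.merge!(chunk)
end

puts value[:response]   # => "Hello, world!"
puts value[:eval_count] # => 4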
