From 256a0d1dacd502b5721468a9896a7b95c464274f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?=
Date: Tue, 3 Dec 2024 13:31:31 +0100
Subject: [PATCH 01/42] feat: support running ollama from the local binary

---
 .../modules/ollama/install-dependencies.sh |   3 +
 .github/workflows/ci-test-go.yml           |  11 +
 docs/modules/ollama.md                     |  38 ++
 modules/ollama/examples_test.go            |  70 +++
 modules/ollama/go.mod                      |   2 +-
 modules/ollama/local.go                    | 479 ++++++++++++++++++
 modules/ollama/local_test.go               | 241 +++++++++
 modules/ollama/ollama.go                   |  24 +-
 modules/ollama/options.go                  |  41 ++
 modules/ollama/options_test.go             |  41 ++
 10 files changed, 948 insertions(+), 2 deletions(-)
 create mode 100755 .github/scripts/modules/ollama/install-dependencies.sh
 create mode 100644 modules/ollama/local.go
 create mode 100644 modules/ollama/local_test.go
 create mode 100644 modules/ollama/options_test.go

diff --git a/.github/scripts/modules/ollama/install-dependencies.sh b/.github/scripts/modules/ollama/install-dependencies.sh
new file mode 100755
index 0000000000..f041595b4b
--- /dev/null
+++ b/.github/scripts/modules/ollama/install-dependencies.sh
@@ -0,0 +1,3 @@
+#!/usr/bin/env bash
+
+curl -fsSL https://ollama.com/install.sh | sh
diff --git a/.github/workflows/ci-test-go.yml b/.github/workflows/ci-test-go.yml
index 82be78435f..3af552f768 100644
--- a/.github/workflows/ci-test-go.yml
+++ b/.github/workflows/ci-test-go.yml
@@ -107,6 +107,17 @@ jobs:
         working-directory: ./${{ inputs.project-directory }}
         run: go build
 
+      - name: Install dependencies
+        working-directory: ./${{ inputs.project-directory }}
+        shell: bash
+        run: |
+          SCRIPT_PATH="./.github/scripts/${{ inputs.project-directory }}/install-dependencies.sh"
+          if [ -f "$SCRIPT_PATH" ]; then
+            bash "$SCRIPT_PATH"
+          else
+            echo "No dependencies script found at $SCRIPT_PATH - skipping installation"
+          fi
+
       - name: go test
         # only run tests on linux, there are a number of things that won't allow the tests to run on anything else
         # many (maybe, all?) images used can only be built on Linux, they don't have Windows in their manifest, and
diff --git a/docs/modules/ollama.md b/docs/modules/ollama.md
index c16e612142..ec2b61f789 100644
--- a/docs/modules/ollama.md
+++ b/docs/modules/ollama.md
@@ -16,10 +16,15 @@ go get github.com/testcontainers/testcontainers-go/modules/ollama
 
 ## Usage example
 
+The module allows you to run the Ollama container or the local Ollama binary.
+
 [Creating an Ollama container](../../modules/ollama/examples_test.go) inside_block:runOllamaContainer
 
+[Running the local Ollama binary](../../modules/ollama/examples_test.go) inside_block:localOllama
+
+If the local Ollama binary fails to execute, the module will fall back to the container version of Ollama.
+
 ## Module Reference
 
 ### Run function
@@ -48,6 +53,39 @@ When starting the Ollama container, you can pass options in a variadic way to co
 If you need to set a different Ollama Docker image, you can set a valid Docker image as the second argument in the `Run` function.
 E.g. `Run(context.Background(), "ollama/ollama:0.1.25")`.
 
+#### Use Local
+
+- Not available until the next release of testcontainers-go :material-tag: main
+
+If you need to run the local Ollama binary, you can set the `UseLocal` option in the `Run` function.
+This option accepts a list of environment variables as strings, which will be applied to the Ollama binary when executing commands.
+
+E.g. `Run(context.Background(), "ollama/ollama:0.1.25", WithUseLocal("OLLAMA_DEBUG=true"))`.
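+
+A minimal, non-normative sketch of the local mode (it assumes the `tcollama` import alias and the termination helper used in the module's examples):
+
+```go
+ctx := context.Background()
+
+// Each argument to WithUseLocal is a KEY=VALUE pair applied to the
+// environment of the spawned "ollama serve" process. The image argument is
+// only used if the module falls back to Docker.
+ollamaContainer, err := tcollama.Run(ctx, "ollama/ollama:0.1.25", tcollama.WithUseLocal("OLLAMA_DEBUG=true"))
+if err != nil {
+    log.Printf("failed to start ollama: %s", err)
+    return
+}
+defer func() {
+    if err := testcontainers.TerminateContainer(ollamaContainer); err != nil {
+        log.Printf("failed to terminate: %s", err)
+    }
+}()
+```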
+
+All the container methods are available when using the local Ollama binary, but will be executed locally instead of inside the container.
+Please consider the following differences when using the local Ollama binary:
+
+- The local Ollama binary will create a log file in the current working directory, identified by the session ID. E.g. `local-ollama-<session-id>.log`.
+- `ConnectionString` returns the connection string to connect to the local Ollama binary instead of the container, which maps to `127.0.0.1:11434`.
+- `ContainerIP` returns `127.0.0.1`.
+- `ContainerIPs` returns `["127.0.0.1"]`.
+- `CopyToContainer`, `CopyDirToContainer`, `CopyFileToContainer` and `CopyFileFromContainer` don't perform any action.
+- `GetLogProductionErrorChannel` returns a nil channel.
+- `Endpoint` returns the endpoint to connect to the local Ollama binary instead of the container, which maps to `127.0.0.1:11434`.
+- `Exec` passes the command to the local Ollama binary instead of inside the container. The first element is the command to execute, and the remaining elements are its arguments.
+- `GetContainerID` returns the container ID of the local Ollama binary instead of the container, which maps to `local-ollama-<session-id>`.
+- `Host` returns `127.0.0.1`.
+- `Inspect` returns a ContainerJSON with the state of the local Ollama binary.
+- `IsRunning` returns true if the local Ollama binary process is running.
+- `Logs` returns the logs from the local Ollama binary instead of the container.
+- `MappedPort` returns the port mapping for the local Ollama binary instead of the container.
+- `Start` starts the local Ollama binary process.
+- `State` returns the current state of the local Ollama binary process, `stopped` or `running`.
+- `Stop` stops the local Ollama binary process.
+- `Terminate` calls the `Stop` method and then removes the log file.
+
+The local Ollama binary will create a log file in the current working directory, and it will be available through the container's `Logs` method.
+
 {% include "../features/common_functional_options.md" %}
 
 ### Container Methods
diff --git a/modules/ollama/examples_test.go b/modules/ollama/examples_test.go
index 741db846be..188be45bbb 100644
--- a/modules/ollama/examples_test.go
+++ b/modules/ollama/examples_test.go
@@ -173,3 +173,73 @@ func ExampleRun_withModel_llama2_langchain() {
 	// Intentionally not asserting the output, as we don't want to run this example in the tests.
 }
+
+func ExampleRun_withLocal() {
+	ctx := context.Background()
+
+	// localOllama {
+	ollamaContainer, err := tcollama.Run(ctx, "ollama/ollama:0.3.13", tcollama.WithUseLocal("OLLAMA_DEBUG=true"))
+	defer func() {
+		if err := testcontainers.TerminateContainer(ollamaContainer); err != nil {
+			log.Printf("failed to terminate container: %s", err)
+		}
+	}()
+	if err != nil {
+		log.Printf("failed to start container: %s", err)
+		return
+	}
+	// }
+
+	model := "llama3.2:1b"
+
+	_, _, err = ollamaContainer.Exec(ctx, []string{"ollama", "pull", model})
+	if err != nil {
+		log.Printf("failed to pull model %s: %s", model, err)
+		return
+	}
+
+	_, _, err = ollamaContainer.Exec(ctx, []string{"ollama", "run", model})
+	if err != nil {
+		log.Printf("failed to run model %s: %s", model, err)
+		return
+	}
+
+	connectionStr, err := ollamaContainer.ConnectionString(ctx)
+	if err != nil {
+		log.Printf("failed to get connection string: %s", err)
+		return
+	}
+
+	var llm *langchainollama.LLM
+	if llm, err = langchainollama.New(
+		langchainollama.WithModel(model),
+		langchainollama.WithServerURL(connectionStr),
+	); err != nil {
+		log.Printf("failed to create langchain ollama: %s", err)
+		return
+	}
+
+	completion, err := llm.Call(
+		context.Background(),
+		"how can Testcontainers help with testing?",
+		llms.WithSeed(42),         // a fixed seed makes the completion reproducible
+		llms.WithTemperature(0.0), // the lower the temperature, the more deterministic the completion
+	)
+	if err != nil {
+		log.Printf("failed to generate completion: %s", err)
+		return
+	}
+
+	words := []string{
+		"easy", "isolation", "consistency",
+	}
+	lwCompletion := strings.ToLower(completion)
+
+	for _, word := range words {
+		if strings.Contains(lwCompletion, word) {
+			fmt.Println(true)
+		}
+	}
+
+	// Intentionally not asserting the output, as we don't want to run this example in the tests.
+} diff --git a/modules/ollama/go.mod b/modules/ollama/go.mod index e22b801031..2aab83b978 100644 --- a/modules/ollama/go.mod +++ b/modules/ollama/go.mod @@ -4,6 +4,7 @@ go 1.22 require ( github.com/docker/docker v27.1.1+incompatible + github.com/docker/go-connections v0.5.0 github.com/google/uuid v1.6.0 github.com/stretchr/testify v1.9.0 github.com/testcontainers/testcontainers-go v0.34.0 @@ -22,7 +23,6 @@ require ( github.com/davecgh/go-spew v1.1.1 // indirect github.com/distribution/reference v0.6.0 // indirect github.com/dlclark/regexp2 v1.8.1 // indirect - github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect github.com/go-logr/logr v1.4.1 // indirect diff --git a/modules/ollama/local.go b/modules/ollama/local.go new file mode 100644 index 0000000000..cd14c03884 --- /dev/null +++ b/modules/ollama/local.go @@ -0,0 +1,479 @@ +package ollama + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "os/exec" + "sync" + "syscall" + "time" + + "github.com/docker/docker/api/types" + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + "github.com/docker/go-connections/nat" + + "github.com/testcontainers/testcontainers-go" + tcexec "github.com/testcontainers/testcontainers-go/exec" + "github.com/testcontainers/testcontainers-go/wait" +) + +const localIP = "127.0.0.1" + +var defaultStopTimeout = time.Second * 5 + +// localContext is a type holding the context for local Ollama executions. +type localContext struct { + useLocal bool + env []string + serveCmd *exec.Cmd + logFile *os.File + mx sync.Mutex +} + +// runLocal calls the local Ollama binary instead of using a Docker container. +func runLocal(env map[string]string) (*OllamaContainer, error) { + // Apply the environment variables to the command. + cmdEnv := []string{} + for k, v := range env { + cmdEnv = append(cmdEnv, fmt.Sprintf("%s=%s", k, v)) + } + + c := &OllamaContainer{ + localCtx: &localContext{ + useLocal: true, + env: cmdEnv, + }, + } + + c.localCtx.mx.Lock() + + serveCmd, logFile, err := startOllama(context.Background(), c.localCtx) + if err != nil { + return nil, fmt.Errorf("start ollama: %w", err) + } + + c.localCtx.serveCmd = serveCmd + c.localCtx.logFile = logFile + c.localCtx.mx.Unlock() + // Wait until the Ollama process is ready, checking that the log file contains + // the "Listening on 127.0.0.1:11434" message + err = wait.ForLog("Listening on "+localIP+":11434").WaitUntilReady(context.Background(), c) + if err != nil { + return nil, fmt.Errorf("wait for ollama to start: %w", err) + } + + return c, nil +} + +// logFile returns an existing log file or creates a new one if it doesn't exist. +func logFile() (*os.File, error) { + logName := "local-ollama-" + testcontainers.SessionID() + ".log" + if _, err := os.Stat(logName); err == nil { + return os.Open(logName) + } + + file, err := os.Create(logName) + if err != nil { + return nil, fmt.Errorf("create ollama log file: %w", err) + } + + return file, nil +} + +// startOllama starts the Ollama serve command in the background, writing to the +// provided log file. +func startOllama(ctx context.Context, localCtx *localContext) (*exec.Cmd, *os.File, error) { + serveCmd := exec.CommandContext(ctx, "ollama", "serve") + serveCmd.Env = append(serveCmd.Env, localCtx.env...) 
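+	// Append the parent process environment after the custom variables: per
+	// os/exec, when Env contains duplicate keys only the last value is used,
+	// so variables from os.Environ() take precedence here.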
+ serveCmd.Env = append(serveCmd.Env, os.Environ()...) + + logFile, err := logFile() + if err != nil { + return nil, nil, fmt.Errorf("ollama log file: %w", err) + } + + serveCmd.Stdout = logFile + serveCmd.Stderr = logFile + + // Run the ollama serve command in background + err = serveCmd.Start() + if err != nil { + return nil, nil, fmt.Errorf("start ollama serve: %w", err) + } + + return serveCmd, logFile, nil +} + +// ContainerIP returns the IP address of the local Ollama binary. +func (c *OllamaContainer) ContainerIP(ctx context.Context) (string, error) { + if !c.localCtx.useLocal { + return c.Container.ContainerIP(ctx) + } + + return localIP, nil +} + +// ContainerIPs returns a slice with the IP address of the local Ollama binary. +func (c *OllamaContainer) ContainerIPs(ctx context.Context) ([]string, error) { + if !c.localCtx.useLocal { + return c.Container.ContainerIPs(ctx) + } + + return []string{localIP}, nil +} + +// CopyToContainer is a no-op for the local Ollama binary. +func (c *OllamaContainer) CopyToContainer(ctx context.Context, fileContent []byte, containerFilePath string, fileMode int64) error { + if !c.localCtx.useLocal { + return c.Container.CopyToContainer(ctx, fileContent, containerFilePath, fileMode) + } + + return nil +} + +// CopyDirToContainer is a no-op for the local Ollama binary. +func (c *OllamaContainer) CopyDirToContainer(ctx context.Context, hostDirPath string, containerParentPath string, fileMode int64) error { + if !c.localCtx.useLocal { + return c.Container.CopyDirToContainer(ctx, hostDirPath, containerParentPath, fileMode) + } + + return nil +} + +// CopyFileToContainer is a no-op for the local Ollama binary. +func (c *OllamaContainer) CopyFileToContainer(ctx context.Context, hostFilePath string, containerFilePath string, fileMode int64) error { + if !c.localCtx.useLocal { + return c.Container.CopyFileToContainer(ctx, hostFilePath, containerFilePath, fileMode) + } + + return nil +} + +// CopyFileFromContainer is a no-op for the local Ollama binary. +func (c *OllamaContainer) CopyFileFromContainer(ctx context.Context, filePath string) (io.ReadCloser, error) { + if !c.localCtx.useLocal { + return c.Container.CopyFileFromContainer(ctx, filePath) + } + + return nil, nil +} + +// GetLogProductionErrorChannel returns a nil channel. +func (c *OllamaContainer) GetLogProductionErrorChannel() <-chan error { + if !c.localCtx.useLocal { + return c.Container.GetLogProductionErrorChannel() + } + + return nil +} + +// Endpoint returns the 127.0.0.1:11434 endpoint for the local Ollama binary. +func (c *OllamaContainer) Endpoint(ctx context.Context, port string) (string, error) { + if !c.localCtx.useLocal { + return c.Container.Endpoint(ctx, port) + } + + return localIP + ":11434", nil +} + +// Exec executes a command using the local Ollama binary. +func (c *OllamaContainer) Exec(ctx context.Context, cmd []string, options ...tcexec.ProcessOption) (int, io.Reader, error) { + if !c.localCtx.useLocal { + return c.Container.Exec(ctx, cmd, options...) 
+ } + + c.localCtx.mx.Lock() + defer c.localCtx.mx.Unlock() + + args := []string{} + if len(cmd) > 1 { + args = cmd[1:] // prevent when there is only one command + } + + command := prepareExec(ctx, cmd[0], args, c.localCtx.env, c.localCtx.logFile) + err := command.Run() + if err != nil { + return command.ProcessState.ExitCode(), c.localCtx.logFile, fmt.Errorf("exec %v: %w", cmd, err) + } + + return command.ProcessState.ExitCode(), c.localCtx.logFile, nil +} + +func prepareExec(ctx context.Context, bin string, args []string, env []string, output io.Writer) *exec.Cmd { + command := exec.CommandContext(ctx, bin, args...) + command.Env = append(command.Env, env...) + command.Env = append(command.Env, os.Environ()...) + + command.Stdout = output + command.Stderr = output + + return command +} + +// GetContainerID returns a placeholder ID for local execution +func (c *OllamaContainer) GetContainerID() string { + if !c.localCtx.useLocal { + return c.Container.GetContainerID() + } + + return "local-ollama-" + testcontainers.SessionID() +} + +// Host returns the 127.0.0.1 address for the local Ollama binary. +func (c *OllamaContainer) Host(ctx context.Context) (string, error) { + if !c.localCtx.useLocal { + return c.Container.Host(ctx) + } + + return localIP, nil +} + +// Inspect returns a ContainerJSON with the state of the local Ollama binary. +// The version is read from the local Ollama binary (ollama -v), and the port +// mapping is set to 11434. +func (c *OllamaContainer) Inspect(ctx context.Context) (*types.ContainerJSON, error) { + if !c.localCtx.useLocal { + return c.Container.Inspect(ctx) + } + + state, err := c.State(ctx) + if err != nil { + return nil, fmt.Errorf("get ollama state: %w", err) + } + + // read the version from the ollama binary + buf := &bytes.Buffer{} + command := prepareExec(ctx, "ollama", []string{"-v"}, c.localCtx.env, buf) + err = command.Run() + if err != nil { + return nil, fmt.Errorf("read ollama -v output: %w", err) + } + + bs, err := io.ReadAll(buf) + if err != nil { + return nil, fmt.Errorf("read ollama -v output: %w", err) + } + + return &types.ContainerJSON{ + ContainerJSONBase: &types.ContainerJSONBase{ + ID: c.GetContainerID(), + Name: "local-ollama-" + testcontainers.SessionID(), + State: state, + }, + Config: &container.Config{ + Image: string(bs), + ExposedPorts: nat.PortSet{ + "11434/tcp": struct{}{}, + }, + Hostname: "localhost", + Entrypoint: []string{"ollama", "serve"}, + }, + NetworkSettings: &types.NetworkSettings{ + Networks: map[string]*network.EndpointSettings{}, + NetworkSettingsBase: types.NetworkSettingsBase{ + Bridge: "bridge", + Ports: nat.PortMap{ + "11434/tcp": { + {HostIP: localIP, HostPort: "11434"}, + }, + }, + }, + DefaultNetworkSettings: types.DefaultNetworkSettings{ + IPAddress: localIP, + }, + }, + }, nil +} + +// IsRunning returns true if the local Ollama process is running. +func (c *OllamaContainer) IsRunning() bool { + if !c.localCtx.useLocal { + return c.Container.IsRunning() + } + + c.localCtx.mx.Lock() + defer c.localCtx.mx.Unlock() + + return c.localCtx.serveCmd != nil +} + +// Logs returns the logs from the local Ollama binary. +func (c *OllamaContainer) Logs(ctx context.Context) (io.ReadCloser, error) { + if !c.localCtx.useLocal { + return c.Container.Logs(ctx) + } + + c.localCtx.mx.Lock() + defer c.localCtx.mx.Unlock() + + // stream the log file + return os.Open(c.localCtx.logFile.Name()) +} + +// MappedPort returns the configured port for local Ollama binary. 
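+// The requested port is ignored in local mode: the process always listens on
+// the default 11434/tcp.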
+func (c *OllamaContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Port, error) { + if !c.localCtx.useLocal { + return c.Container.MappedPort(ctx, port) + } + + // Ollama typically uses port 11434 by default + return "11434/tcp", nil +} + +// Networks returns the networks for local Ollama binary, which is empty. +func (c *OllamaContainer) Networks(ctx context.Context) ([]string, error) { + if !c.localCtx.useLocal { + return c.Container.Networks(ctx) + } + + return []string{}, nil +} + +// NetworkAliases returns the network aliases for local Ollama binary, which is empty. +func (c *OllamaContainer) NetworkAliases(ctx context.Context) (map[string][]string, error) { + if !c.localCtx.useLocal { + return c.Container.NetworkAliases(ctx) + } + + return map[string][]string{}, nil +} + +// SessionID returns the session ID for local Ollama binary, which is the session ID +// of the test execution. +func (c *OllamaContainer) SessionID() string { + if !c.localCtx.useLocal { + return c.Container.SessionID() + } + + return testcontainers.SessionID() +} + +// Start starts the local Ollama process, not failing if it's already running. +func (c *OllamaContainer) Start(ctx context.Context) error { + if !c.localCtx.useLocal { + return c.Container.Start(ctx) + } + + c.localCtx.mx.Lock() + + if c.localCtx.serveCmd != nil { + c.localCtx.mx.Unlock() + return nil + } + + testcontainers.Logger.Printf("starting ollama") + + serveCmd, logFile, err := startOllama(context.Background(), c.localCtx) + if err != nil { + c.localCtx.mx.Unlock() + return fmt.Errorf("start ollama: %w", err) + } + c.localCtx.serveCmd = serveCmd + c.localCtx.logFile = logFile + c.localCtx.mx.Unlock() // unlock before waiting for the process to be ready + + // Wait until the Ollama process is ready, checking that the log file contains + // the "Listening on 127.0.0.1:11434" message + err = wait.ForLog("Listening on "+localIP+":11434").WaitUntilReady(context.Background(), c) + if err != nil { + return fmt.Errorf("wait for ollama to start: %w", err) + } + + testcontainers.Logger.Printf("ollama started") + + return nil +} + +// State returns the current state of the Ollama process, simulating a container state +// for local execution. +func (c *OllamaContainer) State(ctx context.Context) (*types.ContainerState, error) { + if !c.localCtx.useLocal { + return c.Container.State(ctx) + } + + c.localCtx.mx.Lock() + defer c.localCtx.mx.Unlock() + + if c.localCtx.serveCmd == nil { + return &types.ContainerState{Status: "stopped"}, nil + } + + // Check if process is still running. Signal(0) is a special case in Unix-like systems. + // When you send signal 0 to a process: + // - It performs all the normal error checking (permissions, process existence, etc.) + // - But it doesn't actually send any signal to the process + if err := c.localCtx.serveCmd.Process.Signal(syscall.Signal(0)); err != nil { + return &types.ContainerState{Status: "stopped"}, nil + } + + // Setting the Running field because it's required by the wait strategy + // to check if the given log message is present. 
+ return &types.ContainerState{Status: "running", Running: true}, nil +} + +// Stop gracefully stops the local Ollama process +func (c *OllamaContainer) Stop(ctx context.Context, d *time.Duration) error { + if !c.localCtx.useLocal { + return c.Container.Stop(ctx, d) + } + + c.localCtx.mx.Lock() + defer c.localCtx.mx.Unlock() + + testcontainers.Logger.Printf("stopping ollama") + + if c.localCtx.serveCmd == nil { + return nil + } + + if err := c.localCtx.serveCmd.Process.Signal(syscall.Signal(syscall.SIGTERM)); err != nil { + return fmt.Errorf("signal ollama: %w", err) + } + + c.localCtx.serveCmd = nil + + testcontainers.Logger.Printf("ollama stopped") + + return nil +} + +// Terminate stops the local Ollama process, removing the log file. +func (c *OllamaContainer) Terminate(ctx context.Context) (err error) { + if !c.localCtx.useLocal { + return c.Container.Terminate(ctx) + } + + // First try to stop gracefully + err = c.Stop(ctx, &defaultStopTimeout) + if err != nil { + return fmt.Errorf("stop ollama: %w", err) + } + + defer func() { + c.localCtx.mx.Lock() + defer c.localCtx.mx.Unlock() + + if c.localCtx.logFile == nil { + return + } + + // remove the log file if it exists + if _, err := os.Stat(c.localCtx.logFile.Name()); err == nil { + err = c.localCtx.logFile.Close() + if err != nil { + return + } + + err = os.Remove(c.localCtx.logFile.Name()) + if err != nil { + return + } + } + }() + + return nil +} diff --git a/modules/ollama/local_test.go b/modules/ollama/local_test.go new file mode 100644 index 0000000000..c8d75ce6a5 --- /dev/null +++ b/modules/ollama/local_test.go @@ -0,0 +1,241 @@ +package ollama_test + +import ( + "context" + "io" + "os" + "os/exec" + "testing" + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/stretchr/testify/require" + + "github.com/testcontainers/testcontainers-go" + tcexec "github.com/testcontainers/testcontainers-go/exec" + "github.com/testcontainers/testcontainers-go/modules/ollama" +) + +func TestRun_local(t *testing.T) { + // check if the local ollama binary is available + if _, err := exec.LookPath("ollama"); err != nil { + t.Skip("local ollama binary not found, skipping") + } + + ctx := context.Background() + + ollamaContainer, err := ollama.Run( + ctx, + "ollama/ollama:0.1.25", + ollama.WithUseLocal("FOO=BAR"), + ) + testcontainers.CleanupContainer(t, ollamaContainer) + require.NoError(t, err) + + t.Run("connection-string", func(t *testing.T) { + connectionStr, err := ollamaContainer.ConnectionString(ctx) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:11434", connectionStr) + }) + + t.Run("container-id", func(t *testing.T) { + id := ollamaContainer.GetContainerID() + require.Equal(t, "local-ollama-"+testcontainers.SessionID(), id) + }) + + t.Run("container-ips", func(t *testing.T) { + ip, err := ollamaContainer.ContainerIP(ctx) + require.NoError(t, err) + require.Equal(t, "127.0.0.1", ip) + + ips, err := ollamaContainer.ContainerIPs(ctx) + require.NoError(t, err) + require.Equal(t, []string{"127.0.0.1"}, ips) + }) + + t.Run("copy", func(t *testing.T) { + err := ollamaContainer.CopyToContainer(ctx, []byte("test"), "/tmp", 0o755) + require.NoError(t, err) + + err = ollamaContainer.CopyDirToContainer(ctx, ".", "/tmp", 0o755) + require.NoError(t, err) + + err = ollamaContainer.CopyFileToContainer(ctx, ".", "/tmp", 0o755) + require.NoError(t, err) + + reader, err := ollamaContainer.CopyFileFromContainer(ctx, "/tmp") + require.NoError(t, err) + require.Nil(t, reader) + }) + + 
t.Run("log-production-error-channel", func(t *testing.T) { + ch := ollamaContainer.GetLogProductionErrorChannel() + require.Nil(t, ch) + }) + + t.Run("endpoint", func(t *testing.T) { + endpoint, err := ollamaContainer.Endpoint(ctx, "88888/tcp") + require.NoError(t, err) + require.Equal(t, "127.0.0.1:11434", endpoint) + }) + + t.Run("exec/pull-and-run-model", func(t *testing.T) { + const model = "llama3.2:1b" + + code, r, err := ollamaContainer.Exec(ctx, []string{"ollama", "pull", model}) + require.NoError(t, err) + require.Equal(t, 0, code) + + bs, err := io.ReadAll(r) + require.NoError(t, err) + require.Empty(t, bs) + + code, _, err = ollamaContainer.Exec(ctx, []string{"ollama", "run", model}, tcexec.Multiplexed()) + require.NoError(t, err) + require.Equal(t, 0, code) + + logs, err := ollamaContainer.Logs(ctx) + require.NoError(t, err) + defer logs.Close() + + bs, err = io.ReadAll(logs) + require.NoError(t, err) + require.Contains(t, string(bs), "llama runner started") + }) + + t.Run("is-running", func(t *testing.T) { + require.True(t, ollamaContainer.IsRunning()) + + err = ollamaContainer.Stop(ctx, nil) + require.NoError(t, err) + + require.False(t, ollamaContainer.IsRunning()) + + // return it to the running state + err = ollamaContainer.Start(ctx) + require.NoError(t, err) + + require.True(t, ollamaContainer.IsRunning()) + }) + + t.Run("host", func(t *testing.T) { + host, err := ollamaContainer.Host(ctx) + require.NoError(t, err) + require.Equal(t, "127.0.0.1", host) + }) + + t.Run("inspect", func(t *testing.T) { + inspect, err := ollamaContainer.Inspect(ctx) + require.NoError(t, err) + + require.Equal(t, "local-ollama-"+testcontainers.SessionID(), inspect.ContainerJSONBase.ID) + require.Equal(t, "local-ollama-"+testcontainers.SessionID(), inspect.ContainerJSONBase.Name) + require.True(t, inspect.ContainerJSONBase.State.Running) + + require.Contains(t, string(inspect.Config.Image), "ollama version is") + _, exists := inspect.Config.ExposedPorts["11434/tcp"] + require.True(t, exists) + require.Equal(t, "localhost", inspect.Config.Hostname) + require.Equal(t, strslice.StrSlice(strslice.StrSlice{"ollama", "serve"}), inspect.Config.Entrypoint) + + require.Empty(t, inspect.NetworkSettings.Networks) + require.Equal(t, "bridge", inspect.NetworkSettings.NetworkSettingsBase.Bridge) + + ports := inspect.NetworkSettings.NetworkSettingsBase.Ports + _, exists = ports["11434/tcp"] + require.True(t, exists) + + require.Equal(t, "127.0.0.1", inspect.NetworkSettings.Ports["11434/tcp"][0].HostIP) + require.Equal(t, "11434", inspect.NetworkSettings.Ports["11434/tcp"][0].HostPort) + }) + + t.Run("logfile", func(t *testing.T) { + openFile, err := os.Open("local-ollama-" + testcontainers.SessionID() + ".log") + require.NoError(t, err) + require.NotNil(t, openFile) + require.NoError(t, openFile.Close()) + }) + + t.Run("logs", func(t *testing.T) { + logs, err := ollamaContainer.Logs(ctx) + require.NoError(t, err) + defer logs.Close() + + bs, err := io.ReadAll(logs) + require.NoError(t, err) + + require.Contains(t, string(bs), "Listening on 127.0.0.1:11434") + }) + + t.Run("mapped-port", func(t *testing.T) { + port, err := ollamaContainer.MappedPort(ctx, "11434/tcp") + require.NoError(t, err) + require.Equal(t, "11434", port.Port()) + require.Equal(t, "tcp", port.Proto()) + }) + + t.Run("networks", func(t *testing.T) { + networks, err := ollamaContainer.Networks(ctx) + require.NoError(t, err) + require.Empty(t, networks) + }) + + t.Run("network-aliases", func(t *testing.T) { + aliases, err := 
ollamaContainer.NetworkAliases(ctx) + require.NoError(t, err) + require.Empty(t, aliases) + }) + + t.Run("session-id", func(t *testing.T) { + id := ollamaContainer.SessionID() + require.Equal(t, testcontainers.SessionID(), id) + }) + + t.Run("stop-start", func(t *testing.T) { + d := time.Second * 5 + + err := ollamaContainer.Stop(ctx, &d) + require.NoError(t, err) + + state, err := ollamaContainer.State(ctx) + require.NoError(t, err) + require.Equal(t, "stopped", state.Status) + + err = ollamaContainer.Start(ctx) + require.NoError(t, err) + + state, err = ollamaContainer.State(ctx) + require.NoError(t, err) + require.Equal(t, "running", state.Status) + + logs, err := ollamaContainer.Logs(ctx) + require.NoError(t, err) + defer logs.Close() + + bs, err := io.ReadAll(logs) + require.NoError(t, err) + + require.Contains(t, string(bs), "Listening on 127.0.0.1:11434") + }) + + t.Run("start-start", func(t *testing.T) { + state, err := ollamaContainer.State(ctx) + require.NoError(t, err) + require.Equal(t, "running", state.Status) + + err = ollamaContainer.Start(ctx) + require.NoError(t, err) + }) + + t.Run("terminate", func(t *testing.T) { + err := ollamaContainer.Terminate(ctx) + require.NoError(t, err) + + _, err = os.Stat("ollama-" + testcontainers.SessionID() + ".log") + require.True(t, os.IsNotExist(err)) + + state, err := ollamaContainer.State(ctx) + require.NoError(t, err) + require.Equal(t, "stopped", state.Status) + }) +} diff --git a/modules/ollama/ollama.go b/modules/ollama/ollama.go index 203d80103f..2f8d7e396d 100644 --- a/modules/ollama/ollama.go +++ b/modules/ollama/ollama.go @@ -20,11 +20,16 @@ const DefaultOllamaImage = "ollama/ollama:0.1.25" // OllamaContainer represents the Ollama container type used in the module type OllamaContainer struct { testcontainers.Container + localCtx *localContext } // ConnectionString returns the connection string for the Ollama container, // using the default port 11434. func (c *OllamaContainer) ConnectionString(ctx context.Context) (string, error) { + if c.localCtx.useLocal { + return "http://127.0.0.1:11434", nil + } + host, err := c.Host(ctx) if err != nil { return "", err @@ -43,6 +48,10 @@ func (c *OllamaContainer) ConnectionString(ctx context.Context) (string, error) // of the container into a new image with the given name, so it doesn't override existing images. // It should be used for creating an image that contains a loaded model. 
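+// When the local Ollama binary is in use, Commit is a no-op, as there is no
+// container filesystem to commit.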
func (c *OllamaContainer) Commit(ctx context.Context, targetImage string) error { + if c.localCtx.useLocal { + return nil + } + cli, err := testcontainers.NewDockerClientWithOpts(context.Background()) if err != nil { return err @@ -94,16 +103,29 @@ func Run(ctx context.Context, img string, opts ...testcontainers.ContainerCustom // always request a GPU if the host supports it opts = append(opts, withGpu()) + options := defaultOptions() for _, opt := range opts { if err := opt.Customize(&genericContainerReq); err != nil { return nil, fmt.Errorf("customize: %w", err) } + if _, ok := opt.(UseLocal); ok { + options.useLocal = true + } + } + + if options.useLocal { + container, err := runLocal(req.Env) + if err == nil { + return container, nil + } + + testcontainers.Logger.Printf("failed to run local ollama: %v, switching to docker", err) } container, err := testcontainers.GenericContainer(ctx, genericContainerReq) var c *OllamaContainer if container != nil { - c = &OllamaContainer{Container: container} + c = &OllamaContainer{Container: container, localCtx: &localContext{useLocal: false}} } if err != nil { diff --git a/modules/ollama/options.go b/modules/ollama/options.go index 605768a379..82191e66f3 100644 --- a/modules/ollama/options.go +++ b/modules/ollama/options.go @@ -2,12 +2,22 @@ package ollama import ( "context" + "fmt" + "strings" "github.com/docker/docker/api/types/container" "github.com/testcontainers/testcontainers-go" ) +type options struct { + useLocal bool +} + +func defaultOptions() options { + return options{} +} + var noopCustomizeRequestOption = func(req *testcontainers.GenericContainerRequest) error { return nil } // withGpu requests a GPU for the container, which could improve performance for some models. @@ -37,3 +47,34 @@ func withGpu() testcontainers.CustomizeRequestOption { } }) } + +var _ testcontainers.ContainerCustomizer = (*UseLocal)(nil) + +// UseLocal will use the local Ollama instance instead of pulling the Docker image. +type UseLocal struct { + env []string +} + +// WithUseLocal the module will use the local Ollama instance instead of pulling the Docker image. +// Pass the environment variables you need to set for the Ollama binary to be used, +// in the format of "KEY=VALUE". KeyValue pairs with the wrong format will cause an error. +func WithUseLocal(keyVal ...string) UseLocal { + return UseLocal{env: keyVal} +} + +// Customize implements the ContainerCustomizer interface, taking the key value pairs +// and setting them as environment variables for the Ollama binary. +// In the case of an invalid key value pair, an error is returned. 
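+// Values may themselves contain the "=" character: only the first "=" acts
+// as the separator between key and value.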
+func (u UseLocal) Customize(req *testcontainers.GenericContainerRequest) error { + env := make(map[string]string) + for _, kv := range u.env { + parts := strings.SplitN(kv, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid environment variable: %s", kv) + } + + env[parts[0]] = parts[1] + } + + return testcontainers.WithEnv(env)(req) +} diff --git a/modules/ollama/options_test.go b/modules/ollama/options_test.go new file mode 100644 index 0000000000..67d33e5732 --- /dev/null +++ b/modules/ollama/options_test.go @@ -0,0 +1,41 @@ +package ollama_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/modules/ollama" +) + +func TestWithUseLocal(t *testing.T) { + req := testcontainers.GenericContainerRequest{} + + t.Run("keyVal/valid", func(t *testing.T) { + opt := ollama.WithUseLocal("OLLAMA_MODELS=/path/to/models") + err := opt.Customize(&req) + require.NoError(t, err) + require.Equal(t, "/path/to/models", req.Env["OLLAMA_MODELS"]) + }) + + t.Run("keyVal/invalid", func(t *testing.T) { + opt := ollama.WithUseLocal("OLLAMA_MODELS") + err := opt.Customize(&req) + require.Error(t, err) + }) + + t.Run("keyVal/valid/multiple", func(t *testing.T) { + opt := ollama.WithUseLocal("OLLAMA_MODELS=/path/to/models", "OLLAMA_HOST=localhost") + err := opt.Customize(&req) + require.NoError(t, err) + require.Equal(t, "/path/to/models", req.Env["OLLAMA_MODELS"]) + require.Equal(t, "localhost", req.Env["OLLAMA_HOST"]) + }) + + t.Run("keyVal/invalid/multiple", func(t *testing.T) { + opt := ollama.WithUseLocal("OLLAMA_MODELS=/path/to/models", "OLLAMA_HOST") + err := opt.Customize(&req) + require.Error(t, err) + }) +} From c43e5aff9e3b723e783e727e6ff8d260062675cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Tue, 3 Dec 2024 15:58:01 +0100 Subject: [PATCH 02/42] fix: wrong working dir at CI --- .github/workflows/ci-test-go.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/ci-test-go.yml b/.github/workflows/ci-test-go.yml index 3af552f768..b2bedb6836 100644 --- a/.github/workflows/ci-test-go.yml +++ b/.github/workflows/ci-test-go.yml @@ -108,7 +108,6 @@ jobs: run: go build - name: Install dependencies - working-directory: ./${{ inputs.project-directory }} shell: bash run: | SCRIPT_PATH="./.github/scripts/${{ inputs.project-directory }}/install-dependencies.sh" From fa2b3450ceae00e9d1a0205c77b11a392243255e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Tue, 3 Dec 2024 17:32:16 +0100 Subject: [PATCH 03/42] chore: extract wait to a function --- modules/ollama/local.go | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index cd14c03884..7300b4dd0f 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -59,9 +59,11 @@ func runLocal(env map[string]string) (*OllamaContainer, error) { c.localCtx.serveCmd = serveCmd c.localCtx.logFile = logFile c.localCtx.mx.Unlock() - // Wait until the Ollama process is ready, checking that the log file contains - // the "Listening on 127.0.0.1:11434" message - err = wait.ForLog("Listening on "+localIP+":11434").WaitUntilReady(context.Background(), c) + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + + err = waitForOllama(ctx, c) if err != nil { return nil, fmt.Errorf("wait for ollama to start: %w", err) } @@ 
-108,6 +110,17 @@ func startOllama(ctx context.Context, localCtx *localContext) (*exec.Cmd, *os.Fi return serveCmd, logFile, nil } +// Wait until the Ollama process is ready, checking that the log file contains +// the "Listening on 127.0.0.1:11434" message +func waitForOllama(ctx context.Context, c *OllamaContainer) error { + err := wait.ForLog("Listening on "+localIP+":11434").WaitUntilReady(ctx, c) + if err != nil { + return fmt.Errorf("wait for ollama to start: %w", err) + } + + return nil +} + // ContainerIP returns the IP address of the local Ollama binary. func (c *OllamaContainer) ContainerIP(ctx context.Context) (string, error) { if !c.localCtx.useLocal { @@ -376,9 +389,10 @@ func (c *OllamaContainer) Start(ctx context.Context) error { c.localCtx.logFile = logFile c.localCtx.mx.Unlock() // unlock before waiting for the process to be ready - // Wait until the Ollama process is ready, checking that the log file contains - // the "Listening on 127.0.0.1:11434" message - err = wait.ForLog("Listening on "+localIP+":11434").WaitUntilReady(context.Background(), c) + waitCtx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + + err = waitForOllama(waitCtx, c) if err != nil { return fmt.Errorf("wait for ollama to start: %w", err) } From 3ac88ef16ddb7f474d55fdd63f0ab445a44ece0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Tue, 3 Dec 2024 17:35:23 +0100 Subject: [PATCH 04/42] chore: print local binary logs on error --- modules/ollama/local.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index 7300b4dd0f..6de9ea0f8e 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -115,6 +115,18 @@ func startOllama(ctx context.Context, localCtx *localContext) (*exec.Cmd, *os.Fi func waitForOllama(ctx context.Context, c *OllamaContainer) error { err := wait.ForLog("Listening on "+localIP+":11434").WaitUntilReady(ctx, c) if err != nil { + logs, err := c.Logs(ctx) + if err != nil { + return fmt.Errorf("wait for ollama to start: %w", err) + } + + bs, err := io.ReadAll(logs) + if err != nil { + return fmt.Errorf("read ollama logs: %w", err) + } + + testcontainers.Logger.Printf("ollama logs:\n%s", string(bs)) + return fmt.Errorf("wait for ollama to start: %w", err) } From 9e63a7e991e45e27039a82b5732c32796476c28a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Tue, 3 Dec 2024 18:11:57 +0100 Subject: [PATCH 05/42] chore: remove debug logs --- modules/ollama/local.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index 6de9ea0f8e..87a10da36a 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -390,8 +390,6 @@ func (c *OllamaContainer) Start(ctx context.Context) error { return nil } - testcontainers.Logger.Printf("starting ollama") - serveCmd, logFile, err := startOllama(context.Background(), c.localCtx) if err != nil { c.localCtx.mx.Unlock() @@ -409,8 +407,6 @@ func (c *OllamaContainer) Start(ctx context.Context) error { return fmt.Errorf("wait for ollama to start: %w", err) } - testcontainers.Logger.Printf("ollama started") - return nil } @@ -450,8 +446,6 @@ func (c *OllamaContainer) Stop(ctx context.Context, d *time.Duration) error { c.localCtx.mx.Lock() defer c.localCtx.mx.Unlock() - testcontainers.Logger.Printf("stopping ollama") - if c.localCtx.serveCmd == nil { return nil } @@ -462,8 +456,6 @@ func (c *OllamaContainer) Stop(ctx context.Context, d *time.Duration) 
error {
 
 	c.localCtx.serveCmd = nil
 
-	testcontainers.Logger.Printf("ollama stopped")
-
 	return nil
 }
 
From 57ca76ab1e680857492565f619e523e7172cd9b5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?=
Date: Tue, 3 Dec 2024 18:16:36 +0100
Subject: [PATCH 06/42] fix(ci): kill ollama before the tests

---
 .github/scripts/modules/ollama/install-dependencies.sh | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.github/scripts/modules/ollama/install-dependencies.sh b/.github/scripts/modules/ollama/install-dependencies.sh
index f041595b4b..425829c018 100755
--- a/.github/scripts/modules/ollama/install-dependencies.sh
+++ b/.github/scripts/modules/ollama/install-dependencies.sh
@@ -1,3 +1,6 @@
 #!/usr/bin/env bash
 
 curl -fsSL https://ollama.com/install.sh | sh
+
+# kill any running ollama process so that the tests can start from
+pkill ollama
From e4d2234df5a69bbf72e593cc095075010a8cce35 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?=
Date: Tue, 3 Dec 2024 22:28:33 +0100
Subject: [PATCH 07/42] chore: stop ollama using systemctl

---
 .github/scripts/modules/ollama/install-dependencies.sh | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/scripts/modules/ollama/install-dependencies.sh b/.github/scripts/modules/ollama/install-dependencies.sh
index 425829c018..d699158806 100755
--- a/.github/scripts/modules/ollama/install-dependencies.sh
+++ b/.github/scripts/modules/ollama/install-dependencies.sh
@@ -2,5 +2,5 @@
 
 curl -fsSL https://ollama.com/install.sh | sh
 
-# kill any running ollama process so that the tests can start from
-pkill ollama
+# kill any running ollama process so that the tests can start from a clean state
+sudo systemctl stop ollama.service
From 01134ebb8b3d0bc87b93a72e1d533ecfa3ee0868 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?=
Date: Wed, 4 Dec 2024 12:59:17 +0100
Subject: [PATCH 08/42] chore: support setting log file from the env

---
 docs/modules/ollama.md       | 25 ++++++++++++++++---------
 modules/ollama/local.go      |  5 +++++
 modules/ollama/local_test.go | 20 ++++++++++++++++++++
 3 files changed, 41 insertions(+), 9 deletions(-)

diff --git a/docs/modules/ollama.md b/docs/modules/ollama.md
index ec2b61f789..53c045d95c 100644
--- a/docs/modules/ollama.md
+++ b/docs/modules/ollama.md
@@ -57,6 +57,11 @@ E.g. `Run(context.Background(), "ollama/ollama:0.1.25")`.
 
 - Not available until the next release of testcontainers-go :material-tag: main
 
+!!!warning
+    Please make sure the local Ollama binary is not running when using the local version of the module:
+    Ollama can be started as a system service, or as part of the Ollama application,
+    and interacting with the logs of a running Ollama process not managed by the module is not supported.
+
 If you need to run the local Ollama binary, you can set the `UseLocal` option in the `Run` function.
 This option accepts a list of environment variables as strings, which will be applied to the Ollama binary when executing commands.
 
@@ -65,20 +70,22 @@ E.g. `Run(context.Background(), "ollama/ollama:0.1.25", WithUseLocal("OLLAMA_DEB
 All the container methods are available when using the local Ollama binary, but will be executed locally instead of inside the container.
 Please consider the following differences when using the local Ollama binary:
 
-- The local Ollama binary will create a log file in the current working directory, identified by the session ID. E.g. `local-ollama-<session-id>.log`.
-- `ConnectionString` returns the connection string to connect to the local Ollama binary instead of the container, which maps to `127.0.0.1:11434`.
+- The local Ollama binary will create a log file in the current working directory, identified by the session ID. E.g. `local-ollama-<session-id>.log`. It's possible to set the log file name using the `OLLAMA_LOGFILE` environment variable. So if you're running Ollama yourself, from the Ollama app or the standalone binary, you can use this environment variable to set the same log file name.
+  - For the Ollama app, the default log file resides at `$HOME/.ollama/logs/server.log`.
+  - For the standalone binary, you should start it redirecting the logs to a file. E.g. `ollama serve > /tmp/ollama.log 2>&1`.
+- `ConnectionString` returns the connection string to connect to the local Ollama binary started by the module instead of the container, which maps to `127.0.0.1:11434`.
 - `ContainerIP` returns `127.0.0.1`.
 - `ContainerIPs` returns `["127.0.0.1"]`.
 - `CopyToContainer`, `CopyDirToContainer`, `CopyFileToContainer` and `CopyFileFromContainer` don't perform any action.
 - `GetLogProductionErrorChannel` returns a nil channel.
-- `Endpoint` returns the endpoint to connect to the local Ollama binary instead of the container, which maps to `127.0.0.1:11434`.
-- `Exec` passes the command to the local Ollama binary instead of inside the container. The first element is the command to execute, and the remaining elements are its arguments.
-- `GetContainerID` returns the container ID of the local Ollama binary instead of the container, which maps to `local-ollama-<session-id>`.
+- `Endpoint` returns the endpoint to connect to the local Ollama binary started by the module instead of the container, which maps to `127.0.0.1:11434`.
+- `Exec` passes the command to the local Ollama binary started by the module instead of inside the container. The first element is the command to execute, and the remaining elements are its arguments.
+- `GetContainerID` returns the container ID of the local Ollama binary started by the module instead of the container, which maps to `local-ollama-<session-id>`.
 - `Host` returns `127.0.0.1`.
-- `Inspect` returns a ContainerJSON with the state of the local Ollama binary.
-- `IsRunning` returns true if the local Ollama binary process is running.
-- `Logs` returns the logs from the local Ollama binary instead of the container.
-- `MappedPort` returns the port mapping for the local Ollama binary instead of the container.
+- `Inspect` returns a ContainerJSON with the state of the local Ollama binary started by the module.
+- `IsRunning` returns true if the local Ollama binary process started by the module is running.
+- `Logs` returns the logs from the local Ollama binary started by the module instead of the container.
+- `MappedPort` returns the port mapping for the local Ollama binary started by the module instead of the container.
 - `Start` starts the local Ollama binary process.
 - `State` returns the current state of the local Ollama binary process, `stopped` or `running`.
 - `Stop` stops the local Ollama binary process.
diff --git a/modules/ollama/local.go b/modules/ollama/local.go
index 87a10da36a..e69cdf8cf9 100644
--- a/modules/ollama/local.go
+++ b/modules/ollama/local.go
@@ -74,6 +74,11 @@ func runLocal(env map[string]string) (*OllamaContainer, error) {
 // logFile returns an existing log file or creates a new one if it doesn't exist.
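+// The default name is local-ollama-<session-id>.log; it can be overridden by
+// setting the OLLAMA_LOGFILE environment variable.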
 func logFile() (*os.File, error) {
 	logName := "local-ollama-" + testcontainers.SessionID() + ".log"
+
+	if envLogName := os.Getenv("OLLAMA_LOGFILE"); envLogName != "" {
+		logName = envLogName
+	}
+
 	if _, err := os.Stat(logName); err == nil {
 		return os.Open(logName)
 	}
diff --git a/modules/ollama/local_test.go b/modules/ollama/local_test.go
index c8d75ce6a5..e987274442 100644
--- a/modules/ollama/local_test.go
+++ b/modules/ollama/local_test.go
@@ -5,6 +5,7 @@ import (
 	"io"
 	"os"
 	"os/exec"
+	"path/filepath"
 	"testing"
 	"time"
 
@@ -239,3 +240,22 @@ func TestRun_local(t *testing.T) {
 		require.Equal(t, "stopped", state.Status)
 	})
 }
+
+func TestRun_localWithCustomLogFile(t *testing.T) {
+	t.Setenv("OLLAMA_LOGFILE", filepath.Join(t.TempDir(), "server.log"))
+
+	ctx := context.Background()
+
+	ollamaContainer, err := ollama.Run(ctx, "ollama/ollama:0.1.25", ollama.WithUseLocal("FOO=BAR"))
+	require.NoError(t, err)
+	testcontainers.CleanupContainer(t, ollamaContainer)
+
+	logs, err := ollamaContainer.Logs(ctx)
+	require.NoError(t, err)
+	defer logs.Close()
+
+	bs, err := io.ReadAll(logs)
+	require.NoError(t, err)
+
+	require.Contains(t, string(bs), "Listening on 127.0.0.1:11434")
+}
From 5c1b4044a52b8eb4675dbfdf7d8c5649a3ff2825 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?=
Date: Wed, 4 Dec 2024 13:26:55 +0100
Subject: [PATCH 09/42] chore: support running ollama commands only

---
 docs/modules/ollama.md       |  2 +-
 modules/ollama/local.go      | 10 ++++++++++
 modules/ollama/local_test.go | 20 ++++++++++++++++++++
 3 files changed, 31 insertions(+), 1 deletion(-)

diff --git a/docs/modules/ollama.md b/docs/modules/ollama.md
index 53c045d95c..8c7738f8b1 100644
--- a/docs/modules/ollama.md
+++ b/docs/modules/ollama.md
@@ -79,7 +79,7 @@ Please consider the following differences when using the local Ollama binary:
 - `CopyToContainer`, `CopyDirToContainer`, `CopyFileToContainer` and `CopyFileFromContainer` don't perform any action.
 - `GetLogProductionErrorChannel` returns a nil channel.
 - `Endpoint` returns the endpoint to connect to the local Ollama binary started by the module instead of the container, which maps to `127.0.0.1:11434`.
-- `Exec` passes the command to the local Ollama binary started by the module instead of inside the container. The first element is the command to execute, and the remaining elements are its arguments.
+- `Exec` passes the command to the local Ollama binary started by the module instead of inside the container. The first element must be the `ollama` command, and the remaining elements are its arguments; otherwise, an error is returned.
 - `GetContainerID` returns the container ID of the local Ollama binary started by the module instead of the container, which maps to `local-ollama-<session-id>`.
 - `Host` returns `127.0.0.1`.
 - `Inspect` returns a ContainerJSON with the state of the local Ollama binary started by the module.
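+
+For illustration, a minimal sketch of `Exec` in local mode (the variable names and the model tag are assumptions, mirroring the module's tests):
+
+```go
+// Only the "ollama" binary is accepted as the first element of the command.
+code, _, err := ollamaContainer.Exec(ctx, []string{"ollama", "pull", "llama3.2:1b"})
+if err != nil || code != 0 {
+    log.Printf("pull failed: %d %s", code, err)
+}
+
+// Any other binary is rejected with an error wrapping errors.ErrUnsupported.
+if _, _, err := ollamaContainer.Exec(ctx, []string{"cat", "/etc/passwd"}); err != nil {
+    log.Printf("expected failure: %s", err)
+}
+```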
diff --git a/modules/ollama/local.go b/modules/ollama/local.go index e69cdf8cf9..e3e685f3bf 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -3,10 +3,12 @@ package ollama import ( "bytes" "context" + "errors" "fmt" "io" "os" "os/exec" + "strings" "sync" "syscall" "time" @@ -219,6 +221,14 @@ func (c *OllamaContainer) Exec(ctx context.Context, cmd []string, options ...tce c.localCtx.mx.Lock() defer c.localCtx.mx.Unlock() + if len(cmd) == 0 { + err := errors.New("exec: no command provided") + return 1, strings.NewReader(err.Error()), err + } else if cmd[0] != "ollama" { + err := fmt.Errorf("%s: %w", cmd[0], errors.ErrUnsupported) + return 1, strings.NewReader(err.Error()), err + } + args := []string{} if len(cmd) > 1 { args = cmd[1:] // prevent when there is only one command diff --git a/modules/ollama/local_test.go b/modules/ollama/local_test.go index e987274442..6e85814e9e 100644 --- a/modules/ollama/local_test.go +++ b/modules/ollama/local_test.go @@ -2,6 +2,7 @@ package ollama_test import ( "context" + "errors" "io" "os" "os/exec" @@ -104,6 +105,25 @@ func TestRun_local(t *testing.T) { require.Contains(t, string(bs), "llama runner started") }) + t.Run("exec/unsupported-command", func(t *testing.T) { + code, r, err := ollamaContainer.Exec(ctx, []string{"cat", "/etc/passwd"}) + require.Equal(t, 1, code) + require.Error(t, err) + require.ErrorIs(t, err, errors.ErrUnsupported) + + bs, err := io.ReadAll(r) + require.NoError(t, err) + require.Equal(t, "cat: unsupported operation", string(bs)) + + code, r, err = ollamaContainer.Exec(ctx, []string{}) + require.Equal(t, 1, code) + require.Error(t, err) + + bs, err = io.ReadAll(r) + require.NoError(t, err) + require.Equal(t, "exec: no command provided", string(bs)) + }) + t.Run("is-running", func(t *testing.T) { require.True(t, ollamaContainer.IsRunning()) From ce04a0ee03f47982a3d3d4e5f0267ac16a27bf87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 13 Dec 2024 07:25:14 +0100 Subject: [PATCH 10/42] fix: release lock on error --- modules/ollama/local.go | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index e3e685f3bf..af52ce76f8 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -55,6 +55,7 @@ func runLocal(env map[string]string) (*OllamaContainer, error) { serveCmd, logFile, err := startOllama(context.Background(), c.localCtx) if err != nil { + c.localCtx.mx.Unlock() return nil, fmt.Errorf("start ollama: %w", err) } From 99e2655002eae752e2a1ddc5f6273ad4ecf245e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 13 Dec 2024 07:36:15 +0100 Subject: [PATCH 11/42] chore: add more test coverage for the option --- modules/ollama/options_test.go | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/modules/ollama/options_test.go b/modules/ollama/options_test.go index 67d33e5732..f842d15a17 100644 --- a/modules/ollama/options_test.go +++ b/modules/ollama/options_test.go @@ -33,6 +33,14 @@ func TestWithUseLocal(t *testing.T) { require.Equal(t, "localhost", req.Env["OLLAMA_HOST"]) }) + t.Run("keyVal/valid/multiple-equals", func(t *testing.T) { + opt := ollama.WithUseLocal("OLLAMA_MODELS=/path/to/models", "OLLAMA_HOST=localhost=127.0.0.1") + err := opt.Customize(&req) + require.NoError(t, err) + require.Equal(t, "/path/to/models", req.Env["OLLAMA_MODELS"]) + require.Equal(t, "localhost=127.0.0.1", req.Env["OLLAMA_HOST"]) + }) + t.Run("keyVal/invalid/multiple", func(t *testing.T) { opt := 
ollama.WithUseLocal("OLLAMA_MODELS=/path/to/models", "OLLAMA_HOST") err := opt.Customize(&req) From 6c50334a24f232e9312b7489cee69a4c98afa99c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 13 Dec 2024 07:36:53 +0100 Subject: [PATCH 12/42] chore: simplify useLocal checks --- modules/ollama/local.go | 50 +++++++++++++++++++--------------------- modules/ollama/ollama.go | 6 ++--- 2 files changed, 27 insertions(+), 29 deletions(-) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index af52ce76f8..e7ae9d6158 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -29,7 +29,6 @@ var defaultStopTimeout = time.Second * 5 // localContext is a type holding the context for local Ollama executions. type localContext struct { - useLocal bool env []string serveCmd *exec.Cmd logFile *os.File @@ -46,8 +45,7 @@ func runLocal(env map[string]string) (*OllamaContainer, error) { c := &OllamaContainer{ localCtx: &localContext{ - useLocal: true, - env: cmdEnv, + env: cmdEnv, }, } @@ -143,7 +141,7 @@ func waitForOllama(ctx context.Context, c *OllamaContainer) error { // ContainerIP returns the IP address of the local Ollama binary. func (c *OllamaContainer) ContainerIP(ctx context.Context) (string, error) { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.ContainerIP(ctx) } @@ -152,7 +150,7 @@ func (c *OllamaContainer) ContainerIP(ctx context.Context) (string, error) { // ContainerIPs returns a slice with the IP address of the local Ollama binary. func (c *OllamaContainer) ContainerIPs(ctx context.Context) ([]string, error) { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.ContainerIPs(ctx) } @@ -161,7 +159,7 @@ func (c *OllamaContainer) ContainerIPs(ctx context.Context) ([]string, error) { // CopyToContainer is a no-op for the local Ollama binary. func (c *OllamaContainer) CopyToContainer(ctx context.Context, fileContent []byte, containerFilePath string, fileMode int64) error { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.CopyToContainer(ctx, fileContent, containerFilePath, fileMode) } @@ -170,7 +168,7 @@ func (c *OllamaContainer) CopyToContainer(ctx context.Context, fileContent []byt // CopyDirToContainer is a no-op for the local Ollama binary. func (c *OllamaContainer) CopyDirToContainer(ctx context.Context, hostDirPath string, containerParentPath string, fileMode int64) error { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.CopyDirToContainer(ctx, hostDirPath, containerParentPath, fileMode) } @@ -179,7 +177,7 @@ func (c *OllamaContainer) CopyDirToContainer(ctx context.Context, hostDirPath st // CopyFileToContainer is a no-op for the local Ollama binary. func (c *OllamaContainer) CopyFileToContainer(ctx context.Context, hostFilePath string, containerFilePath string, fileMode int64) error { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.CopyFileToContainer(ctx, hostFilePath, containerFilePath, fileMode) } @@ -188,7 +186,7 @@ func (c *OllamaContainer) CopyFileToContainer(ctx context.Context, hostFilePath // CopyFileFromContainer is a no-op for the local Ollama binary. 
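+// It returns a nil ReadCloser and a nil error, as there is no container
+// filesystem to read from.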
func (c *OllamaContainer) CopyFileFromContainer(ctx context.Context, filePath string) (io.ReadCloser, error) { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.CopyFileFromContainer(ctx, filePath) } @@ -197,7 +195,7 @@ func (c *OllamaContainer) CopyFileFromContainer(ctx context.Context, filePath st // GetLogProductionErrorChannel returns a nil channel. func (c *OllamaContainer) GetLogProductionErrorChannel() <-chan error { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.GetLogProductionErrorChannel() } @@ -206,7 +204,7 @@ func (c *OllamaContainer) GetLogProductionErrorChannel() <-chan error { // Endpoint returns the 127.0.0.1:11434 endpoint for the local Ollama binary. func (c *OllamaContainer) Endpoint(ctx context.Context, port string) (string, error) { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.Endpoint(ctx, port) } @@ -215,7 +213,7 @@ func (c *OllamaContainer) Endpoint(ctx context.Context, port string) (string, er // Exec executes a command using the local Ollama binary. func (c *OllamaContainer) Exec(ctx context.Context, cmd []string, options ...tcexec.ProcessOption) (int, io.Reader, error) { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.Exec(ctx, cmd, options...) } @@ -257,7 +255,7 @@ func prepareExec(ctx context.Context, bin string, args []string, env []string, o // GetContainerID returns a placeholder ID for local execution func (c *OllamaContainer) GetContainerID() string { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.GetContainerID() } @@ -266,7 +264,7 @@ func (c *OllamaContainer) GetContainerID() string { // Host returns the 127.0.0.1 address for the local Ollama binary. func (c *OllamaContainer) Host(ctx context.Context) (string, error) { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.Host(ctx) } @@ -277,7 +275,7 @@ func (c *OllamaContainer) Host(ctx context.Context) (string, error) { // The version is read from the local Ollama binary (ollama -v), and the port // mapping is set to 11434. func (c *OllamaContainer) Inspect(ctx context.Context) (*types.ContainerJSON, error) { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.Inspect(ctx) } @@ -332,7 +330,7 @@ func (c *OllamaContainer) Inspect(ctx context.Context) (*types.ContainerJSON, er // IsRunning returns true if the local Ollama process is running. func (c *OllamaContainer) IsRunning() bool { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.IsRunning() } @@ -344,7 +342,7 @@ func (c *OllamaContainer) IsRunning() bool { // Logs returns the logs from the local Ollama binary. func (c *OllamaContainer) Logs(ctx context.Context) (io.ReadCloser, error) { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.Logs(ctx) } @@ -357,7 +355,7 @@ func (c *OllamaContainer) Logs(ctx context.Context) (io.ReadCloser, error) { // MappedPort returns the configured port for local Ollama binary. func (c *OllamaContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Port, error) { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.MappedPort(ctx, port) } @@ -367,7 +365,7 @@ func (c *OllamaContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Po // Networks returns the networks for local Ollama binary, which is empty. 
func (c *OllamaContainer) Networks(ctx context.Context) ([]string, error) { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.Networks(ctx) } @@ -376,7 +374,7 @@ func (c *OllamaContainer) Networks(ctx context.Context) ([]string, error) { // NetworkAliases returns the network aliases for local Ollama binary, which is empty. func (c *OllamaContainer) NetworkAliases(ctx context.Context) (map[string][]string, error) { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.NetworkAliases(ctx) } @@ -386,7 +384,7 @@ func (c *OllamaContainer) NetworkAliases(ctx context.Context) (map[string][]stri // SessionID returns the session ID for local Ollama binary, which is the session ID // of the test execution. func (c *OllamaContainer) SessionID() string { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.SessionID() } @@ -395,7 +393,7 @@ func (c *OllamaContainer) SessionID() string { // Start starts the local Ollama process, not failing if it's already running. func (c *OllamaContainer) Start(ctx context.Context) error { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.Start(ctx) } @@ -406,7 +404,7 @@ func (c *OllamaContainer) Start(ctx context.Context) error { return nil } - serveCmd, logFile, err := startOllama(context.Background(), c.localCtx) + serveCmd, logFile, err := startOllama(ctx, c.localCtx) if err != nil { c.localCtx.mx.Unlock() return fmt.Errorf("start ollama: %w", err) @@ -429,7 +427,7 @@ func (c *OllamaContainer) Start(ctx context.Context) error { // State returns the current state of the Ollama process, simulating a container state // for local execution. func (c *OllamaContainer) State(ctx context.Context) (*types.ContainerState, error) { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.State(ctx) } @@ -455,7 +453,7 @@ func (c *OllamaContainer) State(ctx context.Context) (*types.ContainerState, err // Stop gracefully stops the local Ollama process func (c *OllamaContainer) Stop(ctx context.Context, d *time.Duration) error { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.Stop(ctx, d) } @@ -477,7 +475,7 @@ func (c *OllamaContainer) Stop(ctx context.Context, d *time.Duration) error { // Terminate stops the local Ollama process, removing the log file. func (c *OllamaContainer) Terminate(ctx context.Context) (err error) { - if !c.localCtx.useLocal { + if c.localCtx == nil { return c.Container.Terminate(ctx) } diff --git a/modules/ollama/ollama.go b/modules/ollama/ollama.go index 2f8d7e396d..f71323ca04 100644 --- a/modules/ollama/ollama.go +++ b/modules/ollama/ollama.go @@ -26,7 +26,7 @@ type OllamaContainer struct { // ConnectionString returns the connection string for the Ollama container, // using the default port 11434. func (c *OllamaContainer) ConnectionString(ctx context.Context) (string, error) { - if c.localCtx.useLocal { + if c.localCtx != nil { return "http://127.0.0.1:11434", nil } @@ -48,7 +48,7 @@ func (c *OllamaContainer) ConnectionString(ctx context.Context) (string, error) // of the container into a new image with the given name, so it doesn't override existing images. // It should be used for creating an image that contains a loaded model. 
 func (c *OllamaContainer) Commit(ctx context.Context, targetImage string) error {
-	if c.localCtx.useLocal {
+	if c.localCtx != nil {
 		return nil
 	}
 
@@ -125,7 +125,7 @@ func Run(ctx context.Context, img string, opts ...testcontainers.ContainerCustom
 	container, err := testcontainers.GenericContainer(ctx, genericContainerReq)
 	var c *OllamaContainer
 	if container != nil {
-		c = &OllamaContainer{Container: container, localCtx: &localContext{useLocal: false}}
+		c = &OllamaContainer{Container: container}
 	}
 
 	if err != nil {

From 6a06b4d087cbaf5ddc3e7bbca7ff1468bb34744c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?=
Date: Fri, 13 Dec 2024 07:38:59 +0100
Subject: [PATCH 13/42] chore: simplify
---
 modules/ollama/local.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modules/ollama/local.go b/modules/ollama/local.go
index e7ae9d6158..51b63a7afc 100644
--- a/modules/ollama/local.go
+++ b/modules/ollama/local.go
@@ -38,9 +38,9 @@ type localContext struct {
 // runLocal calls the local Ollama binary instead of using a Docker container.
 func runLocal(env map[string]string) (*OllamaContainer, error) {
 	// Apply the environment variables to the command.
-	cmdEnv := []string{}
+	cmdEnv := make([]string, 0, len(env)*2)
 	for k, v := range env {
-		cmdEnv = append(cmdEnv, fmt.Sprintf("%s=%s", k, v))
+		cmdEnv = append(cmdEnv, k+"="+v)
 	}
 
 	c := &OllamaContainer{

From bd85c0effcf8b60fdfcad4c5c9c784e2372cb8a5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?=
Date: Fri, 13 Dec 2024 07:40:33 +0100
Subject: [PATCH 14/42] chore: pass context to runLocal
---
 modules/ollama/local.go  | 6 +++---
 modules/ollama/ollama.go | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/modules/ollama/local.go b/modules/ollama/local.go
index 51b63a7afc..49bd176e51 100644
--- a/modules/ollama/local.go
+++ b/modules/ollama/local.go
@@ -36,7 +36,7 @@ type localContext struct {
 }
 
 // runLocal calls the local Ollama binary instead of using a Docker container.
-func runLocal(env map[string]string) (*OllamaContainer, error) {
+func runLocal(ctx context.Context, env map[string]string) (*OllamaContainer, error) {
 	// Apply the environment variables to the command.
cmdEnv := make([]string, 0, len(env)*2) for k, v := range env { @@ -51,7 +51,7 @@ func runLocal(env map[string]string) (*OllamaContainer, error) { c.localCtx.mx.Lock() - serveCmd, logFile, err := startOllama(context.Background(), c.localCtx) + serveCmd, logFile, err := startOllama(ctx, c.localCtx) if err != nil { c.localCtx.mx.Unlock() return nil, fmt.Errorf("start ollama: %w", err) @@ -61,7 +61,7 @@ func runLocal(env map[string]string) (*OllamaContainer, error) { c.localCtx.logFile = logFile c.localCtx.mx.Unlock() - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() err = waitForOllama(ctx, c) diff --git a/modules/ollama/ollama.go b/modules/ollama/ollama.go index f71323ca04..56acb95226 100644 --- a/modules/ollama/ollama.go +++ b/modules/ollama/ollama.go @@ -114,7 +114,7 @@ func Run(ctx context.Context, img string, opts ...testcontainers.ContainerCustom } if options.useLocal { - container, err := runLocal(req.Env) + container, err := runLocal(ctx, req.Env) if err == nil { return container, nil } From 6239947ac75496bfeaf98d28cc143feb35ccf383 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 13 Dec 2024 07:41:49 +0100 Subject: [PATCH 15/42] chore: move ctx to the right scope --- modules/ollama/local.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index 49bd176e51..8872ab0552 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -61,9 +61,6 @@ func runLocal(ctx context.Context, env map[string]string) (*OllamaContainer, err c.localCtx.logFile = logFile c.localCtx.mx.Unlock() - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - err = waitForOllama(ctx, c) if err != nil { return nil, fmt.Errorf("wait for ollama to start: %w", err) @@ -119,6 +116,9 @@ func startOllama(ctx context.Context, localCtx *localContext) (*exec.Cmd, *os.Fi // Wait until the Ollama process is ready, checking that the log file contains // the "Listening on 127.0.0.1:11434" message func waitForOllama(ctx context.Context, c *OllamaContainer) error { + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + err := wait.ForLog("Listening on "+localIP+":11434").WaitUntilReady(ctx, c) if err != nil { logs, err := c.Logs(ctx) From 811eb6dc6e661a8baa8e24c14f292c463aaa39f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 13 Dec 2024 08:21:06 +0100 Subject: [PATCH 16/42] chore: remove not needed --- modules/ollama/local.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index 8872ab0552..d2ac54dcf8 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -77,10 +77,6 @@ func logFile() (*os.File, error) { logName = envLogName } - if _, err := os.Stat(logName); err == nil { - return os.Open(logName) - } - file, err := os.Create(logName) if err != nil { return nil, fmt.Errorf("create ollama log file: %w", err) From 55569719ec49115151d1d4ad34fc8120e6bc72dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 13 Dec 2024 08:22:14 +0100 Subject: [PATCH 17/42] chore: use a container function --- modules/ollama/local.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index d2ac54dcf8..a4c3a5f74c 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -61,7 +61,7 @@ 
func runLocal(ctx context.Context, env map[string]string) (*OllamaContainer, err c.localCtx.logFile = logFile c.localCtx.mx.Unlock() - err = waitForOllama(ctx, c) + err = c.waitForOllama(ctx) if err != nil { return nil, fmt.Errorf("wait for ollama to start: %w", err) } @@ -109,9 +109,9 @@ func startOllama(ctx context.Context, localCtx *localContext) (*exec.Cmd, *os.Fi return serveCmd, logFile, nil } -// Wait until the Ollama process is ready, checking that the log file contains +// waitForOllama Wait until the Ollama process is ready, checking that the log file contains // the "Listening on 127.0.0.1:11434" message -func waitForOllama(ctx context.Context, c *OllamaContainer) error { +func (c *OllamaContainer) waitForOllama(ctx context.Context) error { ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() @@ -412,7 +412,7 @@ func (c *OllamaContainer) Start(ctx context.Context) error { waitCtx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() - err = waitForOllama(waitCtx, c) + err = c.waitForOllama(waitCtx) if err != nil { return fmt.Errorf("wait for ollama to start: %w", err) } From c68ff22004e05c7efb0d28ff2cb29c776c8c5701 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 13 Dec 2024 08:57:46 +0100 Subject: [PATCH 18/42] chore: support reading OLLAMA_HOST --- docs/modules/ollama.md | 4 +++ modules/ollama/local.go | 42 ++++++++++++++++++------- modules/ollama/local_test.go | 61 ++++++++++++++++++++++++++++++++++++ modules/ollama/ollama.go | 2 +- 4 files changed, 97 insertions(+), 12 deletions(-) diff --git a/docs/modules/ollama.md b/docs/modules/ollama.md index 8c7738f8b1..bffe63648e 100644 --- a/docs/modules/ollama.md +++ b/docs/modules/ollama.md @@ -93,6 +93,10 @@ Please consider the following differences when using the local Ollama binary: The local Ollama binary will create a log file in the current working directory, and it will be available in the container's `Logs` method. +!!!info + The local Ollama binary will use the `OLLAMA_HOST` environment variable to set the host and port to listen on. + If the environment variable is not set, it will use the default host `127.0.0.1` and port `11434`. + {% include "../features/common_functional_options.md" %} ### Container Methods diff --git a/modules/ollama/local.go b/modules/ollama/local.go index a4c3a5f74c..9eaa2fed65 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "net" "os" "os/exec" "strings" @@ -23,7 +24,10 @@ import ( "github.com/testcontainers/testcontainers-go/wait" ) -const localIP = "127.0.0.1" +const ( + localIP = "127.0.0.1" + localPort = "11434" +) var defaultStopTimeout = time.Second * 5 @@ -33,6 +37,8 @@ type localContext struct { serveCmd *exec.Cmd logFile *os.File mx sync.Mutex + host string + port string } // runLocal calls the local Ollama binary instead of using a Docker container. 
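
The OLLAMA_HOST lookup documented above boils down to a split-with-fallback. A minimal standalone sketch of that behaviour, assuming only the standard library (the helper name is illustrative, not module API):

package main

import (
	"fmt"
	"net"
	"os"
)

// resolveOllamaHost mirrors the documented lookup: OLLAMA_HOST wins when set,
// otherwise the defaults 127.0.0.1 and 11434 apply.
func resolveOllamaHost() (host, port string, err error) {
	host, port = "127.0.0.1", "11434"
	if v := os.Getenv("OLLAMA_HOST"); v != "" {
		if host, port, err = net.SplitHostPort(v); err != nil {
			return "", "", fmt.Errorf("invalid OLLAMA_HOST: %w", err)
		}
	}
	return host, port, nil
}

func main() {
	os.Setenv("OLLAMA_HOST", "127.0.0.1:1234")
	fmt.Println(resolveOllamaHost()) // 127.0.0.1 1234 <nil>
}

Note that net.SplitHostPort rejects values without a port, which is why a malformed OLLAMA_HOST surfaces as an error instead of being silently ignored.
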
@@ -43,10 +49,24 @@ func runLocal(ctx context.Context, env map[string]string) (*OllamaContainer, err cmdEnv = append(cmdEnv, k+"="+v) } + localCtx := &localContext{ + env: cmdEnv, + host: localIP, + port: localPort, + } + + if envHost := os.Getenv("OLLAMA_HOST"); envHost != "" { + host, port, err := net.SplitHostPort(envHost) + if err != nil { + return nil, fmt.Errorf("invalid OLLAMA_HOST: %w", err) + } + + localCtx.host = host + localCtx.port = port + } + c := &OllamaContainer{ - localCtx: &localContext{ - env: cmdEnv, - }, + localCtx: localCtx, } c.localCtx.mx.Lock() @@ -115,7 +135,7 @@ func (c *OllamaContainer) waitForOllama(ctx context.Context) error { ctx, cancel := context.WithTimeout(ctx, 10*time.Second) defer cancel() - err := wait.ForLog("Listening on "+localIP+":11434").WaitUntilReady(ctx, c) + err := wait.ForLog("Listening on "+c.localCtx.host+":"+c.localCtx.port).WaitUntilReady(ctx, c) if err != nil { logs, err := c.Logs(ctx) if err != nil { @@ -204,7 +224,7 @@ func (c *OllamaContainer) Endpoint(ctx context.Context, port string) (string, er return c.Container.Endpoint(ctx, port) } - return localIP + ":11434", nil + return c.localCtx.host + ":" + c.localCtx.port, nil } // Exec executes a command using the local Ollama binary. @@ -302,7 +322,7 @@ func (c *OllamaContainer) Inspect(ctx context.Context) (*types.ContainerJSON, er Config: &container.Config{ Image: string(bs), ExposedPorts: nat.PortSet{ - "11434/tcp": struct{}{}, + nat.Port(c.localCtx.port + "/tcp"): struct{}{}, }, Hostname: "localhost", Entrypoint: []string{"ollama", "serve"}, @@ -312,13 +332,13 @@ func (c *OllamaContainer) Inspect(ctx context.Context) (*types.ContainerJSON, er NetworkSettingsBase: types.NetworkSettingsBase{ Bridge: "bridge", Ports: nat.PortMap{ - "11434/tcp": { - {HostIP: localIP, HostPort: "11434"}, + nat.Port(c.localCtx.port + "/tcp"): { + {HostIP: c.localCtx.host, HostPort: c.localCtx.port}, }, }, }, DefaultNetworkSettings: types.DefaultNetworkSettings{ - IPAddress: localIP, + IPAddress: c.localCtx.host, }, }, }, nil @@ -356,7 +376,7 @@ func (c *OllamaContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Po } // Ollama typically uses port 11434 by default - return "11434/tcp", nil + return nat.Port(c.localCtx.port + "/tcp"), nil } // Networks returns the networks for local Ollama binary, which is empty. 
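
The synthesized Inspect and MappedPort values above are built from the nat types in github.com/docker/go-connections. A small sketch of how those values compose (not part of the patch; the 1234 port is illustrative, mirroring the custom-host test below):

package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// Same shape as the mapping the patched Inspect fabricates.
	port := nat.Port("1234/tcp")
	ports := nat.PortMap{
		port: []nat.PortBinding{{HostIP: "127.0.0.1", HostPort: "1234"}},
	}
	fmt.Println(port.Port(), port.Proto(), ports[port][0].HostPort) // 1234 tcp 1234
}
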
diff --git a/modules/ollama/local_test.go b/modules/ollama/local_test.go index 6e85814e9e..b555fee074 100644 --- a/modules/ollama/local_test.go +++ b/modules/ollama/local_test.go @@ -279,3 +279,64 @@ func TestRun_localWithCustomLogFile(t *testing.T) { require.Contains(t, string(bs), "Listening on 127.0.0.1:11434") } + +func TestRun_localWithCustomHost(t *testing.T) { + t.Setenv("OLLAMA_HOST", "127.0.0.1:1234") + + ctx := context.Background() + + ollamaContainer, err := ollama.Run(ctx, "ollama/ollama:0.1.25", ollama.WithUseLocal("FOO=BAR")) + require.NoError(t, err) + testcontainers.CleanupContainer(t, ollamaContainer) + + t.Run("connection-string", func(t *testing.T) { + connectionStr, err := ollamaContainer.ConnectionString(ctx) + require.NoError(t, err) + require.Equal(t, "http://127.0.0.1:1234", connectionStr) + }) + + t.Run("endpoint", func(t *testing.T) { + endpoint, err := ollamaContainer.Endpoint(ctx, "1234/tcp") + require.NoError(t, err) + require.Equal(t, "127.0.0.1:1234", endpoint) + }) + + t.Run("inspect", func(t *testing.T) { + inspect, err := ollamaContainer.Inspect(ctx) + require.NoError(t, err) + + require.Contains(t, string(inspect.Config.Image), "ollama version is") + _, exists := inspect.Config.ExposedPorts["1234/tcp"] + require.True(t, exists) + require.Equal(t, "localhost", inspect.Config.Hostname) + require.Equal(t, strslice.StrSlice(strslice.StrSlice{"ollama", "serve"}), inspect.Config.Entrypoint) + + require.Empty(t, inspect.NetworkSettings.Networks) + require.Equal(t, "bridge", inspect.NetworkSettings.NetworkSettingsBase.Bridge) + + ports := inspect.NetworkSettings.NetworkSettingsBase.Ports + _, exists = ports["1234/tcp"] + require.True(t, exists) + + require.Equal(t, "127.0.0.1", inspect.NetworkSettings.Ports["1234/tcp"][0].HostIP) + require.Equal(t, "1234", inspect.NetworkSettings.Ports["1234/tcp"][0].HostPort) + }) + + t.Run("logs", func(t *testing.T) { + logs, err := ollamaContainer.Logs(ctx) + require.NoError(t, err) + defer logs.Close() + + bs, err := io.ReadAll(logs) + require.NoError(t, err) + + require.Contains(t, string(bs), "Listening on 127.0.0.1:1234") + }) + + t.Run("mapped-port", func(t *testing.T) { + port, err := ollamaContainer.MappedPort(ctx, "1234/tcp") + require.NoError(t, err) + require.Equal(t, "1234", port.Port()) + require.Equal(t, "tcp", port.Proto()) + }) +} diff --git a/modules/ollama/ollama.go b/modules/ollama/ollama.go index 56acb95226..db573ed903 100644 --- a/modules/ollama/ollama.go +++ b/modules/ollama/ollama.go @@ -27,7 +27,7 @@ type OllamaContainer struct { // using the default port 11434. 
func (c *OllamaContainer) ConnectionString(ctx context.Context) (string, error) { if c.localCtx != nil { - return "http://127.0.0.1:11434", nil + return "http://" + c.localCtx.host + ":" + c.localCtx.port, nil } host, err := c.Host(ctx) From b5e98745a0c484d9739dd13969b296d48d53def5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 13 Dec 2024 09:01:58 +0100 Subject: [PATCH 19/42] chore: return error with copy APIs --- modules/ollama/local.go | 13 ++++++++----- modules/ollama/local_test.go | 8 ++++---- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index 9eaa2fed65..6f05a60cc5 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -29,7 +29,10 @@ const ( localPort = "11434" ) -var defaultStopTimeout = time.Second * 5 +var ( + defaultStopTimeout = time.Second * 5 + errCopyAPIsNotSupported = errors.New("copy APIs are not supported for local Ollama binary") +) // localContext is a type holding the context for local Ollama executions. type localContext struct { @@ -179,7 +182,7 @@ func (c *OllamaContainer) CopyToContainer(ctx context.Context, fileContent []byt return c.Container.CopyToContainer(ctx, fileContent, containerFilePath, fileMode) } - return nil + return errCopyAPIsNotSupported } // CopyDirToContainer is a no-op for the local Ollama binary. @@ -188,7 +191,7 @@ func (c *OllamaContainer) CopyDirToContainer(ctx context.Context, hostDirPath st return c.Container.CopyDirToContainer(ctx, hostDirPath, containerParentPath, fileMode) } - return nil + return errCopyAPIsNotSupported } // CopyFileToContainer is a no-op for the local Ollama binary. @@ -197,7 +200,7 @@ func (c *OllamaContainer) CopyFileToContainer(ctx context.Context, hostFilePath return c.Container.CopyFileToContainer(ctx, hostFilePath, containerFilePath, fileMode) } - return nil + return errCopyAPIsNotSupported } // CopyFileFromContainer is a no-op for the local Ollama binary. @@ -206,7 +209,7 @@ func (c *OllamaContainer) CopyFileFromContainer(ctx context.Context, filePath st return c.Container.CopyFileFromContainer(ctx, filePath) } - return nil, nil + return nil, errCopyAPIsNotSupported } // GetLogProductionErrorChannel returns a nil channel. 
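
The copy APIs now fail fast with a package-level sentinel, so callers can branch on it with errors.Is. A minimal standalone sketch of that pattern (the function here is hypothetical, not module API):

package main

import (
	"errors"
	"fmt"
)

// A package-level sentinel, as the patch defines for the copy APIs.
var errNotSupported = errors.New("copy APIs are not supported for local Ollama binary")

func copyToContainer() error {
	// Wrapping with %w keeps the sentinel matchable via errors.Is.
	return fmt.Errorf("copy to container: %w", errNotSupported)
}

func main() {
	if err := copyToContainer(); errors.Is(err, errNotSupported) {
		fmt.Println("expected:", err)
	}
}
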
diff --git a/modules/ollama/local_test.go b/modules/ollama/local_test.go index b555fee074..8dabf5e295 100644 --- a/modules/ollama/local_test.go +++ b/modules/ollama/local_test.go @@ -57,16 +57,16 @@ func TestRun_local(t *testing.T) { t.Run("copy", func(t *testing.T) { err := ollamaContainer.CopyToContainer(ctx, []byte("test"), "/tmp", 0o755) - require.NoError(t, err) + require.Error(t, err) err = ollamaContainer.CopyDirToContainer(ctx, ".", "/tmp", 0o755) - require.NoError(t, err) + require.Error(t, err) err = ollamaContainer.CopyFileToContainer(ctx, ".", "/tmp", 0o755) - require.NoError(t, err) + require.Error(t, err) reader, err := ollamaContainer.CopyFileFromContainer(ctx, "/tmp") - require.NoError(t, err) + require.Error(t, err) require.Nil(t, reader) }) From c39e5543f9240fa3b74839eecdd4e97ee793a7af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 13 Dec 2024 09:05:15 +0100 Subject: [PATCH 20/42] chore: simply execute the script --- .github/workflows/ci-test-go.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-test-go.yml b/.github/workflows/ci-test-go.yml index b2bedb6836..0d6af15880 100644 --- a/.github/workflows/ci-test-go.yml +++ b/.github/workflows/ci-test-go.yml @@ -112,7 +112,7 @@ jobs: run: | SCRIPT_PATH="./.github/scripts/${{ inputs.project-directory }}/install-dependencies.sh" if [ -f "$SCRIPT_PATH" ]; then - bash "$SCRIPT_PATH" + $SCRIPT_PATH else echo "No dependencies script found at $SCRIPT_PATH - skipping installation" fi From 556c2f54469628165eec66f335ca051c6a608db5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 13 Dec 2024 09:06:41 +0100 Subject: [PATCH 21/42] chore: simplify var initialisation --- modules/ollama/local.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index 6f05a60cc5..8bdf7a4811 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -304,14 +304,14 @@ func (c *OllamaContainer) Inspect(ctx context.Context) (*types.ContainerJSON, er } // read the version from the ollama binary - buf := &bytes.Buffer{} - command := prepareExec(ctx, "ollama", []string{"-v"}, c.localCtx.env, buf) + var buf bytes.Buffer + command := prepareExec(ctx, "ollama", []string{"-v"}, c.localCtx.env, &buf) err = command.Run() if err != nil { return nil, fmt.Errorf("read ollama -v output: %w", err) } - bs, err := io.ReadAll(buf) + bs, err := io.ReadAll(&buf) if err != nil { return nil, fmt.Errorf("read ollama -v output: %w", err) } From d3e7a49d2281f1ffc5dcccc55b8f3392cc82a0f2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 13 Dec 2024 09:08:20 +0100 Subject: [PATCH 22/42] chore: return nil --- modules/ollama/local.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index 8bdf7a4811..9f0e155e2e 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -382,22 +382,22 @@ func (c *OllamaContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Po return nat.Port(c.localCtx.port + "/tcp"), nil } -// Networks returns the networks for local Ollama binary, which is empty. +// Networks returns the networks for local Ollama binary, which is a nil slice. 
func (c *OllamaContainer) Networks(ctx context.Context) ([]string, error) { if c.localCtx == nil { return c.Container.Networks(ctx) } - return []string{}, nil + return nil, nil } -// NetworkAliases returns the network aliases for local Ollama binary, which is empty. +// NetworkAliases returns the network aliases for local Ollama binary, which is a nil map. func (c *OllamaContainer) NetworkAliases(ctx context.Context) (map[string][]string, error) { if c.localCtx == nil { return c.Container.NetworkAliases(ctx) } - return map[string][]string{}, nil + return nil, nil } // SessionID returns the session ID for local Ollama binary, which is the session ID From c38a640d8ed44a4dfb04a3581dd8c9d3dd97f506 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 13 Dec 2024 09:09:50 +0100 Subject: [PATCH 23/42] fix: return errors on terminate --- modules/ollama/local.go | 36 +++++++++++++++++------------------- 1 file changed, 17 insertions(+), 19 deletions(-) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index 9f0e155e2e..895a03dae9 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -493,38 +493,36 @@ func (c *OllamaContainer) Stop(ctx context.Context, d *time.Duration) error { } // Terminate stops the local Ollama process, removing the log file. -func (c *OllamaContainer) Terminate(ctx context.Context) (err error) { +func (c *OllamaContainer) Terminate(ctx context.Context) error { if c.localCtx == nil { return c.Container.Terminate(ctx) } // First try to stop gracefully - err = c.Stop(ctx, &defaultStopTimeout) + err := c.Stop(ctx, &defaultStopTimeout) if err != nil { return fmt.Errorf("stop ollama: %w", err) } - defer func() { - c.localCtx.mx.Lock() - defer c.localCtx.mx.Unlock() + c.localCtx.mx.Lock() + defer c.localCtx.mx.Unlock() + + if c.localCtx.logFile == nil { + return nil + } - if c.localCtx.logFile == nil { - return + // remove the log file if it exists + if _, err = os.Stat(c.localCtx.logFile.Name()); err == nil { + err = c.localCtx.logFile.Close() + if err != nil { + return err } - // remove the log file if it exists - if _, err := os.Stat(c.localCtx.logFile.Name()); err == nil { - err = c.localCtx.logFile.Close() - if err != nil { - return - } - - err = os.Remove(c.localCtx.logFile.Name()) - if err != nil { - return - } + err = os.Remove(c.localCtx.logFile.Name()) + if err != nil { + return err } - }() + } return nil } From 77a39f38a36264633cf96815df5cbd9219445cfb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 13 Dec 2024 09:11:18 +0100 Subject: [PATCH 24/42] chore: remove options type --- modules/ollama/ollama.go | 6 +++--- modules/ollama/options.go | 8 -------- 2 files changed, 3 insertions(+), 11 deletions(-) diff --git a/modules/ollama/ollama.go b/modules/ollama/ollama.go index db573ed903..3d0cc6fa4e 100644 --- a/modules/ollama/ollama.go +++ b/modules/ollama/ollama.go @@ -103,17 +103,17 @@ func Run(ctx context.Context, img string, opts ...testcontainers.ContainerCustom // always request a GPU if the host supports it opts = append(opts, withGpu()) - options := defaultOptions() + useLocal := false for _, opt := range opts { if err := opt.Customize(&genericContainerReq); err != nil { return nil, fmt.Errorf("customize: %w", err) } if _, ok := opt.(UseLocal); ok { - options.useLocal = true + useLocal = true } } - if options.useLocal { + if useLocal { container, err := runLocal(ctx, req.Env) if err == nil { return container, nil diff --git a/modules/ollama/options.go b/modules/ollama/options.go 
index 82191e66f3..ed34326d71 100644 --- a/modules/ollama/options.go +++ b/modules/ollama/options.go @@ -10,14 +10,6 @@ import ( "github.com/testcontainers/testcontainers-go" ) -type options struct { - useLocal bool -} - -func defaultOptions() options { - return options{} -} - var noopCustomizeRequestOption = func(req *testcontainers.GenericContainerRequest) error { return nil } // withGpu requests a GPU for the container, which could improve performance for some models. From ffa0b2a876fdaafe5cfa4e08ca7ce0c8eba81883 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 13 Dec 2024 09:18:00 +0100 Subject: [PATCH 25/42] chore: use a map --- modules/ollama/examples_test.go | 2 +- modules/ollama/local_test.go | 6 +++--- modules/ollama/options.go | 18 +++++------------- modules/ollama/options_test.go | 32 ++++++-------------------------- 4 files changed, 15 insertions(+), 43 deletions(-) diff --git a/modules/ollama/examples_test.go b/modules/ollama/examples_test.go index 188be45bbb..3601e0b120 100644 --- a/modules/ollama/examples_test.go +++ b/modules/ollama/examples_test.go @@ -178,7 +178,7 @@ func ExampleRun_withLocal() { ctx := context.Background() // localOllama { - ollamaContainer, err := tcollama.Run(ctx, "ollama/ollama:0.3.13", tcollama.WithUseLocal("OLLAMA_DEBUG=true")) + ollamaContainer, err := tcollama.Run(ctx, "ollama/ollama:0.3.13", tcollama.WithUseLocal(map[string]string{"OLLAMA_DEBUG": "true"})) defer func() { if err := testcontainers.TerminateContainer(ollamaContainer); err != nil { log.Printf("failed to terminate container: %s", err) diff --git a/modules/ollama/local_test.go b/modules/ollama/local_test.go index 8dabf5e295..3b95500b72 100644 --- a/modules/ollama/local_test.go +++ b/modules/ollama/local_test.go @@ -29,7 +29,7 @@ func TestRun_local(t *testing.T) { ollamaContainer, err := ollama.Run( ctx, "ollama/ollama:0.1.25", - ollama.WithUseLocal("FOO=BAR"), + ollama.WithUseLocal(map[string]string{"FOO": "BAR"}), ) testcontainers.CleanupContainer(t, ollamaContainer) require.NoError(t, err) @@ -266,7 +266,7 @@ func TestRun_localWithCustomLogFile(t *testing.T) { ctx := context.Background() - ollamaContainer, err := ollama.Run(ctx, "ollama/ollama:0.1.25", ollama.WithUseLocal("FOO=BAR")) + ollamaContainer, err := ollama.Run(ctx, "ollama/ollama:0.1.25", ollama.WithUseLocal(map[string]string{"FOO": "BAR"})) require.NoError(t, err) testcontainers.CleanupContainer(t, ollamaContainer) @@ -285,7 +285,7 @@ func TestRun_localWithCustomHost(t *testing.T) { ctx := context.Background() - ollamaContainer, err := ollama.Run(ctx, "ollama/ollama:0.1.25", ollama.WithUseLocal("FOO=BAR")) + ollamaContainer, err := ollama.Run(ctx, "ollama/ollama:0.1.25", ollama.WithUseLocal(nil)) require.NoError(t, err) testcontainers.CleanupContainer(t, ollamaContainer) diff --git a/modules/ollama/options.go b/modules/ollama/options.go index ed34326d71..4653b65169 100644 --- a/modules/ollama/options.go +++ b/modules/ollama/options.go @@ -2,8 +2,6 @@ package ollama import ( "context" - "fmt" - "strings" "github.com/docker/docker/api/types/container" @@ -44,13 +42,13 @@ var _ testcontainers.ContainerCustomizer = (*UseLocal)(nil) // UseLocal will use the local Ollama instance instead of pulling the Docker image. type UseLocal struct { - env []string + env map[string]string } // WithUseLocal the module will use the local Ollama instance instead of pulling the Docker image. 
 // Pass the environment variables you need to set for the Ollama binary to be used,
 // in the format of "KEY=VALUE". KeyValue pairs with the wrong format will cause an error.
-func WithUseLocal(keyVal ...string) UseLocal {
+func WithUseLocal(keyVal map[string]string) UseLocal {
 	return UseLocal{env: keyVal}
 }
 
@@ -58,15 +56,9 @@ func WithUseLocal(keyVal map[string]string) UseLocal {
 // and setting them as environment variables for the Ollama binary.
 // In the case of an invalid key value pair, an error is returned.
 func (u UseLocal) Customize(req *testcontainers.GenericContainerRequest) error {
-	env := make(map[string]string)
-	for _, kv := range u.env {
-		parts := strings.SplitN(kv, "=", 2)
-		if len(parts) != 2 {
-			return fmt.Errorf("invalid environment variable: %s", kv)
-		}
-
-		env[parts[0]] = parts[1]
+	if len(u.env) == 0 {
+		return nil
 	}
 
-	return testcontainers.WithEnv(env)(req)
+	return testcontainers.WithEnv(u.env)(req)
 }

diff --git a/modules/ollama/options_test.go b/modules/ollama/options_test.go
index f842d15a17..46872d0dd4 100644
--- a/modules/ollama/options_test.go
+++ b/modules/ollama/options_test.go
@@ -12,38 +12,18 @@ import (
 func TestWithUseLocal(t *testing.T) {
 	req := testcontainers.GenericContainerRequest{}
 
-	t.Run("keyVal/valid", func(t *testing.T) {
-		opt := ollama.WithUseLocal("OLLAMA_MODELS=/path/to/models")
+	t.Run("empty", func(t *testing.T) {
+		opt := ollama.WithUseLocal(nil)
 		err := opt.Customize(&req)
 		require.NoError(t, err)
-		require.Equal(t, "/path/to/models", req.Env["OLLAMA_MODELS"])
-	})
-
-	t.Run("keyVal/invalid", func(t *testing.T) {
-		opt := ollama.WithUseLocal("OLLAMA_MODELS")
-		err := opt.Customize(&req)
-		require.Error(t, err)
-	})
-
-	t.Run("keyVal/valid/multiple", func(t *testing.T) {
-		opt := ollama.WithUseLocal("OLLAMA_MODELS=/path/to/models", "OLLAMA_HOST=localhost")
-		err := opt.Customize(&req)
-		require.NoError(t, err)
-		require.Equal(t, "/path/to/models", req.Env["OLLAMA_MODELS"])
-		require.Equal(t, "localhost", req.Env["OLLAMA_HOST"])
+		require.Empty(t, req.Env)
 	})
 
-	t.Run("keyVal/valid/multiple-equals", func(t *testing.T) {
-		opt := ollama.WithUseLocal("OLLAMA_MODELS=/path/to/models", "OLLAMA_HOST=localhost=127.0.0.1")
+	t.Run("valid", func(t *testing.T) {
+		opt := ollama.WithUseLocal(map[string]string{"OLLAMA_MODELS": "/path/to/models", "OLLAMA_HOST": "localhost:1234"})
 		err := opt.Customize(&req)
 		require.NoError(t, err)
 		require.Equal(t, "/path/to/models", req.Env["OLLAMA_MODELS"])
-		require.Equal(t, "localhost=127.0.0.1", req.Env["OLLAMA_HOST"])
-	})
-
-	t.Run("keyVal/invalid/multiple", func(t *testing.T) {
-		opt := ollama.WithUseLocal("OLLAMA_MODELS=/path/to/models", "OLLAMA_HOST")
-		err := opt.Customize(&req)
-		require.Error(t, err)
+		require.Equal(t, "localhost:1234", req.Env["OLLAMA_HOST"])
 	})
 }

From 6c39254bf5fc3052cfb27ab418dcb8a1a2d70356 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?=
Date: Fri, 13 Dec 2024 11:51:40 +0100
Subject: [PATCH 26/42] chore: simplify error on wait
---
 modules/ollama/local.go | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/modules/ollama/local.go b/modules/ollama/local.go
index 895a03dae9..adfe07b6f0 100644
--- a/modules/ollama/local.go
+++ b/modules/ollama/local.go
@@ -145,14 +145,9 @@ func (c *OllamaContainer) waitForOllama(ctx context.Context) error {
 			return fmt.Errorf("wait for ollama to start: %w", err)
 		}
 
-		bs, err := io.ReadAll(logs)
-		if err != nil {
-			return fmt.Errorf("read ollama logs: %w", err)
-		}
-
-		testcontainers.Logger.Printf("ollama logs:\n%s", string(bs))
-
-		return fmt.Errorf("wait for ollama to start: %w", err)
+		// ignore error as we already have an error and the output is already logged
+		bs, _ := io.ReadAll(logs)
+		return fmt.Errorf("wait for ollama to start: %w. Container logs:\n%s", err, string(bs))
 	}
 
 	return nil

From 91396ee9f2c71c4e19992a733b4247677bd0a0f5 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?=
Date: Mon, 16 Dec 2024 07:49:37 +0100
Subject: [PATCH 27/42] chore: wrap start logic around the localContext
---
 modules/ollama/local.go | 40 ++++++++++++++++------------------------
 1 file changed, 16 insertions(+), 24 deletions(-)

diff --git a/modules/ollama/local.go b/modules/ollama/local.go
index adfe07b6f0..f1458f8f9f 100644
--- a/modules/ollama/local.go
+++ b/modules/ollama/local.go
@@ -72,18 +72,11 @@ func runLocal(ctx context.Context, env map[string]string) (*OllamaContainer, err
 		localCtx: localCtx,
 	}
 
-	c.localCtx.mx.Lock()
-
-	serveCmd, logFile, err := startOllama(ctx, c.localCtx)
+	err := c.localCtx.startOllama(ctx)
 	if err != nil {
-		c.localCtx.mx.Unlock()
 		return nil, fmt.Errorf("start ollama: %w", err)
 	}
-	c.localCtx.serveCmd = serveCmd
-	c.localCtx.logFile = logFile
-	c.localCtx.mx.Unlock()
-
 	err = c.waitForOllama(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("wait for ollama to start: %w", err)
@@ -110,14 +103,21 @@ func logFile() (*os.File, error) {
 
 // startOllama starts the Ollama serve command in the background, writing to the
 // provided log file.
-func startOllama(ctx context.Context, localCtx *localContext) (*exec.Cmd, *os.File, error) {
+func (localCtx *localContext) startOllama(ctx context.Context) error {
+	localCtx.mx.Lock()
+	defer localCtx.mx.Unlock() // unlock before waiting for the process to be ready
+
+	if localCtx.serveCmd != nil {
+		return nil
+	}
+
 	serveCmd := exec.CommandContext(ctx, "ollama", "serve")
 	serveCmd.Env = append(serveCmd.Env, localCtx.env...)
 	serveCmd.Env = append(serveCmd.Env, os.Environ()...)
 	logFile, err := logFile()
 	if err != nil {
-		return nil, nil, fmt.Errorf("ollama log file: %w", err)
+		return fmt.Errorf("ollama log file: %w", err)
 	}
 
 	serveCmd.Stdout = logFile
@@ -126,10 +126,13 @@ func (localCtx *localContext) startOllama(ctx context.Context) error {
 	// Run the ollama serve command in background
 	err = serveCmd.Start()
 	if err != nil {
-		return nil, nil, fmt.Errorf("start ollama serve: %w", err)
+		return fmt.Errorf("start ollama serve: %w", err)
 	}
 
-	return serveCmd, logFile, nil
+	localCtx.serveCmd = serveCmd
+	localCtx.logFile = logFile
+
+	return nil
 }
 
 // waitForOllama Wait until the Ollama process is ready, checking that the log file contains
@@ -411,21 +414,10 @@ func (c *OllamaContainer) Start(ctx context.Context) error {
 		return c.Container.Start(ctx)
 	}
 
-	c.localCtx.mx.Lock()
-
-	if c.localCtx.serveCmd != nil {
-		c.localCtx.mx.Unlock()
-		return nil
-	}
-
-	serveCmd, logFile, err := startOllama(ctx, c.localCtx)
+	err := c.localCtx.startOllama(ctx)
 	if err != nil {
-		c.localCtx.mx.Unlock()
 		return fmt.Errorf("start ollama: %w", err)
 	}
-	c.localCtx.serveCmd = serveCmd
-	c.localCtx.logFile = logFile
-	c.localCtx.mx.Unlock() // unlock before waiting for the process to be ready
 
 	waitCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
 	defer cancel()

From c68ff22004e05c7efb0d28ff2cb29c776c8c5701 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?=
Date: Mon, 16 Dec 2024 10:24:43 +0100
Subject: [PATCH 28/42] chore: fold
---
 modules/ollama/local.go | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/modules/ollama/local.go b/modules/ollama/local.go
index f1458f8f9f..66b49a34d7 100644
--- a/modules/ollama/local.go
+++ b/modules/ollama/local.go
@@ -304,8 +304,7 @@ func (c *OllamaContainer) Inspect(ctx context.Context) (*types.ContainerJSON, er
 	// read the version from the ollama binary
 	var buf bytes.Buffer
 	command := prepareExec(ctx, "ollama", []string{"-v"}, c.localCtx.env, &buf)
-	err = command.Run()
-	if err != nil {
+	if err := command.Run(); err != nil {
 		return nil, fmt.Errorf("read ollama -v output: %w", err)
 	}

From ebad12ced557c935efb83b27fbdd13fddd55c248 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?=
Date: Mon, 16 Dec 2024 11:25:08 +0100
Subject: [PATCH 29/42] chore: merge wait into start
---
 modules/ollama/local.go | 47 ++++++++++++++++++---------------------
 1 file changed, 23 insertions(+), 24 deletions(-)

diff --git a/modules/ollama/local.go b/modules/ollama/local.go
index 66b49a34d7..82eaa17678 100644
--- a/modules/ollama/local.go
+++ b/modules/ollama/local.go
@@ -72,16 +72,11 @@ func runLocal(ctx context.Context, env map[string]string) (*OllamaContainer, err
 		localCtx: localCtx,
 	}
 
-	err := c.localCtx.startOllama(ctx)
+	err := c.startLocalOllama(ctx)
 	if err != nil {
 		return nil, fmt.Errorf("start ollama: %w", err)
 	}
 
-	err = c.waitForOllama(ctx)
-	if err != nil {
-		return nil, fmt.Errorf("wait for ollama to start: %w", err)
-	}
-
 	return c, nil
 }
 
@@ -101,22 +96,22 @@ func logFile() (*os.File, error) {
 	return file, nil
 }
 
-// startOllama starts the Ollama serve command in the background, writing to the
+// startLocalOllama starts the Ollama serve command in the background, writing to the
 // provided log file.
-func (localCtx *localContext) startOllama(ctx context.Context) error {
-	localCtx.mx.Lock()
-	defer localCtx.mx.Unlock() // unlock before waiting for the process to be ready
-
-	if localCtx.serveCmd != nil {
+func (c *OllamaContainer) startLocalOllama(ctx context.Context) error {
+	if c.localCtx.serveCmd != nil {
 		return nil
 	}
 
+	c.localCtx.mx.Lock()
+
 	serveCmd := exec.CommandContext(ctx, "ollama", "serve")
-	serveCmd.Env = append(serveCmd.Env, localCtx.env...)
+	serveCmd.Env = append(serveCmd.Env, c.localCtx.env...)
 	serveCmd.Env = append(serveCmd.Env, os.Environ()...)
 
 	logFile, err := logFile()
 	if err != nil {
+		c.localCtx.mx.Unlock()
 		return fmt.Errorf("ollama log file: %w", err)
 	}
 
@@ -126,11 +121,23 @@ func (c *OllamaContainer) startLocalOllama(ctx context.Context) error {
 	// Run the ollama serve command in background
 	err = serveCmd.Start()
 	if err != nil {
+		c.localCtx.mx.Unlock()
 		return fmt.Errorf("start ollama serve: %w", err)
 	}
 
-	localCtx.serveCmd = serveCmd
-	localCtx.logFile = logFile
+	c.localCtx.serveCmd = serveCmd
+	c.localCtx.logFile = logFile
+
+	// unlock before waiting for the process to be ready
+	c.localCtx.mx.Unlock()
+
+	waitCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
+	defer cancel()
+
+	err = c.waitForOllama(waitCtx)
+	if err != nil {
+		return fmt.Errorf("wait for ollama to start: %w", err)
+	}
 
 	return nil
 }
@@ -413,19 +420,11 @@ func (c *OllamaContainer) Start(ctx context.Context) error {
 		return c.Container.Start(ctx)
 	}
 
-	err := c.localCtx.startOllama(ctx)
+	err := c.startLocalOllama(ctx)
 	if err != nil {
 		return fmt.Errorf("start ollama: %w", err)
 	}
 
-	waitCtx, cancel := context.WithTimeout(ctx, 10*time.Second)
-	defer cancel()
-
-	err = c.waitForOllama(waitCtx)
-	if err != nil {
-		return fmt.Errorf("wait for ollama to start: %w", err)
-	}
-
 	return nil
 }

From 88f58afe68d0c47ae319ea565548623464d03bee Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?=
Date: Mon, 16 Dec 2024 11:30:44 +0100
Subject: [PATCH 30/42] fix: use proper ContainerState
---
 modules/ollama/local.go      | 4 ++--
 modules/ollama/local_test.go | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/modules/ollama/local.go b/modules/ollama/local.go
index 82eaa17678..7607817b36 100644
--- a/modules/ollama/local.go
+++ b/modules/ollama/local.go
@@ -439,7 +439,7 @@ func (c *OllamaContainer) State(ctx context.Context) (*types.ContainerState, err
 	defer c.localCtx.mx.Unlock()
 
 	if c.localCtx.serveCmd == nil {
-		return &types.ContainerState{Status: "stopped"}, nil
+		return &types.ContainerState{Status: "exited"}, nil
 	}
 
 	// Check if process is still running. Signal(0) is a special case in Unix-like systems.
 	// - It performs all the normal error checking (permissions, process existence, etc.)
// - But it doesn't actually send any signal to the process if err := c.localCtx.serveCmd.Process.Signal(syscall.Signal(0)); err != nil { - return &types.ContainerState{Status: "stopped"}, nil + return &types.ContainerState{Status: "created"}, nil } // Setting the Running field because it's required by the wait strategy diff --git a/modules/ollama/local_test.go b/modules/ollama/local_test.go index 3b95500b72..bb063fb361 100644 --- a/modules/ollama/local_test.go +++ b/modules/ollama/local_test.go @@ -220,7 +220,7 @@ func TestRun_local(t *testing.T) { state, err := ollamaContainer.State(ctx) require.NoError(t, err) - require.Equal(t, "stopped", state.Status) + require.Equal(t, "exited", state.Status) err = ollamaContainer.Start(ctx) require.NoError(t, err) @@ -257,7 +257,7 @@ func TestRun_local(t *testing.T) { state, err := ollamaContainer.State(ctx) require.NoError(t, err) - require.Equal(t, "stopped", state.Status) + require.Equal(t, "exited", state.Status) }) } From 80c76f913da7d11d8ecbf96ec4b01b4b716d8995 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Mon, 16 Dec 2024 11:33:33 +0100 Subject: [PATCH 31/42] fix: remove extra conversion --- modules/ollama/local.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index 7607817b36..f957b24361 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -468,7 +468,7 @@ func (c *OllamaContainer) Stop(ctx context.Context, d *time.Duration) error { return nil } - if err := c.localCtx.serveCmd.Process.Signal(syscall.Signal(syscall.SIGTERM)); err != nil { + if err := c.localCtx.serveCmd.Process.Signal(syscall.SIGTERM); err != nil { return fmt.Errorf("signal ollama: %w", err) } From 8c0ee3d97372bf149cb166caecd67954bf6f0764 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Mon, 16 Dec 2024 13:13:14 +0100 Subject: [PATCH 32/42] chore: handle remove log file errors properly --- modules/ollama/local.go | 19 +++++------ modules/ollama/local_unit_test.go | 55 +++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+), 11 deletions(-) create mode 100644 modules/ollama/local_unit_test.go diff --git a/modules/ollama/local.go b/modules/ollama/local.go index f957b24361..ce6be0cd0a 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "io" + "io/fs" "net" "os" "os/exec" @@ -496,18 +497,14 @@ func (c *OllamaContainer) Terminate(ctx context.Context) error { return nil } - // remove the log file if it exists - if _, err = os.Stat(c.localCtx.logFile.Name()); err == nil { - err = c.localCtx.logFile.Close() - if err != nil { - return err - } + var errs []error + if err = c.localCtx.logFile.Close(); err != nil { + errs = append(errs, fmt.Errorf("close log: %w", err)) + } - err = os.Remove(c.localCtx.logFile.Name()) - if err != nil { - return err - } + if err = os.Remove(c.localCtx.logFile.Name()); err != nil && !errors.Is(err, fs.ErrNotExist) { + errs = append(errs, fmt.Errorf("remove log: %w", err)) } - return nil + return errors.Join(errs...) 
} diff --git a/modules/ollama/local_unit_test.go b/modules/ollama/local_unit_test.go new file mode 100644 index 0000000000..95d9b93638 --- /dev/null +++ b/modules/ollama/local_unit_test.go @@ -0,0 +1,55 @@ +package ollama + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestRun_localWithCustomLogFileError(t *testing.T) { + t.Run("terminate/close-log-error", func(t *testing.T) { + // Create a temporary file for testing + f, err := os.CreateTemp(t.TempDir(), "test-log-*") + require.NoError(t, err) + + // Close the file before termination to force a "file already closed" error + err = f.Close() + require.NoError(t, err) + + c := &OllamaContainer{ + localCtx: &localContext{ + logFile: f, + }, + } + err = c.Terminate(context.Background()) + require.Error(t, err) + require.ErrorContains(t, err, "close log:") + }) + + t.Run("terminate/log-file-not-removable", func(t *testing.T) { + // Create a temporary file for testing + f, err := os.CreateTemp(t.TempDir(), "test-log-*") + require.NoError(t, err) + defer func() { + // Cleanup: restore permissions + os.Chmod(filepath.Dir(f.Name()), 0700) + }() + + // Make the file read-only and its parent directory read-only + // This should cause removal to fail on most systems + dir := filepath.Dir(f.Name()) + require.NoError(t, os.Chmod(dir, 0500)) + + c := &OllamaContainer{ + localCtx: &localContext{ + logFile: f, + }, + } + err = c.Terminate(context.Background()) + require.Error(t, err) + require.ErrorContains(t, err, "remove log:") + }) +} From de1339aa3e3d7e2ea38459f45eaaab3f070363fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Mon, 16 Dec 2024 13:20:21 +0100 Subject: [PATCH 33/42] chore: go back to string in env vars --- modules/ollama/examples_test.go | 2 +- modules/ollama/local_test.go | 6 +++--- modules/ollama/options.go | 20 ++++++++++++++------ modules/ollama/options_test.go | 32 ++++++++++++++++++++++++++------ 4 files changed, 44 insertions(+), 16 deletions(-) diff --git a/modules/ollama/examples_test.go b/modules/ollama/examples_test.go index 3601e0b120..188be45bbb 100644 --- a/modules/ollama/examples_test.go +++ b/modules/ollama/examples_test.go @@ -178,7 +178,7 @@ func ExampleRun_withLocal() { ctx := context.Background() // localOllama { - ollamaContainer, err := tcollama.Run(ctx, "ollama/ollama:0.3.13", tcollama.WithUseLocal(map[string]string{"OLLAMA_DEBUG": "true"})) + ollamaContainer, err := tcollama.Run(ctx, "ollama/ollama:0.3.13", tcollama.WithUseLocal("OLLAMA_DEBUG=true")) defer func() { if err := testcontainers.TerminateContainer(ollamaContainer); err != nil { log.Printf("failed to terminate container: %s", err) diff --git a/modules/ollama/local_test.go b/modules/ollama/local_test.go index bb063fb361..7bd073ca5e 100644 --- a/modules/ollama/local_test.go +++ b/modules/ollama/local_test.go @@ -29,7 +29,7 @@ func TestRun_local(t *testing.T) { ollamaContainer, err := ollama.Run( ctx, "ollama/ollama:0.1.25", - ollama.WithUseLocal(map[string]string{"FOO": "BAR"}), + ollama.WithUseLocal("FOO=BAR"), ) testcontainers.CleanupContainer(t, ollamaContainer) require.NoError(t, err) @@ -266,7 +266,7 @@ func TestRun_localWithCustomLogFile(t *testing.T) { ctx := context.Background() - ollamaContainer, err := ollama.Run(ctx, "ollama/ollama:0.1.25", ollama.WithUseLocal(map[string]string{"FOO": "BAR"})) + ollamaContainer, err := ollama.Run(ctx, "ollama/ollama:0.1.25", ollama.WithUseLocal("FOO=BAR")) require.NoError(t, err) 
 	testcontainers.CleanupContainer(t, ollamaContainer)
 
@@ -285,7 +285,7 @@ func TestRun_localWithCustomHost(t *testing.T) {
 
 	ctx := context.Background()
 
-	ollamaContainer, err := ollama.Run(ctx, "ollama/ollama:0.1.25", ollama.WithUseLocal(nil))
+	ollamaContainer, err := ollama.Run(ctx, "ollama/ollama:0.1.25", ollama.WithUseLocal())
 	require.NoError(t, err)
 	testcontainers.CleanupContainer(t, ollamaContainer)
 
diff --git a/modules/ollama/options.go b/modules/ollama/options.go
index 4653b65169..4761a28530 100644
--- a/modules/ollama/options.go
+++ b/modules/ollama/options.go
@@ -2,6 +2,8 @@ package ollama
 
 import (
 	"context"
+	"fmt"
+	"strings"
 
 	"github.com/docker/docker/api/types/container"
 
@@ -42,23 +44,29 @@ var _ testcontainers.ContainerCustomizer = (*UseLocal)(nil)
 
 // UseLocal will use the local Ollama instance instead of pulling the Docker image.
 type UseLocal struct {
-	env map[string]string
+	env []string
 }
 
 // WithUseLocal the module will use the local Ollama instance instead of pulling the Docker image.
 // Pass the environment variables you need to set for the Ollama binary to be used,
 // in the format of "KEY=VALUE". KeyValue pairs with the wrong format will cause an error.
-func WithUseLocal(keyVal map[string]string) UseLocal {
-	return UseLocal{env: keyVal}
+func WithUseLocal(values ...string) UseLocal {
+	return UseLocal{env: values}
 }
 
 // Customize implements the ContainerCustomizer interface, taking the key value pairs
 // and setting them as environment variables for the Ollama binary.
 // In the case of an invalid key value pair, an error is returned.
 func (u UseLocal) Customize(req *testcontainers.GenericContainerRequest) error {
-	if len(u.env) == 0 {
-		return nil
+	env := make(map[string]string)
+	for _, kv := range u.env {
+		parts := strings.SplitN(kv, "=", 2)
+		if len(parts) != 2 {
+			return fmt.Errorf("invalid environment variable: %s", kv)
+		}
+
+		env[parts[0]] = parts[1]
 	}
 
-	return testcontainers.WithEnv(u.env)(req)
+	return testcontainers.WithEnv(env)(req)
 }

diff --git a/modules/ollama/options_test.go b/modules/ollama/options_test.go
index 46872d0dd4..f842d15a17 100644
--- a/modules/ollama/options_test.go
+++ b/modules/ollama/options_test.go
@@ -12,18 +12,38 @@ import (
 func TestWithUseLocal(t *testing.T) {
 	req := testcontainers.GenericContainerRequest{}
 
-	t.Run("empty", func(t *testing.T) {
-		opt := ollama.WithUseLocal(nil)
+	t.Run("keyVal/valid", func(t *testing.T) {
+		opt := ollama.WithUseLocal("OLLAMA_MODELS=/path/to/models")
 		err := opt.Customize(&req)
 		require.NoError(t, err)
-		require.Empty(t, req.Env)
+		require.Equal(t, "/path/to/models", req.Env["OLLAMA_MODELS"])
+	})
+
+	t.Run("keyVal/invalid", func(t *testing.T) {
+		opt := ollama.WithUseLocal("OLLAMA_MODELS")
+		err := opt.Customize(&req)
+		require.Error(t, err)
+	})
+
+	t.Run("keyVal/valid/multiple", func(t *testing.T) {
+		opt := ollama.WithUseLocal("OLLAMA_MODELS=/path/to/models", "OLLAMA_HOST=localhost")
+		err := opt.Customize(&req)
+		require.NoError(t, err)
+		require.Equal(t, "/path/to/models", req.Env["OLLAMA_MODELS"])
+		require.Equal(t, "localhost", req.Env["OLLAMA_HOST"])
 	})
 
-	t.Run("valid", func(t *testing.T) {
-		opt := ollama.WithUseLocal(map[string]string{"OLLAMA_MODELS": "/path/to/models", "OLLAMA_HOST": "localhost:1234"})
+	t.Run("keyVal/valid/multiple-equals", func(t *testing.T) {
+		opt := ollama.WithUseLocal("OLLAMA_MODELS=/path/to/models", "OLLAMA_HOST=localhost=127.0.0.1")
 		err := opt.Customize(&req)
 		require.NoError(t, err)
 		require.Equal(t, "/path/to/models", req.Env["OLLAMA_MODELS"])
-		require.Equal(t, "localhost:1234", req.Env["OLLAMA_HOST"])
+		require.Equal(t, "localhost=127.0.0.1", req.Env["OLLAMA_HOST"])
+	})
+
+	t.Run("keyVal/invalid/multiple", func(t *testing.T) {
+		opt := ollama.WithUseLocal("OLLAMA_MODELS=/path/to/models", "OLLAMA_HOST")
+		err := opt.Customize(&req)
+		require.Error(t, err)
 	})
 }

From 8a18b3b4a40281129fb77649612a61302e368d00 Mon Sep 17 00:00:00 2001
From: Steven Hartland
Date: Tue, 17 Dec 2024 23:21:03 +0000
Subject: [PATCH 34/42] refactor(ollama): local process

Refactor local process handling for Ollama using a container
implementation avoiding the wrapping methods. This defaults to running
the binary with an ephemeral port to avoid port conflicts. This
behaviour can be overridden by setting OLLAMA_HOST either in the parent
environment or in the values passed via WithUseLocal.

Improve API compatibility with:
- Multiplexed output streams
- State reporting
- Exec option processing
- WaitingFor customisation

Fix Container implementation:
- Port management
- Running checks
- Terminate processing
- Endpoint argument definition
- Add missing methods
- Consistent environment handling
---
 modules/ollama/local.go           | 771 ++++++++++++++++++------------
 modules/ollama/local_test.go      | 403 +++++++++++-----
 modules/ollama/local_unit_test.go |  55 ---
 modules/ollama/ollama.go          |  46 +-
 modules/ollama/ollama_test.go     |   8 +
 modules/ollama/options.go         |  43 +-
 6 files changed, 810 insertions(+), 516 deletions(-)
 delete mode 100644 modules/ollama/local_unit_test.go

diff --git a/modules/ollama/local.go b/modules/ollama/local.go
index ce6be0cd0a..c7200178d2 100644
--- a/modules/ollama/local.go
+++ b/modules/ollama/local.go
@@ -10,6 +10,7 @@ import (
 	"net"
 	"os"
 	"os/exec"
+	"regexp"
 	"strings"
 	"sync"
 	"syscall"
@@ -18,6 +19,8 @@ import (
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/network"
+	"github.com/docker/docker/errdefs"
+	"github.com/docker/docker/pkg/stdcopy"
 	"github.com/docker/go-connections/nat"
 
 	"github.com/testcontainers/testcontainers-go"
@@ -26,485 +29,623 @@ import (
 
 const (
-	localIP   = "127.0.0.1"
-	localPort = "11434"
+	localPort       = "11434"
+	localBinary     = "ollama"
+	localServeArg   = "serve"
+	localLogRegex   = `Listening on (.*:\d+) \(version\s(.*)\)`
+	localNamePrefix = "local-ollama"
+	localHostVar    = "OLLAMA_HOST"
+	localLogVar     = "OLLAMA_LOGFILE"
 )
 
 var (
-	defaultStopTimeout      = time.Second * 5
-	errCopyAPIsNotSupported = errors.New("copy APIs are not supported for local Ollama binary")
+	// Ensure localContext implements the testcontainers.Container interface.
+	_ testcontainers.Container = &localProcess{}
+
+	// defaultStopTimeout is the default timeout for stopping the local Ollama process.
+	defaultStopTimeout = time.Second * 5
+
+	// zeroTime is the zero time value.
+	zeroTime time.Time
+
+	// reLogDetails is the regular expression to extract the listening address and version from the log.
+	reLogDetails = regexp.MustCompile(localLogRegex)
 )
 
-// localContext is a type holding the context for local Ollama executions.
-type localContext struct {
-	env      []string
-	serveCmd *exec.Cmd
-	logFile  *os.File
-	mx       sync.Mutex
-	host     string
-	port     string
-}
+// localProcess emulates the Ollama container using a local process to improve performance.
+type localProcess struct {
+	sessionID string
 
-// runLocal calls the local Ollama binary instead of using a Docker container.
-func runLocal(ctx context.Context, env map[string]string) (*OllamaContainer, error) { - // Apply the environment variables to the command. - cmdEnv := make([]string, 0, len(env)*2) - for k, v := range env { - cmdEnv = append(cmdEnv, k+"="+v) - } + // env is the combined environment variables passed to the Ollama binary. + env []string - localCtx := &localContext{ - env: cmdEnv, - host: localIP, - port: localPort, - } + // cmd is the command that runs the Ollama binary, not valid externally if nil. + cmd *exec.Cmd - if envHost := os.Getenv("OLLAMA_HOST"); envHost != "" { - host, port, err := net.SplitHostPort(envHost) - if err != nil { - return nil, fmt.Errorf("invalid OLLAMA_HOST: %w", err) - } + // logName and logFile are the file where the Ollama logs are written. + logName string + logFile *os.File - localCtx.host = host - localCtx.port = port - } + // host, port and version are extracted from log on startup. + host string + port string + version string - c := &OllamaContainer{ - localCtx: localCtx, - } + // waitFor is the strategy to wait for the process to be ready. + waitFor wait.Strategy - err := c.startLocalOllama(ctx) - if err != nil { - return nil, fmt.Errorf("start ollama: %w", err) - } + // done is closed when the process is finished. + done chan struct{} - return c, nil + // wg is used to wait for the process to finish. + wg sync.WaitGroup + + // startedAt is the time when the process started. + startedAt time.Time + + // mtx is used to synchronize access to the process state fields below. + mtx sync.Mutex + + // finishedAt is the time when the process finished. + finishedAt time.Time + + // exitErr is the error returned by the process. + exitErr error } -// logFile returns an existing log file or creates a new one if it doesn't exist. -func logFile() (*os.File, error) { - logName := "local-ollama-" + testcontainers.SessionID() + ".log" +// runLocal returns an OllamaContainer that uses the local Ollama binary instead of using a Docker container. +func runLocal(ctx context.Context, req testcontainers.GenericContainerRequest) (*OllamaContainer, error) { + // TODO: validate the request and return an error if it + // contains any unsupported elements. + + sessionID := testcontainers.SessionID() + local := &localProcess{ + sessionID: sessionID, + env: make([]string, 0, len(req.Env)), + waitFor: req.WaitingFor, + logName: localNamePrefix + "-" + sessionID + ".log", + } + + // Apply the environment variables to the command and + // override the log file if specified. + for k, v := range req.Env { + local.env = append(local.env, k+"="+v) + if k == localLogVar { + local.logName = v + } + } - if envLogName := os.Getenv("OLLAMA_LOGFILE"); envLogName != "" { - logName = envLogName + err := local.Start(ctx) + var c *OllamaContainer + if local.cmd != nil { + c = &OllamaContainer{Container: local} } - file, err := os.Create(logName) if err != nil { - return nil, fmt.Errorf("create ollama log file: %w", err) + return nil, fmt.Errorf("start ollama: %w", err) } - return file, nil + return c, nil } -// startLocalOllama starts the Ollama serve command in the background, writing to the -// provided log file. -func (c *OllamaContainer) startLocalOllama(ctx context.Context) error { - if c.localCtx.serveCmd != nil { - return nil +// Start implements testcontainers.Container interface for the local Ollama binary. 
+func (c *localProcess) Start(ctx context.Context) error { + if c.IsRunning() { + return errors.New("already running") } - c.localCtx.mx.Lock() + cmd := exec.CommandContext(ctx, localBinary, localServeArg) + cmd.Env = c.env - serveCmd := exec.CommandContext(ctx, "ollama", "serve") - serveCmd.Env = append(serveCmd.Env, c.localCtx.env...) - serveCmd.Env = append(serveCmd.Env, os.Environ()...) - - logFile, err := logFile() + var err error + c.logFile, err = os.Create(c.logName) if err != nil { - c.localCtx.mx.Unlock() - return fmt.Errorf("ollama log file: %w", err) + return fmt.Errorf("create ollama log file: %w", err) } - serveCmd.Stdout = logFile - serveCmd.Stderr = logFile + // Multiplex stdout and stderr to the log file matching the Docker API. + cmd.Stdout = stdcopy.NewStdWriter(c.logFile, stdcopy.Stdout) + cmd.Stderr = stdcopy.NewStdWriter(c.logFile, stdcopy.Stderr) - // Run the ollama serve command in background - err = serveCmd.Start() - if err != nil { - c.localCtx.mx.Unlock() - return fmt.Errorf("start ollama serve: %w", err) + // Run the ollama serve command in background. + if err = cmd.Start(); err != nil { + return fmt.Errorf("start ollama serve: %w", errors.Join(err, c.cleanupLog())) } - c.localCtx.serveCmd = serveCmd - c.localCtx.logFile = logFile + // Past this point, the process was started successfully. + c.cmd = cmd + c.startedAt = time.Now() - // unlock before waiting for the process to be ready - c.localCtx.mx.Unlock() + // Reset the details to allow multiple start / stop cycles. + c.done = make(chan struct{}) + c.mtx.Lock() + c.finishedAt = zeroTime + c.exitErr = nil + c.mtx.Unlock() - waitCtx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() + // Wait for the process to finish in a goroutine. + c.wg.Add(1) + go func() { + defer func() { + c.wg.Done() + close(c.done) + }() - err = c.waitForOllama(waitCtx) - if err != nil { - return fmt.Errorf("wait for ollama to start: %w", err) + err := c.cmd.Wait() + c.mtx.Lock() + defer c.mtx.Unlock() + if err != nil { + c.exitErr = fmt.Errorf("process wait: %w", err) + } + c.finishedAt = time.Now() + }() + + if err = c.waitStrategy(ctx); err != nil { + return fmt.Errorf("wait strategy: %w", err) + } + + if err := c.extractLogDetails(ctx); err != nil { + return fmt.Errorf("extract log details: %w", err) } return nil } -// waitForOllama Wait until the Ollama process is ready, checking that the log file contains -// the "Listening on 127.0.0.1:11434" message -func (c *OllamaContainer) waitForOllama(ctx context.Context) error { - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - err := wait.ForLog("Listening on "+c.localCtx.host+":"+c.localCtx.port).WaitUntilReady(ctx, c) - if err != nil { - logs, err := c.Logs(ctx) - if err != nil { - return fmt.Errorf("wait for ollama to start: %w", err) +// waitStrategy waits until the Ollama process is ready. +func (c *localProcess) waitStrategy(ctx context.Context) error { + if err := c.waitFor.WaitUntilReady(ctx, c); err != nil { + logs, lerr := c.Logs(ctx) + if lerr != nil { + return errors.Join(err, lerr) } + defer logs.Close() + + var stderr, stdout bytes.Buffer + _, cerr := stdcopy.StdCopy(&stdout, &stderr, logs) - // ignore error as we already have an error and the output is already logged - bs, _ := io.ReadAll(logs) - return fmt.Errorf("wait for ollama to start: %w. 
Container logs:\n%s", err, string(bs)) + return fmt.Errorf( + "%w (stdout: %s, stderr: %s)", + errors.Join(err, cerr), + strings.TrimSpace(stdout.String()), + strings.TrimSpace(stderr.String()), + ) } return nil } -// ContainerIP returns the IP address of the local Ollama binary. -func (c *OllamaContainer) ContainerIP(ctx context.Context) (string, error) { - if c.localCtx == nil { - return c.Container.ContainerIP(ctx) +// extractLogDetails extracts the listening address and version from the log. +func (c *localProcess) extractLogDetails(ctx context.Context) error { + rc, err := c.Logs(ctx) + if err != nil { + return fmt.Errorf("logs: %w", err) } + defer rc.Close() - return localIP, nil -} - -// ContainerIPs returns a slice with the IP address of the local Ollama binary. -func (c *OllamaContainer) ContainerIPs(ctx context.Context) ([]string, error) { - if c.localCtx == nil { - return c.Container.ContainerIPs(ctx) + bs, err := io.ReadAll(rc) + if err != nil { + return fmt.Errorf("read logs: %w", err) } - return []string{localIP}, nil -} + matches := reLogDetails.FindSubmatch(bs) + if len(matches) != 3 { + return errors.New("address and version not found") + } -// CopyToContainer is a no-op for the local Ollama binary. -func (c *OllamaContainer) CopyToContainer(ctx context.Context, fileContent []byte, containerFilePath string, fileMode int64) error { - if c.localCtx == nil { - return c.Container.CopyToContainer(ctx, fileContent, containerFilePath, fileMode) + c.host, c.port, err = net.SplitHostPort(string(matches[1])) + if err != nil { + return fmt.Errorf("split host port: %w", err) } - return errCopyAPIsNotSupported -} + // Set OLLAMA_HOST variable to the extracted host so Exec can use it. + c.env = append(c.env, localHostVar+"="+string(matches[1])) + c.version = string(matches[2]) -// CopyDirToContainer is a no-op for the local Ollama binary. -func (c *OllamaContainer) CopyDirToContainer(ctx context.Context, hostDirPath string, containerParentPath string, fileMode int64) error { - if c.localCtx == nil { - return c.Container.CopyDirToContainer(ctx, hostDirPath, containerParentPath, fileMode) - } + return nil +} - return errCopyAPIsNotSupported +// ContainerIP implements testcontainers.Container interface for the local Ollama binary. +func (c *localProcess) ContainerIP(ctx context.Context) (string, error) { + return c.host, nil } -// CopyFileToContainer is a no-op for the local Ollama binary. -func (c *OllamaContainer) CopyFileToContainer(ctx context.Context, hostFilePath string, containerFilePath string, fileMode int64) error { - if c.localCtx == nil { - return c.Container.CopyFileToContainer(ctx, hostFilePath, containerFilePath, fileMode) - } +// ContainerIPs returns a slice with the IP address of the local Ollama binary. +func (c *localProcess) ContainerIPs(ctx context.Context) ([]string, error) { + return []string{c.host}, nil +} - return errCopyAPIsNotSupported +// CopyToContainer implements testcontainers.Container interface for the local Ollama binary. +// Returns [errors.ErrUnsupported]. +func (c *localProcess) CopyToContainer(ctx context.Context, fileContent []byte, containerFilePath string, fileMode int64) error { + return errors.ErrUnsupported } -// CopyFileFromContainer is a no-op for the local Ollama binary. 
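
A standalone sketch of the extraction performed by `extractLogDetails` above: `localLogRegex` captures the listen address and version, and `net.SplitHostPort` separates host and port. The sample log line is illustrative, not captured output:

```go
package main

import (
	"fmt"
	"net"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`Listening on (.*:\d+) \(version\s(.*)\)`)

	// Illustrative log line in the format the Ollama server emits.
	line := "Listening on 127.0.0.1:53105 (version 0.5.4)"

	m := re.FindStringSubmatch(line)
	if len(m) != 3 {
		panic("address and version not found")
	}

	// m[1] is the listen address, m[2] the version.
	host, port, err := net.SplitHostPort(m[1])
	if err != nil {
		panic(err)
	}

	fmt.Println(host, port, m[2]) // 127.0.0.1 53105 0.5.4
}
```
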
-func (c *OllamaContainer) CopyFileFromContainer(ctx context.Context, filePath string) (io.ReadCloser, error) { - if c.localCtx == nil { - return c.Container.CopyFileFromContainer(ctx, filePath) - } +// CopyDirToContainer implements testcontainers.Container interface for the local Ollama binary. +// Returns [errors.ErrUnsupported]. +func (c *localProcess) CopyDirToContainer(ctx context.Context, hostDirPath string, containerParentPath string, fileMode int64) error { + return errors.ErrUnsupported +} - return nil, errCopyAPIsNotSupported +// CopyFileToContainer implements testcontainers.Container interface for the local Ollama binary. +// Returns [errors.ErrUnsupported]. +func (c *localProcess) CopyFileToContainer(ctx context.Context, hostFilePath string, containerFilePath string, fileMode int64) error { + return errors.ErrUnsupported } -// GetLogProductionErrorChannel returns a nil channel. -func (c *OllamaContainer) GetLogProductionErrorChannel() <-chan error { - if c.localCtx == nil { - return c.Container.GetLogProductionErrorChannel() - } +// CopyFileFromContainer implements testcontainers.Container interface for the local Ollama binary. +// Returns [errors.ErrUnsupported]. +func (c *localProcess) CopyFileFromContainer(ctx context.Context, filePath string) (io.ReadCloser, error) { + return nil, errors.ErrUnsupported +} +// GetLogProductionErrorChannel implements testcontainers.Container interface for the local Ollama binary. +// It returns a nil channel because the local Ollama binary doesn't have a production error channel. +func (c *localProcess) GetLogProductionErrorChannel() <-chan error { return nil } -// Endpoint returns the 127.0.0.1:11434 endpoint for the local Ollama binary. -func (c *OllamaContainer) Endpoint(ctx context.Context, port string) (string, error) { - if c.localCtx == nil { - return c.Container.Endpoint(ctx, port) +// Exec implements testcontainers.Container interface for the local Ollama binary. +// It executes a command using the local Ollama binary and returns the exit status +// of the executed command, an [io.Reader] containing the combined stdout and stderr, +// and any encountered error. +// +// Reading directly from the [io.Reader] may result in unexpected bytes due to custom +// stream multiplexing headers. Use [tcexec.Multiplexed] option to read the combined output +// without the multiplexing headers. +// Alternatively, to separate the stdout and stderr from [io.Reader] and interpret these +// headers properly, [stdcopy.StdCopy] from the Docker API should be used. +func (c *localProcess) Exec(ctx context.Context, cmd []string, options ...tcexec.ProcessOption) (int, io.Reader, error) { + if len(cmd) == 0 { + return 1, nil, errors.New("no command provided") + } else if cmd[0] != localBinary { + return 1, nil, fmt.Errorf("command %q: %w", cmd[0], errors.ErrUnsupported) } - return c.localCtx.host + ":" + c.localCtx.port, nil -} - -// Exec executes a command using the local Ollama binary. -func (c *OllamaContainer) Exec(ctx context.Context, cmd []string, options ...tcexec.ProcessOption) (int, io.Reader, error) { - if c.localCtx == nil { - return c.Container.Exec(ctx, cmd, options...) - } + command := exec.CommandContext(ctx, cmd[0], cmd[1:]...) + command.Env = c.env - c.localCtx.mx.Lock() - defer c.localCtx.mx.Unlock() + // Multiplex stdout and stderr to the buffer so they can be read separately later. 
+ var buf bytes.Buffer + command.Stdout = stdcopy.NewStdWriter(&buf, stdcopy.Stdout) + command.Stderr = stdcopy.NewStdWriter(&buf, stdcopy.Stderr) - if len(cmd) == 0 { - err := errors.New("exec: no command provided") - return 1, strings.NewReader(err.Error()), err - } else if cmd[0] != "ollama" { - err := fmt.Errorf("%s: %w", cmd[0], errors.ErrUnsupported) - return 1, strings.NewReader(err.Error()), err + // Use process options to customize the command execution + // emulating the Docker API behaviour. + processOptions := tcexec.NewProcessOptions(cmd) + processOptions.Reader = &buf + for _, o := range options { + o.Apply(processOptions) } - args := []string{} - if len(cmd) > 1 { - args = cmd[1:] // prevent when there is only one command + if err := c.validateExecOptions(processOptions.ExecConfig); err != nil { + return 1, nil, fmt.Errorf("validate exec option: %w", err) } - command := prepareExec(ctx, cmd[0], args, c.localCtx.env, c.localCtx.logFile) - err := command.Run() - if err != nil { - return command.ProcessState.ExitCode(), c.localCtx.logFile, fmt.Errorf("exec %v: %w", cmd, err) + if !processOptions.ExecConfig.AttachStderr { + command.Stderr = io.Discard } - - return command.ProcessState.ExitCode(), c.localCtx.logFile, nil -} - -func prepareExec(ctx context.Context, bin string, args []string, env []string, output io.Writer) *exec.Cmd { - command := exec.CommandContext(ctx, bin, args...) - command.Env = append(command.Env, env...) - command.Env = append(command.Env, os.Environ()...) - - command.Stdout = output - command.Stderr = output - - return command -} - -// GetContainerID returns a placeholder ID for local execution -func (c *OllamaContainer) GetContainerID() string { - if c.localCtx == nil { - return c.Container.GetContainerID() + if !processOptions.ExecConfig.AttachStdout { + command.Stdout = io.Discard + } + if processOptions.ExecConfig.AttachStdin { + command.Stdin = os.Stdin } - return "local-ollama-" + testcontainers.SessionID() -} + command.Dir = processOptions.ExecConfig.WorkingDir + command.Env = append(command.Env, processOptions.ExecConfig.Env...) -// Host returns the 127.0.0.1 address for the local Ollama binary. -func (c *OllamaContainer) Host(ctx context.Context) (string, error) { - if c.localCtx == nil { - return c.Container.Host(ctx) + if err := command.Run(); err != nil { + return command.ProcessState.ExitCode(), processOptions.Reader, fmt.Errorf("exec %v: %w", cmd, err) } - return localIP, nil + return command.ProcessState.ExitCode(), processOptions.Reader, nil } -// Inspect returns a ContainerJSON with the state of the local Ollama binary. -// The version is read from the local Ollama binary (ollama -v), and the port -// mapping is set to 11434. -func (c *OllamaContainer) Inspect(ctx context.Context) (*types.ContainerJSON, error) { - if c.localCtx == nil { - return c.Container.Inspect(ctx) +// validateExecOptions checks if the given exec options are supported by the local Ollama binary. 
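
A sketch of consuming the multiplexed stream that `Exec` returns, assuming an already started `OllamaContainer` backed by the local binary; passing `tcexec.Multiplexed()` to `Exec` would avoid the manual demultiplexing shown here:

```go
package example

import (
	"bytes"
	"context"
	"fmt"

	"github.com/docker/docker/pkg/stdcopy"

	"github.com/testcontainers/testcontainers-go/modules/ollama"
)

func ollamaVersion(ctx context.Context, ctr *ollama.OllamaContainer) (string, error) {
	code, r, err := ctr.Exec(ctx, []string{"ollama", "-v"})
	if err != nil {
		return "", fmt.Errorf("exec: %w", err)
	}
	if code != 0 {
		return "", fmt.Errorf("unexpected exit code: %d", code)
	}

	// Split the stream written by stdcopy.NewStdWriter back into its
	// stdout and stderr parts.
	var stdout, stderr bytes.Buffer
	if _, err := stdcopy.StdCopy(&stdout, &stderr, r); err != nil {
		return "", fmt.Errorf("demultiplex: %w", err)
	}

	return stdout.String(), nil
}
```
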
+func (c *localProcess) validateExecOptions(options container.ExecOptions) error { + var errs []error + if options.User != "" { + errs = append(errs, fmt.Errorf("user: %w", errors.ErrUnsupported)) } - - state, err := c.State(ctx) - if err != nil { - return nil, fmt.Errorf("get ollama state: %w", err) + if options.Privileged { + errs = append(errs, fmt.Errorf("privileged: %w", errors.ErrUnsupported)) } - - // read the version from the ollama binary - var buf bytes.Buffer - command := prepareExec(ctx, "ollama", []string{"-v"}, c.localCtx.env, &buf) - if err := command.Run(); err != nil { - return nil, fmt.Errorf("read ollama -v output: %w", err) + if options.Tty { + errs = append(errs, fmt.Errorf("tty: %w", errors.ErrUnsupported)) + } + if options.Detach { + errs = append(errs, fmt.Errorf("detach: %w", errors.ErrUnsupported)) + } + if options.DetachKeys != "" { + errs = append(errs, fmt.Errorf("detach keys: %w", errors.ErrUnsupported)) } - bs, err := io.ReadAll(&buf) + return errors.Join(errs...) +} + +// Inspect implements testcontainers.Container interface for the local Ollama binary. +// It returns a ContainerJSON with the state of the local Ollama binary. +func (c *localProcess) Inspect(ctx context.Context) (*types.ContainerJSON, error) { + state, err := c.State(ctx) if err != nil { - return nil, fmt.Errorf("read ollama -v output: %w", err) + return nil, fmt.Errorf("state: %w", err) } return &types.ContainerJSON{ ContainerJSONBase: &types.ContainerJSONBase{ ID: c.GetContainerID(), - Name: "local-ollama-" + testcontainers.SessionID(), + Name: localNamePrefix + "-" + c.sessionID, State: state, }, Config: &container.Config{ - Image: string(bs), + Image: localNamePrefix + ":" + c.version, ExposedPorts: nat.PortSet{ - nat.Port(c.localCtx.port + "/tcp"): struct{}{}, + nat.Port(localPort + "/tcp"): struct{}{}, }, - Hostname: "localhost", - Entrypoint: []string{"ollama", "serve"}, + Hostname: c.host, + Entrypoint: []string{localBinary, localServeArg}, }, NetworkSettings: &types.NetworkSettings{ Networks: map[string]*network.EndpointSettings{}, NetworkSettingsBase: types.NetworkSettingsBase{ Bridge: "bridge", Ports: nat.PortMap{ - nat.Port(c.localCtx.port + "/tcp"): { - {HostIP: c.localCtx.host, HostPort: c.localCtx.port}, + nat.Port(localPort + "/tcp"): { + {HostIP: c.host, HostPort: c.port}, }, }, }, DefaultNetworkSettings: types.DefaultNetworkSettings{ - IPAddress: c.localCtx.host, + IPAddress: c.host, }, }, }, nil } -// IsRunning returns true if the local Ollama process is running. -func (c *OllamaContainer) IsRunning() bool { - if c.localCtx == nil { - return c.Container.IsRunning() +// IsRunning implements testcontainers.Container interface for the local Ollama binary. +// It returns true if the local Ollama process is running, false otherwise. +func (c *localProcess) IsRunning() bool { + if c.startedAt.IsZero() { + // The process hasn't started yet. + return false } - c.localCtx.mx.Lock() - defer c.localCtx.mx.Unlock() - - return c.localCtx.serveCmd != nil + select { + case <-c.done: + // The process exited. + return false + default: + // The process is still running. + return true + } } -// Logs returns the logs from the local Ollama binary. -func (c *OllamaContainer) Logs(ctx context.Context) (io.ReadCloser, error) { - if c.localCtx == nil { - return c.Container.Logs(ctx) +// Logs implements testcontainers.Container interface for the local Ollama binary. +// It returns the logs from the local Ollama binary. 
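
A sketch of reading the simulated inspect data above, assuming a started local instance; the commented values (a `local-ollama:<version>` image name and the resolved host port) are illustrative:

```go
package example

import (
	"context"
	"fmt"

	"github.com/testcontainers/testcontainers-go/modules/ollama"
)

func describe(ctx context.Context, ctr *ollama.OllamaContainer) error {
	inspect, err := ctr.Inspect(ctx)
	if err != nil {
		return fmt.Errorf("inspect: %w", err)
	}

	// e.g. "local-ollama:0.5.4"
	fmt.Println(inspect.Config.Image)

	// The binding for the default port reflects the actual listen address.
	if bindings := inspect.NetworkSettings.Ports["11434/tcp"]; len(bindings) > 0 {
		fmt.Println(bindings[0].HostIP, bindings[0].HostPort)
	}

	return nil
}
```
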
+func (c *localProcess) Logs(ctx context.Context) (io.ReadCloser, error) {
+	file, err := os.Open(c.logFile.Name())
+	if err != nil {
+		return nil, fmt.Errorf("open log file: %w", err)
 	}

-	c.localCtx.mx.Lock()
-	defer c.localCtx.mx.Unlock()
-
-	// stream the log file
-	return os.Open(c.localCtx.logFile.Name())
+	return file, nil
 }

-// MappedPort returns the configured port for local Ollama binary.
-func (c *OllamaContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Port, error) {
-	if c.localCtx == nil {
-		return c.Container.MappedPort(ctx, port)
+// State implements testcontainers.Container interface for the local Ollama binary.
+// It returns the current state of the Ollama process, simulating a container state.
+func (c *localProcess) State(ctx context.Context) (*types.ContainerState, error) {
+	c.mtx.Lock()
+	defer c.mtx.Unlock()
+
+	if !c.IsRunning() {
+		state := &types.ContainerState{
+			Status:     "exited",
+			ExitCode:   c.cmd.ProcessState.ExitCode(),
+			StartedAt:  c.startedAt.Format(time.RFC3339Nano),
+			FinishedAt: c.finishedAt.Format(time.RFC3339Nano),
+		}
+		if c.exitErr != nil {
+			state.Error = c.exitErr.Error()
+		}
+
+		return state, nil
 	}

-	// Ollama typically uses port 11434 by default
-	return nat.Port(c.localCtx.port + "/tcp"), nil
+	// Setting the Running field because it's required by the wait strategy
+	// to check if the given log message is present.
+	return &types.ContainerState{
+		Status:     "running",
+		Running:    true,
+		Pid:        c.cmd.Process.Pid,
+		StartedAt:  c.startedAt.Format(time.RFC3339Nano),
+		FinishedAt: c.finishedAt.Format(time.RFC3339Nano),
+	}, nil
 }

-// Networks returns the networks for local Ollama binary, which is a nil slice.
-func (c *OllamaContainer) Networks(ctx context.Context) ([]string, error) {
-	if c.localCtx == nil {
-		return c.Container.Networks(ctx)
+// Stop implements testcontainers.Container interface for the local Ollama binary.
+// It gracefully stops the local Ollama process.
+func (c *localProcess) Stop(ctx context.Context, d *time.Duration) error {
+	if err := c.cmd.Process.Signal(syscall.SIGTERM); err != nil {
+		return fmt.Errorf("signal ollama: %w", err)
 	}

-	return nil, nil
+	if d != nil {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, *d)
+		defer cancel()
+	}
+
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-c.done:
+		// The process exited.
+		c.mtx.Lock()
+		defer c.mtx.Unlock()
+
+		return c.exitErr
+	}
 }

-// NetworkAliases returns the network aliases for local Ollama binary, which is a nil map.
-func (c *OllamaContainer) NetworkAliases(ctx context.Context) (map[string][]string, error) {
-	if c.localCtx == nil {
-		return c.Container.NetworkAliases(ctx)
+// Terminate implements testcontainers.Container interface for the local Ollama binary.
+// It stops the local Ollama process, removing the log file.
+func (c *localProcess) Terminate(ctx context.Context) error {
+	// First try to stop gracefully.
+	if err := c.Stop(ctx, &defaultStopTimeout); !c.isCleanupSafe(err) {
+		return fmt.Errorf("stop: %w", err)
 	}

-	return nil, nil
-}
+	if c.IsRunning() {
+		// Still running, force kill.
+		if err := c.cmd.Process.Kill(); !c.isCleanupSafe(err) {
+			return fmt.Errorf("kill: %w", err)
+		}

-// SessionID returns the session ID for local Ollama binary, which is the session ID
-// of the test execution.
-func (c *OllamaContainer) SessionID() string {
-	if c.localCtx == nil {
-		return c.Container.SessionID()
+		// Wait for the process to exit so we capture any error.
+ c.wg.Wait() } - return testcontainers.SessionID() + c.mtx.Lock() + exitErr := c.exitErr + c.mtx.Unlock() + + return errors.Join(exitErr, c.cleanupLog()) } -// Start starts the local Ollama process, not failing if it's already running. -func (c *OllamaContainer) Start(ctx context.Context) error { - if c.localCtx == nil { - return c.Container.Start(ctx) +// cleanupLog closes the log file and removes it. +func (c *localProcess) cleanupLog() error { + if c.logFile == nil { + return nil } - err := c.startLocalOllama(ctx) - if err != nil { - return fmt.Errorf("start ollama: %w", err) + var errs []error + if err := c.logFile.Close(); err != nil { + errs = append(errs, fmt.Errorf("close log: %w", err)) } - return nil + if err := os.Remove(c.logFile.Name()); err != nil && !errors.Is(err, fs.ErrNotExist) { + errs = append(errs, fmt.Errorf("remove log: %w", err)) + } + + c.logFile = nil // Prevent double cleanup. + + return errors.Join(errs...) } -// State returns the current state of the Ollama process, simulating a container state -// for local execution. -func (c *OllamaContainer) State(ctx context.Context) (*types.ContainerState, error) { - if c.localCtx == nil { - return c.Container.State(ctx) - } +// Endpoint implements testcontainers.Container interface for the local Ollama binary. +// It returns proto://host:port string for the Ollama port. +// It returns just host:port if proto is blank. +func (c *localProcess) Endpoint(ctx context.Context, proto string) (string, error) { + return c.PortEndpoint(ctx, localPort, proto) +} - c.localCtx.mx.Lock() - defer c.localCtx.mx.Unlock() +// GetContainerID implements testcontainers.Container interface for the local Ollama binary. +func (c *localProcess) GetContainerID() string { + return localNamePrefix + "-" + c.sessionID +} - if c.localCtx.serveCmd == nil { - return &types.ContainerState{Status: "exited"}, nil - } +// Host implements testcontainers.Container interface for the local Ollama binary. +func (c *localProcess) Host(ctx context.Context) (string, error) { + return c.host, nil +} - // Check if process is still running. Signal(0) is a special case in Unix-like systems. - // When you send signal 0 to a process: - // - It performs all the normal error checking (permissions, process existence, etc.) - // - But it doesn't actually send any signal to the process - if err := c.localCtx.serveCmd.Process.Signal(syscall.Signal(0)); err != nil { - return &types.ContainerState{Status: "created"}, nil +// MappedPort implements testcontainers.Container interface for the local Ollama binary. +func (c *localProcess) MappedPort(ctx context.Context, port nat.Port) (nat.Port, error) { + if port.Port() != localPort || port.Proto() != "tcp" { + return "", errdefs.NotFound(fmt.Errorf("port %q not found", port)) } - // Setting the Running field because it's required by the wait strategy - // to check if the given log message is present. - return &types.ContainerState{Status: "running", Running: true}, nil + return nat.Port(c.port + "/tcp"), nil } -// Stop gracefully stops the local Ollama process -func (c *OllamaContainer) Stop(ctx context.Context, d *time.Duration) error { - if c.localCtx == nil { - return c.Container.Stop(ctx, d) - } +// Networks implements testcontainers.Container interface for the local Ollama binary. +// It returns a nil slice. 
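
A sketch of the stop/start cycle these methods support, assuming a running local instance; `Stop` sends SIGTERM and waits for the process to exit, after which `Start` may be called again:

```go
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/testcontainers/testcontainers-go/modules/ollama"
)

func restart(ctx context.Context, ctr *ollama.OllamaContainer) error {
	timeout := 5 * time.Second
	if err := ctr.Stop(ctx, &timeout); err != nil {
		return fmt.Errorf("stop: %w", err)
	}

	state, err := ctr.State(ctx)
	if err != nil {
		return fmt.Errorf("state: %w", err)
	}
	fmt.Println(state.Status) // "exited"

	// A stopped process can be started again; starting a running one errors.
	return ctr.Start(ctx)
}
```
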
+func (c *localProcess) Networks(ctx context.Context) ([]string, error) { + return nil, nil +} - c.localCtx.mx.Lock() - defer c.localCtx.mx.Unlock() +// NetworkAliases implements testcontainers.Container interface for the local Ollama binary. +// It returns a nil map. +func (c *localProcess) NetworkAliases(ctx context.Context) (map[string][]string, error) { + return nil, nil +} - if c.localCtx.serveCmd == nil { - return nil +// PortEndpoint implements testcontainers.Container interface for the local Ollama binary. +// It returns proto://host:port string for the given exposed port. +// It returns just host:port if proto is blank. +func (c *localProcess) PortEndpoint(ctx context.Context, port nat.Port, proto string) (string, error) { + host, err := c.Host(ctx) + if err != nil { + return "", fmt.Errorf("host: %w", err) } - if err := c.localCtx.serveCmd.Process.Signal(syscall.SIGTERM); err != nil { - return fmt.Errorf("signal ollama: %w", err) + outerPort, err := c.MappedPort(ctx, port) + if err != nil { + return "", fmt.Errorf("mapped port: %w", err) } - c.localCtx.serveCmd = nil + if proto != "" { + proto += "://" + } - return nil + return fmt.Sprintf("%s%s:%s", proto, host, outerPort.Port()), nil } -// Terminate stops the local Ollama process, removing the log file. -func (c *OllamaContainer) Terminate(ctx context.Context) error { - if c.localCtx == nil { - return c.Container.Terminate(ctx) - } +// SessionID implements testcontainers.Container interface for the local Ollama binary. +func (c *localProcess) SessionID() string { + return c.sessionID +} + +// Deprecated: it will be removed in the next major release. +// FollowOutput is not implemented for the local Ollama binary. +// It panics if called. +func (c *localProcess) FollowOutput(consumer testcontainers.LogConsumer) { + panic("not implemented") +} - // First try to stop gracefully - err := c.Stop(ctx, &defaultStopTimeout) +// Deprecated: use c.Inspect(ctx).NetworkSettings.Ports instead. +// Ports gets the exposed ports for the container. +func (c *localProcess) Ports(ctx context.Context) (nat.PortMap, error) { + inspect, err := c.Inspect(ctx) if err != nil { - return fmt.Errorf("stop ollama: %w", err) + return nil, err } - c.localCtx.mx.Lock() - defer c.localCtx.mx.Unlock() + return inspect.NetworkSettings.Ports, nil +} - if c.localCtx.logFile == nil { - return nil - } +// Deprecated: it will be removed in the next major release. +// StartLogProducer implements testcontainers.Container interface for the local Ollama binary. +// It returns an error because the local Ollama binary doesn't have a log producer. +func (c *localProcess) StartLogProducer(context.Context, ...testcontainers.LogProductionOption) error { + return errors.ErrUnsupported +} - var errs []error - if err = c.localCtx.logFile.Close(); err != nil { - errs = append(errs, fmt.Errorf("close log: %w", err)) - } +// Deprecated: it will be removed in the next major release. +// StopLogProducer implements testcontainers.Container interface for the local Ollama binary. +// It returns an error because the local Ollama binary doesn't have a log producer. +func (c *localProcess) StopLogProducer() error { + return errors.ErrUnsupported +} - if err = os.Remove(c.localCtx.logFile.Name()); err != nil && !errors.Is(err, fs.ErrNotExist) { - errs = append(errs, fmt.Errorf("remove log: %w", err)) - } +// Deprecated: Use c.Inspect(ctx).Name instead. +// Name returns the name for the local Ollama binary. 
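
A sketch of the endpoint helpers above; the port values in the comments are illustrative, since the default listen port is ephemeral:

```go
package example

import (
	"context"
	"fmt"

	"github.com/testcontainers/testcontainers-go/modules/ollama"
)

func printEndpoints(ctx context.Context, ctr *ollama.OllamaContainer) error {
	// A blank proto returns host:port, e.g. "127.0.0.1:53105".
	raw, err := ctr.Endpoint(ctx, "")
	if err != nil {
		return fmt.Errorf("endpoint: %w", err)
	}

	// A proto prefixes the result, e.g. "http://127.0.0.1:53105".
	url, err := ctr.PortEndpoint(ctx, "11434/tcp", "http")
	if err != nil {
		return fmt.Errorf("port endpoint: %w", err)
	}

	fmt.Println(raw, url)
	return nil
}
```
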
+func (c *localProcess) Name(context.Context) (string, error) { + return localNamePrefix + "-" + c.sessionID, nil +} - return errors.Join(errs...) +// isCleanupSafe reports whether all errors in err's tree are one of the +// following, so can safely be ignored: +// - nil +// - os: process already finished +// - context deadline exceeded +func (c *localProcess) isCleanupSafe(err error) bool { + switch { + case err == nil, + errors.Is(err, os.ErrProcessDone), + errors.Is(err, context.DeadlineExceeded): + return true + default: + return false + } } diff --git a/modules/ollama/local_test.go b/modules/ollama/local_test.go index 7bd073ca5e..62dfbb3cac 100644 --- a/modules/ollama/local_test.go +++ b/modules/ollama/local_test.go @@ -4,12 +4,15 @@ import ( "context" "errors" "io" + "io/fs" "os" "os/exec" "path/filepath" + "regexp" "testing" "time" + "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/strslice" "github.com/stretchr/testify/require" @@ -18,26 +21,52 @@ import ( "github.com/testcontainers/testcontainers-go/modules/ollama" ) +const ( + testImage = "ollama/ollama:latest" + testNatPort = "11434/tcp" + testHost = "127.0.0.1" + testBinary = "ollama" +) + +var ( + reLogDetails = regexp.MustCompile(`Listening on (.*:\d+) \(version\s(.*)\)`) + zeroTime = time.Time{}.Format(time.RFC3339Nano) +) + func TestRun_local(t *testing.T) { // check if the local ollama binary is available - if _, err := exec.LookPath("ollama"); err != nil { + if _, err := exec.LookPath(testBinary); err != nil { t.Skip("local ollama binary not found, skipping") } ctx := context.Background() - ollamaContainer, err := ollama.Run( ctx, - "ollama/ollama:0.1.25", + testImage, ollama.WithUseLocal("FOO=BAR"), ) testcontainers.CleanupContainer(t, ollamaContainer) require.NoError(t, err) + t.Run("state", func(t *testing.T) { + state, err := ollamaContainer.State(ctx) + require.NoError(t, err) + require.NotEmpty(t, state.StartedAt) + require.NotEqual(t, zeroTime, state.StartedAt) + require.NotZero(t, state.Pid) + require.Equal(t, &types.ContainerState{ + Status: "running", + Running: true, + Pid: state.Pid, + StartedAt: state.StartedAt, + FinishedAt: time.Time{}.Format(time.RFC3339Nano), + }, state) + }) + t.Run("connection-string", func(t *testing.T) { connectionStr, err := ollamaContainer.ConnectionString(ctx) require.NoError(t, err) - require.Equal(t, "http://127.0.0.1:11434", connectionStr) + require.NotEmpty(t, connectionStr) }) t.Run("container-id", func(t *testing.T) { @@ -48,11 +77,11 @@ func TestRun_local(t *testing.T) { t.Run("container-ips", func(t *testing.T) { ip, err := ollamaContainer.ContainerIP(ctx) require.NoError(t, err) - require.Equal(t, "127.0.0.1", ip) + require.Equal(t, testHost, ip) ips, err := ollamaContainer.ContainerIPs(ctx) require.NoError(t, err) - require.Equal(t, []string{"127.0.0.1"}, ips) + require.Equal(t, []string{testHost}, ips) }) t.Run("copy", func(t *testing.T) { @@ -76,52 +105,13 @@ func TestRun_local(t *testing.T) { }) t.Run("endpoint", func(t *testing.T) { - endpoint, err := ollamaContainer.Endpoint(ctx, "88888/tcp") - require.NoError(t, err) - require.Equal(t, "127.0.0.1:11434", endpoint) - }) - - t.Run("exec/pull-and-run-model", func(t *testing.T) { - const model = "llama3.2:1b" - - code, r, err := ollamaContainer.Exec(ctx, []string{"ollama", "pull", model}) - require.NoError(t, err) - require.Equal(t, 0, code) - - bs, err := io.ReadAll(r) - require.NoError(t, err) - require.Empty(t, bs) - - code, _, err = ollamaContainer.Exec(ctx, 
[]string{"ollama", "run", model}, tcexec.Multiplexed()) + endpoint, err := ollamaContainer.Endpoint(ctx, "") require.NoError(t, err) - require.Equal(t, 0, code) + require.Contains(t, endpoint, testHost+":") - logs, err := ollamaContainer.Logs(ctx) - require.NoError(t, err) - defer logs.Close() - - bs, err = io.ReadAll(logs) + endpoint, err = ollamaContainer.Endpoint(ctx, "http") require.NoError(t, err) - require.Contains(t, string(bs), "llama runner started") - }) - - t.Run("exec/unsupported-command", func(t *testing.T) { - code, r, err := ollamaContainer.Exec(ctx, []string{"cat", "/etc/passwd"}) - require.Equal(t, 1, code) - require.Error(t, err) - require.ErrorIs(t, err, errors.ErrUnsupported) - - bs, err := io.ReadAll(r) - require.NoError(t, err) - require.Equal(t, "cat: unsupported operation", string(bs)) - - code, r, err = ollamaContainer.Exec(ctx, []string{}) - require.Equal(t, 1, code) - require.Error(t, err) - - bs, err = io.ReadAll(r) - require.NoError(t, err) - require.Equal(t, "exec: no command provided", string(bs)) + require.Contains(t, endpoint, "http://"+testHost+":") }) t.Run("is-running", func(t *testing.T) { @@ -129,20 +119,18 @@ func TestRun_local(t *testing.T) { err = ollamaContainer.Stop(ctx, nil) require.NoError(t, err) - require.False(t, ollamaContainer.IsRunning()) // return it to the running state err = ollamaContainer.Start(ctx) require.NoError(t, err) - require.True(t, ollamaContainer.IsRunning()) }) t.Run("host", func(t *testing.T) { host, err := ollamaContainer.Host(ctx) require.NoError(t, err) - require.Equal(t, "127.0.0.1", host) + require.Equal(t, testHost, host) }) t.Run("inspect", func(t *testing.T) { @@ -153,74 +141,87 @@ func TestRun_local(t *testing.T) { require.Equal(t, "local-ollama-"+testcontainers.SessionID(), inspect.ContainerJSONBase.Name) require.True(t, inspect.ContainerJSONBase.State.Running) - require.Contains(t, string(inspect.Config.Image), "ollama version is") - _, exists := inspect.Config.ExposedPorts["11434/tcp"] + require.NotEmpty(t, inspect.Config.Image) + _, exists := inspect.Config.ExposedPorts[testNatPort] require.True(t, exists) - require.Equal(t, "localhost", inspect.Config.Hostname) - require.Equal(t, strslice.StrSlice(strslice.StrSlice{"ollama", "serve"}), inspect.Config.Entrypoint) + require.Equal(t, testHost, inspect.Config.Hostname) + require.Equal(t, strslice.StrSlice(strslice.StrSlice{testBinary, "serve"}), inspect.Config.Entrypoint) require.Empty(t, inspect.NetworkSettings.Networks) require.Equal(t, "bridge", inspect.NetworkSettings.NetworkSettingsBase.Bridge) ports := inspect.NetworkSettings.NetworkSettingsBase.Ports - _, exists = ports["11434/tcp"] + port, exists := ports[testNatPort] require.True(t, exists) - - require.Equal(t, "127.0.0.1", inspect.NetworkSettings.Ports["11434/tcp"][0].HostIP) - require.Equal(t, "11434", inspect.NetworkSettings.Ports["11434/tcp"][0].HostPort) + require.Len(t, port, 1) + require.Equal(t, testHost, port[0].HostIP) + require.NotEmpty(t, port[0].HostPort) }) t.Run("logfile", func(t *testing.T) { - openFile, err := os.Open("local-ollama-" + testcontainers.SessionID() + ".log") + file, err := os.Open("local-ollama-" + testcontainers.SessionID() + ".log") require.NoError(t, err) - require.NotNil(t, openFile) - require.NoError(t, openFile.Close()) + require.NoError(t, file.Close()) }) t.Run("logs", func(t *testing.T) { logs, err := ollamaContainer.Logs(ctx) require.NoError(t, err) - defer logs.Close() + t.Cleanup(func() { + require.NoError(t, logs.Close()) + }) bs, err := io.ReadAll(logs) 
require.NoError(t, err) - - require.Contains(t, string(bs), "Listening on 127.0.0.1:11434") + require.Regexp(t, reLogDetails, string(bs)) }) t.Run("mapped-port", func(t *testing.T) { - port, err := ollamaContainer.MappedPort(ctx, "11434/tcp") + port, err := ollamaContainer.MappedPort(ctx, testNatPort) require.NoError(t, err) - require.Equal(t, "11434", port.Port()) + require.NotEmpty(t, port.Port()) require.Equal(t, "tcp", port.Proto()) }) t.Run("networks", func(t *testing.T) { networks, err := ollamaContainer.Networks(ctx) require.NoError(t, err) - require.Empty(t, networks) + require.Nil(t, networks) }) t.Run("network-aliases", func(t *testing.T) { aliases, err := ollamaContainer.NetworkAliases(ctx) require.NoError(t, err) - require.Empty(t, aliases) + require.Nil(t, aliases) + }) + + t.Run("port-endpoint", func(t *testing.T) { + endpoint, err := ollamaContainer.PortEndpoint(ctx, testNatPort, "") + require.NoError(t, err) + require.Regexp(t, regexp.MustCompile(`^127.0.0.1:\d+$`), endpoint) + + endpoint, err = ollamaContainer.PortEndpoint(ctx, testNatPort, "http") + require.NoError(t, err) + require.Regexp(t, regexp.MustCompile(`^http://127.0.0.1:\d+$`), endpoint) }) t.Run("session-id", func(t *testing.T) { - id := ollamaContainer.SessionID() - require.Equal(t, testcontainers.SessionID(), id) + require.Equal(t, testcontainers.SessionID(), ollamaContainer.SessionID()) }) t.Run("stop-start", func(t *testing.T) { d := time.Second * 5 - err := ollamaContainer.Stop(ctx, &d) require.NoError(t, err) state, err := ollamaContainer.State(ctx) require.NoError(t, err) require.Equal(t, "exited", state.Status) + require.NotEmpty(t, state.StartedAt) + require.NotEqual(t, zeroTime, state.StartedAt) + require.NotEmpty(t, state.FinishedAt) + require.NotEqual(t, zeroTime, state.FinishedAt) + require.Zero(t, state.ExitCode) err = ollamaContainer.Start(ctx) require.NoError(t, err) @@ -231,12 +232,13 @@ func TestRun_local(t *testing.T) { logs, err := ollamaContainer.Logs(ctx) require.NoError(t, err) - defer logs.Close() + t.Cleanup(func() { + require.NoError(t, logs.Close()) + }) bs, err := io.ReadAll(logs) require.NoError(t, err) - - require.Contains(t, string(bs), "Listening on 127.0.0.1:11434") + require.Regexp(t, reLogDetails, string(bs)) }) t.Run("start-start", func(t *testing.T) { @@ -245,7 +247,7 @@ func TestRun_local(t *testing.T) { require.Equal(t, "running", state.Status) err = ollamaContainer.Start(ctx) - require.NoError(t, err) + require.Error(t, err) }) t.Run("terminate", func(t *testing.T) { @@ -253,41 +255,126 @@ func TestRun_local(t *testing.T) { require.NoError(t, err) _, err = os.Stat("ollama-" + testcontainers.SessionID() + ".log") - require.True(t, os.IsNotExist(err)) + require.ErrorIs(t, err, fs.ErrNotExist) state, err := ollamaContainer.State(ctx) require.NoError(t, err) - require.Equal(t, "exited", state.Status) + require.NotEmpty(t, state.StartedAt) + require.NotEqual(t, zeroTime, state.StartedAt) + require.NotEmpty(t, state.FinishedAt) + require.NotEqual(t, zeroTime, state.FinishedAt) + require.Equal(t, &types.ContainerState{ + Status: "exited", + StartedAt: state.StartedAt, + FinishedAt: state.FinishedAt, + }, state) + }) + + t.Run("deprecated", func(t *testing.T) { + t.Run("ports", func(t *testing.T) { + inspect, err := ollamaContainer.Inspect(ctx) + require.NoError(t, err) + + ports, err := ollamaContainer.Ports(ctx) + require.NoError(t, err) + require.Equal(t, inspect.NetworkSettings.Ports, ports) + }) + + t.Run("follow-output", func(t *testing.T) { + require.Panics(t, func() { + 
ollamaContainer.FollowOutput(&testcontainers.StdoutLogConsumer{}) + }) + }) + + t.Run("start-log-producer", func(t *testing.T) { + err := ollamaContainer.StartLogProducer(ctx) + require.ErrorIs(t, err, errors.ErrUnsupported) + }) + + t.Run("stop-log-producer", func(t *testing.T) { + err := ollamaContainer.StopLogProducer() + require.ErrorIs(t, err, errors.ErrUnsupported) + }) + + t.Run("name", func(t *testing.T) { + name, err := ollamaContainer.Name(ctx) + require.NoError(t, err) + require.Equal(t, "local-ollama-"+testcontainers.SessionID(), name) + }) }) } func TestRun_localWithCustomLogFile(t *testing.T) { - t.Setenv("OLLAMA_LOGFILE", filepath.Join(t.TempDir(), "server.log")) - ctx := context.Background() + logFile := filepath.Join(t.TempDir(), "server.log") - ollamaContainer, err := ollama.Run(ctx, "ollama/ollama:0.1.25", ollama.WithUseLocal("FOO=BAR")) - require.NoError(t, err) - testcontainers.CleanupContainer(t, ollamaContainer) + t.Run("parent-env", func(t *testing.T) { + t.Setenv("OLLAMA_LOGFILE", logFile) - logs, err := ollamaContainer.Logs(ctx) - require.NoError(t, err) - defer logs.Close() + ollamaContainer, err := ollama.Run(ctx, testImage, ollama.WithUseLocal()) + testcontainers.CleanupContainer(t, ollamaContainer) + require.NoError(t, err) - bs, err := io.ReadAll(logs) - require.NoError(t, err) + logs, err := ollamaContainer.Logs(ctx) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, logs.Close()) + }) + + bs, err := io.ReadAll(logs) + require.NoError(t, err) + require.Regexp(t, reLogDetails, string(bs)) + + file, ok := logs.(*os.File) + require.True(t, ok) + require.Equal(t, logFile, file.Name()) + }) + + t.Run("local-env", func(t *testing.T) { + ollamaContainer, err := ollama.Run(ctx, testImage, ollama.WithUseLocal("OLLAMA_LOGFILE="+logFile)) + testcontainers.CleanupContainer(t, ollamaContainer) + require.NoError(t, err) + + logs, err := ollamaContainer.Logs(ctx) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, logs.Close()) + }) + + bs, err := io.ReadAll(logs) + require.NoError(t, err) + require.Regexp(t, reLogDetails, string(bs)) - require.Contains(t, string(bs), "Listening on 127.0.0.1:11434") + file, ok := logs.(*os.File) + require.True(t, ok) + require.Equal(t, logFile, file.Name()) + }) } func TestRun_localWithCustomHost(t *testing.T) { - t.Setenv("OLLAMA_HOST", "127.0.0.1:1234") - ctx := context.Background() - ollamaContainer, err := ollama.Run(ctx, "ollama/ollama:0.1.25", ollama.WithUseLocal()) - require.NoError(t, err) - testcontainers.CleanupContainer(t, ollamaContainer) + t.Run("parent-env", func(t *testing.T) { + t.Setenv("OLLAMA_HOST", "127.0.0.1:1234") + + ollamaContainer, err := ollama.Run(ctx, testImage, ollama.WithUseLocal()) + testcontainers.CleanupContainer(t, ollamaContainer) + require.NoError(t, err) + + testRun_localWithCustomHost(ctx, t, ollamaContainer) + }) + + t.Run("local-env", func(t *testing.T) { + ollamaContainer, err := ollama.Run(ctx, testImage, ollama.WithUseLocal("OLLAMA_HOST=127.0.0.1:1234")) + testcontainers.CleanupContainer(t, ollamaContainer) + require.NoError(t, err) + + testRun_localWithCustomHost(ctx, t, ollamaContainer) + }) +} + +func testRun_localWithCustomHost(ctx context.Context, t *testing.T, ollamaContainer *ollama.OllamaContainer) { + t.Helper() t.Run("connection-string", func(t *testing.T) { connectionStr, err := ollamaContainer.ConnectionString(ctx) @@ -296,36 +383,38 @@ func TestRun_localWithCustomHost(t *testing.T) { }) t.Run("endpoint", func(t *testing.T) { - endpoint, err := 
ollamaContainer.Endpoint(ctx, "1234/tcp") + endpoint, err := ollamaContainer.Endpoint(ctx, "http") require.NoError(t, err) - require.Equal(t, "127.0.0.1:1234", endpoint) + require.Equal(t, "http://127.0.0.1:1234", endpoint) }) t.Run("inspect", func(t *testing.T) { inspect, err := ollamaContainer.Inspect(ctx) require.NoError(t, err) + require.Regexp(t, regexp.MustCompile(`^local-ollama:\d+\.\d+\.\d+$`), inspect.Config.Image) - require.Contains(t, string(inspect.Config.Image), "ollama version is") - _, exists := inspect.Config.ExposedPorts["1234/tcp"] + _, exists := inspect.Config.ExposedPorts[testNatPort] require.True(t, exists) - require.Equal(t, "localhost", inspect.Config.Hostname) - require.Equal(t, strslice.StrSlice(strslice.StrSlice{"ollama", "serve"}), inspect.Config.Entrypoint) + require.Equal(t, testHost, inspect.Config.Hostname) + require.Equal(t, strslice.StrSlice(strslice.StrSlice{testBinary, "serve"}), inspect.Config.Entrypoint) require.Empty(t, inspect.NetworkSettings.Networks) require.Equal(t, "bridge", inspect.NetworkSettings.NetworkSettingsBase.Bridge) ports := inspect.NetworkSettings.NetworkSettingsBase.Ports - _, exists = ports["1234/tcp"] + port, exists := ports[testNatPort] require.True(t, exists) - - require.Equal(t, "127.0.0.1", inspect.NetworkSettings.Ports["1234/tcp"][0].HostIP) - require.Equal(t, "1234", inspect.NetworkSettings.Ports["1234/tcp"][0].HostPort) + require.Len(t, port, 1) + require.Equal(t, testHost, port[0].HostIP) + require.Equal(t, "1234", port[0].HostPort) }) t.Run("logs", func(t *testing.T) { logs, err := ollamaContainer.Logs(ctx) require.NoError(t, err) - defer logs.Close() + t.Cleanup(func() { + require.NoError(t, logs.Close()) + }) bs, err := io.ReadAll(logs) require.NoError(t, err) @@ -334,9 +423,109 @@ func TestRun_localWithCustomHost(t *testing.T) { }) t.Run("mapped-port", func(t *testing.T) { - port, err := ollamaContainer.MappedPort(ctx, "1234/tcp") + port, err := ollamaContainer.MappedPort(ctx, testNatPort) require.NoError(t, err) require.Equal(t, "1234", port.Port()) require.Equal(t, "tcp", port.Proto()) }) } + +func TestRun_localExec(t *testing.T) { + // check if the local ollama binary is available + if _, err := exec.LookPath(testBinary); err != nil { + t.Skip("local ollama binary not found, skipping") + } + + ctx := context.Background() + + ollamaContainer, err := ollama.Run(ctx, testImage, ollama.WithUseLocal()) + testcontainers.CleanupContainer(t, ollamaContainer) + require.NoError(t, err) + + t.Run("no-command", func(t *testing.T) { + code, r, err := ollamaContainer.Exec(ctx, nil) + require.Error(t, err) + require.Equal(t, 1, code) + require.Nil(t, r) + }) + + t.Run("unsupported-command", func(t *testing.T) { + code, r, err := ollamaContainer.Exec(ctx, []string{"cat", "/etc/hosts"}) + require.ErrorIs(t, err, errors.ErrUnsupported) + require.Equal(t, 1, code) + require.Nil(t, r) + }) + + t.Run("unsupported-option-user", func(t *testing.T) { + code, r, err := ollamaContainer.Exec(ctx, []string{testBinary, "-v"}, tcexec.WithUser("root")) + require.ErrorIs(t, err, errors.ErrUnsupported) + require.Equal(t, 1, code) + require.Nil(t, r) + }) + + t.Run("unsupported-option-privileged", func(t *testing.T) { + code, r, err := ollamaContainer.Exec(ctx, []string{testBinary, "-v"}, tcexec.ProcessOptionFunc(func(opts *tcexec.ProcessOptions) { + opts.ExecConfig.Privileged = true + })) + require.ErrorIs(t, err, errors.ErrUnsupported) + require.Equal(t, 1, code) + require.Nil(t, r) + }) + + t.Run("unsupported-option-tty", func(t *testing.T) { + 
code, r, err := ollamaContainer.Exec(ctx, []string{testBinary, "-v"}, tcexec.ProcessOptionFunc(func(opts *tcexec.ProcessOptions) { + opts.ExecConfig.Tty = true + })) + require.ErrorIs(t, err, errors.ErrUnsupported) + require.Equal(t, 1, code) + require.Nil(t, r) + }) + + t.Run("unsupported-option-detach", func(t *testing.T) { + code, r, err := ollamaContainer.Exec(ctx, []string{testBinary, "-v"}, tcexec.ProcessOptionFunc(func(opts *tcexec.ProcessOptions) { + opts.ExecConfig.Detach = true + })) + require.ErrorIs(t, err, errors.ErrUnsupported) + require.Equal(t, 1, code) + require.Nil(t, r) + }) + + t.Run("unsupported-option-detach-keys", func(t *testing.T) { + code, r, err := ollamaContainer.Exec(ctx, []string{testBinary, "-v"}, tcexec.ProcessOptionFunc(func(opts *tcexec.ProcessOptions) { + opts.ExecConfig.DetachKeys = "ctrl-p,ctrl-q" + })) + require.ErrorIs(t, err, errors.ErrUnsupported) + require.Equal(t, 1, code) + require.Nil(t, r) + }) + + t.Run("pull-and-run-model", func(t *testing.T) { + const model = "llama3.2:1b" + + code, r, err := ollamaContainer.Exec(ctx, []string{testBinary, "pull", model}) + require.NoError(t, err) + require.Zero(t, code) + + bs, err := io.ReadAll(r) + require.NoError(t, err) + require.Contains(t, string(bs), "success") + + code, r, err = ollamaContainer.Exec(ctx, []string{testBinary, "run", model}, tcexec.Multiplexed()) + require.NoError(t, err) + require.Zero(t, code) + + bs, err = io.ReadAll(r) + require.NoError(t, err) + require.Empty(t, bs) + + logs, err := ollamaContainer.Logs(ctx) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, logs.Close()) + }) + + bs, err = io.ReadAll(logs) + require.NoError(t, err) + require.Contains(t, string(bs), "llama runner started") + }) +} diff --git a/modules/ollama/local_unit_test.go b/modules/ollama/local_unit_test.go deleted file mode 100644 index 95d9b93638..0000000000 --- a/modules/ollama/local_unit_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package ollama - -import ( - "context" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestRun_localWithCustomLogFileError(t *testing.T) { - t.Run("terminate/close-log-error", func(t *testing.T) { - // Create a temporary file for testing - f, err := os.CreateTemp(t.TempDir(), "test-log-*") - require.NoError(t, err) - - // Close the file before termination to force a "file already closed" error - err = f.Close() - require.NoError(t, err) - - c := &OllamaContainer{ - localCtx: &localContext{ - logFile: f, - }, - } - err = c.Terminate(context.Background()) - require.Error(t, err) - require.ErrorContains(t, err, "close log:") - }) - - t.Run("terminate/log-file-not-removable", func(t *testing.T) { - // Create a temporary file for testing - f, err := os.CreateTemp(t.TempDir(), "test-log-*") - require.NoError(t, err) - defer func() { - // Cleanup: restore permissions - os.Chmod(filepath.Dir(f.Name()), 0700) - }() - - // Make the file read-only and its parent directory read-only - // This should cause removal to fail on most systems - dir := filepath.Dir(f.Name()) - require.NoError(t, os.Chmod(dir, 0500)) - - c := &OllamaContainer{ - localCtx: &localContext{ - logFile: f, - }, - } - err = c.Terminate(context.Background()) - require.Error(t, err) - require.ErrorContains(t, err, "remove log:") - }) -} diff --git a/modules/ollama/ollama.go b/modules/ollama/ollama.go index 3d0cc6fa4e..15f0d7de47 100644 --- a/modules/ollama/ollama.go +++ b/modules/ollama/ollama.go @@ -20,24 +20,19 @@ const DefaultOllamaImage = 
"ollama/ollama:0.1.25" // OllamaContainer represents the Ollama container type used in the module type OllamaContainer struct { testcontainers.Container - localCtx *localContext } // ConnectionString returns the connection string for the Ollama container, // using the default port 11434. func (c *OllamaContainer) ConnectionString(ctx context.Context) (string, error) { - if c.localCtx != nil { - return "http://" + c.localCtx.host + ":" + c.localCtx.port, nil - } - host, err := c.Host(ctx) if err != nil { - return "", err + return "", fmt.Errorf("host: %w", err) } port, err := c.MappedPort(ctx, "11434/tcp") if err != nil { - return "", err + return "", fmt.Errorf("mapped port: %w", err) } return fmt.Sprintf("http://%s:%d", host, port.Int()), nil @@ -48,7 +43,7 @@ func (c *OllamaContainer) ConnectionString(ctx context.Context) (string, error) // of the container into a new image with the given name, so it doesn't override existing images. // It should be used for creating an image that contains a loaded model. func (c *OllamaContainer) Commit(ctx context.Context, targetImage string) error { - if c.localCtx != nil { + if _, ok := c.Container.(*localProcess); ok { return nil } @@ -89,40 +84,33 @@ func RunContainer(ctx context.Context, opts ...testcontainers.ContainerCustomize // Run creates an instance of the Ollama container type func Run(ctx context.Context, img string, opts ...testcontainers.ContainerCustomizer) (*OllamaContainer, error) { - req := testcontainers.ContainerRequest{ - Image: img, - ExposedPorts: []string{"11434/tcp"}, - WaitingFor: wait.ForListeningPort("11434/tcp").WithStartupTimeout(60 * time.Second), - } - - genericContainerReq := testcontainers.GenericContainerRequest{ - ContainerRequest: req, - Started: true, + req := testcontainers.GenericContainerRequest{ + ContainerRequest: testcontainers.ContainerRequest{ + Image: img, + ExposedPorts: []string{"11434/tcp"}, + WaitingFor: wait.ForListeningPort("11434/tcp").WithStartupTimeout(60 * time.Second), + }, + Started: true, } // always request a GPU if the host supports it opts = append(opts, withGpu()) - useLocal := false + var local bool for _, opt := range opts { - if err := opt.Customize(&genericContainerReq); err != nil { + if err := opt.Customize(&req); err != nil { return nil, fmt.Errorf("customize: %w", err) } - if _, ok := opt.(UseLocal); ok { - useLocal = true + if _, ok := opt.(useLocal); ok { + local = true } } - if useLocal { - container, err := runLocal(ctx, req.Env) - if err == nil { - return container, nil - } - - testcontainers.Logger.Printf("failed to run local ollama: %v, switching to docker", err) + if local { + return runLocal(ctx, req) } - container, err := testcontainers.GenericContainer(ctx, genericContainerReq) + container, err := testcontainers.GenericContainer(ctx, req) var c *OllamaContainer if container != nil { c = &OllamaContainer{Container: container} diff --git a/modules/ollama/ollama_test.go b/modules/ollama/ollama_test.go index 94212dc171..50e383d682 100644 --- a/modules/ollama/ollama_test.go +++ b/modules/ollama/ollama_test.go @@ -16,6 +16,14 @@ import ( "github.com/testcontainers/testcontainers-go/modules/ollama" ) +func TestOllamaBasic(t *testing.T) { + ctx := context.Background() + + ctr, err := ollama.Run(ctx, "ollama/ollama:0.1.25") + testcontainers.CleanupContainer(t, ctr) + require.NoError(t, err) +} + func TestOllama(t *testing.T) { ctx := context.Background() diff --git a/modules/ollama/options.go b/modules/ollama/options.go index 4761a28530..88763c7c76 100644 --- 
a/modules/ollama/options.go +++ b/modules/ollama/options.go @@ -3,17 +3,19 @@ package ollama import ( "context" "fmt" + "os" "strings" "github.com/docker/docker/api/types/container" "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/wait" ) var noopCustomizeRequestOption = func(req *testcontainers.GenericContainerRequest) error { return nil } // withGpu requests a GPU for the container, which could improve performance for some models. -// This option will be automaticall added to the Ollama container to check if the host supports nvidia. +// This option will be automatically added to the Ollama container to check if the host supports nvidia. func withGpu() testcontainers.CustomizeRequestOption { cli, err := testcontainers.NewDockerClientWithOpts(context.Background()) if err != nil { @@ -40,29 +42,50 @@ func withGpu() testcontainers.CustomizeRequestOption { }) } -var _ testcontainers.ContainerCustomizer = (*UseLocal)(nil) +var _ testcontainers.ContainerCustomizer = (*useLocal)(nil) -// UseLocal will use the local Ollama instance instead of pulling the Docker image. -type UseLocal struct { +// useLocal will use the local Ollama instance instead of pulling the Docker image. +type useLocal struct { env []string } // WithUseLocal the module will use the local Ollama instance instead of pulling the Docker image. // Pass the environment variables you need to set for the Ollama binary to be used, // in the format of "KEY=VALUE". KeyValue pairs with the wrong format will cause an error. -func WithUseLocal(values ...string) UseLocal { - return UseLocal{env: values} +func WithUseLocal(values ...string) useLocal { + return useLocal{env: values} } // Customize implements the ContainerCustomizer interface, taking the key value pairs // and setting them as environment variables for the Ollama binary. // In the case of an invalid key value pair, an error is returned. -func (u UseLocal) Customize(req *testcontainers.GenericContainerRequest) error { - env := make(map[string]string) - for _, kv := range u.env { +func (u useLocal) Customize(req *testcontainers.GenericContainerRequest) error { + // Replace the default host port strategy with one that waits for a log entry. + if err := wait.Walk(&req.WaitingFor, func(w wait.Strategy) error { + if _, ok := w.(*wait.HostPortStrategy); ok { + return wait.VisitRemove + } + + return nil + }); err != nil { + return fmt.Errorf("walk strategies: %w", err) + } + + logStrategy := wait.ForLog(localLogRegex).AsRegexp() + if req.WaitingFor == nil { + req.WaitingFor = logStrategy + } else { + req.WaitingFor = wait.ForAll(req.WaitingFor, logStrategy) + } + + osEnv := os.Environ() + env := make(map[string]string, len(osEnv)+len(u.env)+1) + // Use a random port to avoid conflicts by default. + env[localHostVar] = "localhost:0" + for _, kv := range append(osEnv, u.env...) { parts := strings.SplitN(kv, "=", 2) if len(parts) != 2 { - return fmt.Errorf("invalid environment variable: %s", kv) + return fmt.Errorf("invalid environment variable: %q", kv) } env[parts[0]] = parts[1] From 2a3a30db27ee6c068eda615bb76b8972355197bf Mon Sep 17 00:00:00 2001 From: Steven Hartland Date: Wed, 18 Dec 2024 18:58:30 +0000 Subject: [PATCH 35/42] chore(ollama): refactor local to use log sub match. Refactor local processing to use the new log sub match functionality. 
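
As a rough sketch (not part of this diff), the sub match hook has the following shape: the callback receives every submatch of the pattern seen so far and keeps the strategy waiting by returning an error, or stops it by returning nil:

```go
package example

import (
	"fmt"

	"github.com/testcontainers/testcontainers-go/wait"
)

func waitForListening() wait.Strategy {
	return wait.ForLog(`Listening on (.*:\d+) \(version\s(.*)\)`).
		Submatch(func(pattern string, submatches [][][]byte) error {
			for _, m := range submatches {
				if len(m) == 3 {
					// m[1] is the listen address, m[2] the version.
					return nil
				}
			}
			return fmt.Errorf("`%s`: no full match yet", pattern)
		})
}
```
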
--- modules/ollama/local.go | 123 +++++++++++++++++++++-------------- modules/ollama/local_test.go | 1 + modules/ollama/ollama.go | 13 ++-- modules/ollama/options.go | 63 +++--------------- 4 files changed, 92 insertions(+), 108 deletions(-) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index c7200178d2..7ea758ac33 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -10,7 +10,6 @@ import ( "net" "os" "os/exec" - "regexp" "strings" "sync" "syscall" @@ -39,17 +38,15 @@ const ( ) var ( - // Ensure localContext implements the testcontainers.Container interface. - _ testcontainers.Container = &localProcess{} + // Ensure localProcess implements the required interfaces. + _ testcontainers.Container = (*localProcess)(nil) + _ testcontainers.ContainerCustomizer = (*localProcess)(nil) // defaultStopTimeout is the default timeout for stopping the local Ollama process. defaultStopTimeout = time.Second * 5 // zeroTime is the zero time value. zeroTime time.Time - - // reLogDetails is the regular expression to extract the listening address and version from the log. - reLogDetails = regexp.MustCompile(localLogRegex) ) // localProcess emulates the Ollama container using a local process to improve performance. @@ -94,38 +91,31 @@ type localProcess struct { } // runLocal returns an OllamaContainer that uses the local Ollama binary instead of using a Docker container. -func runLocal(ctx context.Context, req testcontainers.GenericContainerRequest) (*OllamaContainer, error) { +func (c *localProcess) run(ctx context.Context, req testcontainers.GenericContainerRequest) (*OllamaContainer, error) { // TODO: validate the request and return an error if it // contains any unsupported elements. - sessionID := testcontainers.SessionID() - local := &localProcess{ - sessionID: sessionID, - env: make([]string, 0, len(req.Env)), - waitFor: req.WaitingFor, - logName: localNamePrefix + "-" + sessionID + ".log", - } - - // Apply the environment variables to the command and - // override the log file if specified. + // Apply the updated details from the request. + c.waitFor = req.WaitingFor + c.env = c.env[:0] for k, v := range req.Env { - local.env = append(local.env, k+"="+v) + c.env = append(c.env, k+"="+v) if k == localLogVar { - local.logName = v + c.logName = v } } - err := local.Start(ctx) - var c *OllamaContainer - if local.cmd != nil { - c = &OllamaContainer{Container: local} + err := c.Start(ctx) + var container *OllamaContainer + if c.cmd != nil { + container = &OllamaContainer{Container: c} } if err != nil { - return nil, fmt.Errorf("start ollama: %w", err) + return container, fmt.Errorf("start ollama: %w", err) } - return c, nil + return container, nil } // Start implements testcontainers.Container interface for the local Ollama binary. @@ -184,10 +174,6 @@ func (c *localProcess) Start(ctx context.Context) error { return fmt.Errorf("wait strategy: %w", err) } - if err := c.extractLogDetails(ctx); err != nil { - return fmt.Errorf("extract log details: %w", err) - } - return nil } @@ -215,33 +201,32 @@ func (c *localProcess) waitStrategy(ctx context.Context) error { } // extractLogDetails extracts the listening address and version from the log. 
-func (c *localProcess) extractLogDetails(ctx context.Context) error { - rc, err := c.Logs(ctx) - if err != nil { - return fmt.Errorf("logs: %w", err) - } - defer rc.Close() +func (c *localProcess) extractLogDetails(pattern string, submatches [][][]byte) error { + var err error + for _, matches := range submatches { + if len(matches) != 3 { + err = fmt.Errorf("`%s` matched %d times, expected %d", pattern, len(matches), 3) + continue + } - bs, err := io.ReadAll(rc) - if err != nil { - return fmt.Errorf("read logs: %w", err) - } + c.host, c.port, err = net.SplitHostPort(string(matches[1])) + if err != nil { + return wait.NewPermanentError(fmt.Errorf("split host port: %w", err)) + } - matches := reLogDetails.FindSubmatch(bs) - if len(matches) != 3 { - return errors.New("address and version not found") + // Set OLLAMA_HOST variable to the extracted host so Exec can use it. + c.env = append(c.env, localHostVar+"="+string(matches[1])) + c.version = string(matches[2]) + + return nil } - c.host, c.port, err = net.SplitHostPort(string(matches[1])) if err != nil { - return fmt.Errorf("split host port: %w", err) + // Return the last error encountered. + return err } - // Set OLLAMA_HOST variable to the extracted host so Exec can use it. - c.env = append(c.env, localHostVar+"="+string(matches[1])) - c.version = string(matches[2]) - - return nil + return fmt.Errorf("address and version not found: `%s` no matches", pattern) } // ContainerIP implements testcontainers.Container interface for the local Ollama binary. @@ -634,6 +619,46 @@ func (c *localProcess) Name(context.Context) (string, error) { return localNamePrefix + "-" + c.sessionID, nil } +// Customize implements the [testcontainers.ContainerCustomizer] interface. +// It configures the environment variables set by [WithUseLocal] and sets up +// the wait strategy to extract the host, port and version from the log. +func (c *localProcess) Customize(req *testcontainers.GenericContainerRequest) error { + // Replace the default host port strategy with one that waits for a log entry + // and extracts the host, port and version from it. + if err := wait.Walk(&req.WaitingFor, func(w wait.Strategy) error { + if _, ok := w.(*wait.HostPortStrategy); ok { + return wait.VisitRemove + } + + return nil + }); err != nil { + return fmt.Errorf("walk strategies: %w", err) + } + + logStrategy := wait.ForLog(localLogRegex).Submatch(c.extractLogDetails) + if req.WaitingFor == nil { + req.WaitingFor = logStrategy + } else { + req.WaitingFor = wait.ForAll(req.WaitingFor, logStrategy) + } + + // Setup the environment variables using a random port by default + // to avoid conflicts. + osEnv := os.Environ() + env := make(map[string]string, len(osEnv)+len(c.env)+1) + env[localHostVar] = "localhost:0" + for _, kv := range append(osEnv, c.env...) { + parts := strings.SplitN(kv, "=", 2) + if len(parts) != 2 { + return fmt.Errorf("invalid environment variable: %q", kv) + } + + env[parts[0]] = parts[1] + } + + return testcontainers.WithEnv(env)(req) +} + // isCleanupSafe reports whether all errors in err's tree are one of the // following, so can safely be ignored: // - nil diff --git a/modules/ollama/local_test.go b/modules/ollama/local_test.go index 62dfbb3cac..e2ab0c78ac 100644 --- a/modules/ollama/local_test.go +++ b/modules/ollama/local_test.go @@ -29,6 +29,7 @@ const ( ) var ( + // reLogDetails matches the log details of the local ollama binary and should match localLogRegex. 
reLogDetails = regexp.MustCompile(`Listening on (.*:\d+) \(version\s(.*)\)`) zeroTime = time.Time{}.Format(time.RFC3339Nano) ) diff --git a/modules/ollama/ollama.go b/modules/ollama/ollama.go index 15f0d7de47..4d78fa171e 100644 --- a/modules/ollama/ollama.go +++ b/modules/ollama/ollama.go @@ -93,21 +93,22 @@ func Run(ctx context.Context, img string, opts ...testcontainers.ContainerCustom Started: true, } - // always request a GPU if the host supports it + // Always request a GPU if the host supports it. opts = append(opts, withGpu()) - var local bool + var local *localProcess for _, opt := range opts { if err := opt.Customize(&req); err != nil { return nil, fmt.Errorf("customize: %w", err) } - if _, ok := opt.(useLocal); ok { - local = true + if l, ok := opt.(*localProcess); ok { + local = l } } - if local { - return runLocal(ctx, req) + // Now we have processed all the options, we can check if we need to use the local process. + if local != nil { + return local.run(ctx, req) } container, err := testcontainers.GenericContainer(ctx, req) diff --git a/modules/ollama/options.go b/modules/ollama/options.go index 88763c7c76..1b345f6e9e 100644 --- a/modules/ollama/options.go +++ b/modules/ollama/options.go @@ -2,14 +2,10 @@ package ollama import ( "context" - "fmt" - "os" - "strings" "github.com/docker/docker/api/types/container" "github.com/testcontainers/testcontainers-go" - "github.com/testcontainers/testcontainers-go/wait" ) var noopCustomizeRequestOption = func(req *testcontainers.GenericContainerRequest) error { return nil } @@ -42,54 +38,15 @@ func withGpu() testcontainers.CustomizeRequestOption { }) } -var _ testcontainers.ContainerCustomizer = (*useLocal)(nil) - -// useLocal will use the local Ollama instance instead of pulling the Docker image. -type useLocal struct { - env []string -} - -// WithUseLocal the module will use the local Ollama instance instead of pulling the Docker image. -// Pass the environment variables you need to set for the Ollama binary to be used, -// in the format of "KEY=VALUE". KeyValue pairs with the wrong format will cause an error. -func WithUseLocal(values ...string) useLocal { - return useLocal{env: values} -} - -// Customize implements the ContainerCustomizer interface, taking the key value pairs -// and setting them as environment variables for the Ollama binary. -// In the case of an invalid key value pair, an error is returned. -func (u useLocal) Customize(req *testcontainers.GenericContainerRequest) error { - // Replace the default host port strategy with one that waits for a log entry. - if err := wait.Walk(&req.WaitingFor, func(w wait.Strategy) error { - if _, ok := w.(*wait.HostPortStrategy); ok { - return wait.VisitRemove - } - - return nil - }); err != nil { - return fmt.Errorf("walk strategies: %w", err) +// WithUseLocal starts a local Ollama process with the given environment in +// KEY=VALUE for instead of a Docker container which can be more performant +// as it has direct access to the GPU. +// By default OLLAMA_HOST is set to localhost:0 to avoid port conflicts. 
+func WithUseLocal(envKeyValues ...string) *localProcess { + sessionID := testcontainers.SessionID() + return &localProcess{ + sessionID: sessionID, + logName: localNamePrefix + "-" + sessionID + ".log", + env: envKeyValues, } - - logStrategy := wait.ForLog(localLogRegex).AsRegexp() - if req.WaitingFor == nil { - req.WaitingFor = logStrategy - } else { - req.WaitingFor = wait.ForAll(req.WaitingFor, logStrategy) - } - - osEnv := os.Environ() - env := make(map[string]string, len(osEnv)+len(u.env)+1) - // Use a random port to avoid conflicts by default. - env[localHostVar] = "localhost:0" - for _, kv := range append(osEnv, u.env...) { - parts := strings.SplitN(kv, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid environment variable: %q", kv) - } - - env[parts[0]] = parts[1] - } - - return testcontainers.WithEnv(env)(req) } From 5e77b278fccb349264a0c5277a259a90e23008ec Mon Sep 17 00:00:00 2001 From: Steven Hartland Date: Wed, 18 Dec 2024 18:59:58 +0000 Subject: [PATCH 36/42] feat(ollama): validate container request Validate the container request to ensure the user configuration can be processed and no fields that would be ignored are present. --- modules/ollama/local.go | 52 +++++++++++++++++++++++++++-- modules/ollama/local_test.go | 64 ++++++++++++++++++++++++++++++++++++ 2 files changed, 114 insertions(+), 2 deletions(-) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index 7ea758ac33..78d8f70c36 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -10,6 +10,7 @@ import ( "net" "os" "os/exec" + "reflect" "strings" "sync" "syscall" @@ -92,8 +93,9 @@ type localProcess struct { // runLocal returns an OllamaContainer that uses the local Ollama binary instead of using a Docker container. func (c *localProcess) run(ctx context.Context, req testcontainers.GenericContainerRequest) (*OllamaContainer, error) { - // TODO: validate the request and return an error if it - // contains any unsupported elements. + if err := c.validateRequest(req); err != nil { + return nil, fmt.Errorf("validate request: %w", err) + } // Apply the updated details from the request. c.waitFor = req.WaitingFor @@ -118,6 +120,52 @@ func (c *localProcess) run(ctx context.Context, req testcontainers.GenericContai return container, nil } +// validateRequest checks that req is valid for the local Ollama binary. +func (c *localProcess) validateRequest(req testcontainers.GenericContainerRequest) error { + var errs []error + if req.WaitingFor == nil { + errs = append(errs, errors.New("ContainerRequest.WaitingFor must be set")) + } + + if !req.Started { + errs = append(errs, errors.New("Started must be true")) + } + + if !reflect.DeepEqual(req.ExposedPorts, []string{localPort + "/tcp"}) { + errs = append(errs, fmt.Errorf("ContainerRequest.ExposedPorts must be %s/tcp got: %s", localPort, req.ExposedPorts)) + } + + // Reset fields we support to their zero values. + req.Env = nil + req.ExposedPorts = nil + req.WaitingFor = nil + req.Image = "" // We just ignore the image. + req.Started = false + req.Logger = nil // We don't need the logger. + + parts := make([]string, 0, 3) + value := reflect.ValueOf(req) + typ := value.Type() + fields := reflect.VisibleFields(typ) + for _, f := range fields { + field := value.FieldByIndex(f.Index) + if field.Kind() == reflect.Struct { + // Only check the leaf fields. 
+ continue + } + + if !field.IsZero() { + parts = parts[:0] + for i := range f.Index { + parts = append(parts, typ.FieldByIndex(f.Index[:i+1]).Name) + } + errs = append(errs, fmt.Errorf("unsupported field: %s = %q", strings.Join(parts, "."), field)) + } + } + + return errors.Join(errs...) +} + // Start implements testcontainers.Container interface for the local Ollama binary. func (c *localProcess) Start(ctx context.Context) error { if c.IsRunning() { diff --git a/modules/ollama/local_test.go b/modules/ollama/local_test.go index e2ab0c78ac..06f99d9f17 100644 --- a/modules/ollama/local_test.go +++ b/modules/ollama/local_test.go @@ -530,3 +530,67 @@ func TestRun_localExec(t *testing.T) { require.Contains(t, string(bs), "llama runner started") }) } + +func TestRun_localValidateRequest(t *testing.T) { + // check if the local ollama binary is available + if _, err := exec.LookPath(testBinary); err != nil { + t.Skip("local ollama binary not found, skipping") + } + + ctx := context.Background() + t.Run("waiting-for-nil", func(t *testing.T) { + ollamaContainer, err := ollama.Run( + ctx, + testImage, + ollama.WithUseLocal("FOO=BAR"), + testcontainers.CustomizeRequestOption(func(req *testcontainers.GenericContainerRequest) error { + req.WaitingFor = nil + return nil + }), + ) + testcontainers.CleanupContainer(t, ollamaContainer) + require.EqualError(t, err, "validate request: ContainerRequest.WaitingFor must be set") + }) + + t.Run("started-false", func(t *testing.T) { + ollamaContainer, err := ollama.Run( + ctx, + testImage, + ollama.WithUseLocal("FOO=BAR"), + testcontainers.CustomizeRequestOption(func(req *testcontainers.GenericContainerRequest) error { + req.Started = false + return nil + }), + ) + testcontainers.CleanupContainer(t, ollamaContainer) + require.EqualError(t, err, "validate request: Started must be true") + }) + + t.Run("exposed-ports-empty", func(t *testing.T) { + ollamaContainer, err := ollama.Run( + ctx, + testImage, + ollama.WithUseLocal("FOO=BAR"), + testcontainers.CustomizeRequestOption(func(req *testcontainers.GenericContainerRequest) error { + req.ExposedPorts = req.ExposedPorts[:0] + return nil + }), + ) + testcontainers.CleanupContainer(t, ollamaContainer) + require.EqualError(t, err, "validate request: ContainerRequest.ExposedPorts must be 11434/tcp got: []") + }) + + t.Run("dockerfile-set", func(t *testing.T) { + ollamaContainer, err := ollama.Run( + ctx, + testImage, + ollama.WithUseLocal("FOO=BAR"), + testcontainers.CustomizeRequestOption(func(req *testcontainers.GenericContainerRequest) error { + req.Dockerfile = "FROM scratch" + return nil + }), + ) + testcontainers.CleanupContainer(t, ollamaContainer) + require.EqualError(t, err, "validate request: unsupported field: ContainerRequest.FromDockerfile.Dockerfile = \"FROM scratch\"") + }) +} From 5c0486a9e7485f20adeec1d7e6169a55e421cc52 Mon Sep 17 00:00:00 2001 From: Steven Hartland Date: Wed, 18 Dec 2024 19:01:47 +0000 Subject: [PATCH 37/42] chore(ollama): remove temporary test Remove temporary simple test. 
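The validation introduced in PATCH 36 deserves a note: it zeroes the fields the local process supports, then uses reflection to reject any remaining non-zero leaf field, so new `ContainerRequest` fields are rejected by default rather than silently ignored. A standalone sketch of that technique, using a stand-in `Request` type rather than the real testcontainers structs:

```go
package main

import (
	"fmt"
	"reflect"
	"strings"
)

// FromDockerfile stands in for an embedded sub-struct, as in the real request.
type FromDockerfile struct {
	Dockerfile string
}

// Request is a stand-in for testcontainers.GenericContainerRequest.
type Request struct {
	Image string
	Env   map[string]string
	FromDockerfile
}

// unsupported zeroes the supported fields, then reports every remaining
// leaf field that is not its type's zero value, using the dotted path of
// field names in the error message.
func unsupported(req Request) []string {
	// Reset the fields we support so only the rest are reported.
	req.Image = ""
	req.Env = nil

	var out []string
	v := reflect.ValueOf(req)
	t := v.Type()
	for _, f := range reflect.VisibleFields(t) {
		field := v.FieldByIndex(f.Index)
		if field.Kind() == reflect.Struct {
			continue // only check the leaf fields
		}

		if !field.IsZero() {
			parts := make([]string, 0, len(f.Index))
			for i := range f.Index {
				parts = append(parts, t.FieldByIndex(f.Index[:i+1]).Name)
			}
			out = append(out, fmt.Sprintf("unsupported field: %s = %q", strings.Join(parts, "."), field))
		}
	}

	return out
}

func main() {
	var req Request
	req.Dockerfile = "FROM scratch"
	fmt.Println(unsupported(req)) // [unsupported field: FromDockerfile.Dockerfile = "FROM scratch"]
}
```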
---
 modules/ollama/ollama_test.go | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/modules/ollama/ollama_test.go b/modules/ollama/ollama_test.go
index 50e383d682..94212dc171 100644
--- a/modules/ollama/ollama_test.go
+++ b/modules/ollama/ollama_test.go
@@ -16,14 +16,6 @@ import (
 	"github.com/testcontainers/testcontainers-go/modules/ollama"
 )

-func TestOllamaBasic(t *testing.T) {
-	ctx := context.Background()
-
-	ctr, err := ollama.Run(ctx, "ollama/ollama:0.1.25")
-	testcontainers.CleanupContainer(t, ctr)
-	require.NoError(t, err)
-}
-
 func TestOllama(t *testing.T) {
 	ctx := context.Background()

From bbd6242c05ab649b07aee3813a4548143d853ad3 Mon Sep 17 00:00:00 2001
From: Steven Hartland
Date: Fri, 20 Dec 2024 17:46:18 +0000
Subject: [PATCH 38/42] feat(ollama): configurable local process binary

Allow the local ollama binary name to be configured using the image name.
---
 modules/ollama/local.go      | 34 ++++++++++++++++++++++++++----
 modules/ollama/local_test.go | 40 ++++++++++++++++++++++++++++++++++++
 modules/ollama/options.go    |  1 +
 3 files changed, 71 insertions(+), 4 deletions(-)

diff --git a/modules/ollama/local.go b/modules/ollama/local.go
index 78d8f70c36..371cbb60c5 100644
--- a/modules/ollama/local.go
+++ b/modules/ollama/local.go
@@ -89,6 +89,9 @@ type localProcess struct {

 	// exitErr is the error returned by the process.
 	exitErr error
+
+	// binary is the name of the Ollama binary.
+	binary string
 }

 // runLocal returns an OllamaContainer that uses the local Ollama binary instead of using a Docker container.
@@ -135,11 +138,34 @@ func (c *localProcess) validateRequest(req testcontainers.GenericContainerReques
 		errs = append(errs, fmt.Errorf("ContainerRequest.ExposedPorts must be %s/tcp got: %s", localPort, req.ExposedPorts))
 	}

+	// Validate the image and extract the binary name.
+	// The image must be in the format "[<path>/]<binary>[:latest]".
+	if binary := req.Image; binary != "" {
+		// Check if the version is "latest" or not specified.
+		if idx := strings.IndexByte(binary, ':'); idx != -1 {
+			if binary[idx+1:] != "latest" {
+				errs = append(errs, fmt.Errorf(`ContainerRequest.Image version must be blank or "latest", got: %q`, binary[idx+1:]))
+			}
+			binary = binary[:idx]
+		}
+
+		// Trim the path if present.
+		if idx := strings.LastIndexByte(binary, '/'); idx != -1 {
+			binary = binary[idx+1:]
+		}
+
+		if _, err := exec.LookPath(binary); err != nil {
+			errs = append(errs, fmt.Errorf("invalid image %q: %w", req.Image, err))
+		} else {
+			c.binary = binary
+		}
+	}
+
 	// Reset fields we support to their zero values.
 	req.Env = nil
 	req.ExposedPorts = nil
 	req.WaitingFor = nil
-	req.Image = "" // We just ignore the image.
+	req.Image = ""
 	req.Started = false
 	req.Logger = nil // We don't need the logger.
@@ -172,7 +198,7 @@ func (c *localProcess) Start(ctx context.Context) error { return errors.New("already running") } - cmd := exec.CommandContext(ctx, localBinary, localServeArg) + cmd := exec.CommandContext(ctx, c.binary, localServeArg) cmd.Env = c.env var err error @@ -330,7 +356,7 @@ func (c *localProcess) GetLogProductionErrorChannel() <-chan error { func (c *localProcess) Exec(ctx context.Context, cmd []string, options ...tcexec.ProcessOption) (int, io.Reader, error) { if len(cmd) == 0 { return 1, nil, errors.New("no command provided") - } else if cmd[0] != localBinary { + } else if cmd[0] != c.binary { return 1, nil, fmt.Errorf("command %q: %w", cmd[0], errors.ErrUnsupported) } @@ -416,7 +442,7 @@ func (c *localProcess) Inspect(ctx context.Context) (*types.ContainerJSON, error nat.Port(localPort + "/tcp"): struct{}{}, }, Hostname: c.host, - Entrypoint: []string{localBinary, localServeArg}, + Entrypoint: []string{c.binary, localServeArg}, }, NetworkSettings: &types.NetworkSettings{ Networks: map[string]*network.EndpointSettings{}, diff --git a/modules/ollama/local_test.go b/modules/ollama/local_test.go index 06f99d9f17..3e0376d4de 100644 --- a/modules/ollama/local_test.go +++ b/modules/ollama/local_test.go @@ -593,4 +593,44 @@ func TestRun_localValidateRequest(t *testing.T) { testcontainers.CleanupContainer(t, ollamaContainer) require.EqualError(t, err, "validate request: unsupported field: ContainerRequest.FromDockerfile.Dockerfile = \"FROM scratch\"") }) + + t.Run("image-only", func(t *testing.T) { + ollamaContainer, err := ollama.Run( + ctx, + testBinary, + ollama.WithUseLocal(), + ) + testcontainers.CleanupContainer(t, ollamaContainer) + require.NoError(t, err) + }) + + t.Run("image-path", func(t *testing.T) { + ollamaContainer, err := ollama.Run( + ctx, + "prefix-path/"+testBinary, + ollama.WithUseLocal(), + ) + testcontainers.CleanupContainer(t, ollamaContainer) + require.NoError(t, err) + }) + + t.Run("image-bad-version", func(t *testing.T) { + ollamaContainer, err := ollama.Run( + ctx, + testBinary+":bad-version", + ollama.WithUseLocal(), + ) + testcontainers.CleanupContainer(t, ollamaContainer) + require.EqualError(t, err, `validate request: ContainerRequest.Image version must be blank or "latest", got: "bad-version"`) + }) + + t.Run("image-not-found", func(t *testing.T) { + ollamaContainer, err := ollama.Run( + ctx, + "ollama/ollama-not-found", + ollama.WithUseLocal(), + ) + testcontainers.CleanupContainer(t, ollamaContainer) + require.EqualError(t, err, `validate request: invalid image "ollama/ollama-not-found": exec: "ollama-not-found": executable file not found in $PATH`) + }) } diff --git a/modules/ollama/options.go b/modules/ollama/options.go index 1b345f6e9e..7095a3ec14 100644 --- a/modules/ollama/options.go +++ b/modules/ollama/options.go @@ -48,5 +48,6 @@ func WithUseLocal(envKeyValues ...string) *localProcess { sessionID: sessionID, logName: localNamePrefix + "-" + sessionID + ".log", env: envKeyValues, + binary: localBinary, } } From cb684b444ea286a7136b342931fec9c19dcca71a Mon Sep 17 00:00:00 2001 From: Steven Hartland Date: Fri, 20 Dec 2024 17:49:41 +0000 Subject: [PATCH 39/42] docs(ollama): detail local process supported fields Detail the container request supported fields. 
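The image-name handling added in PATCH 38 reduces to two string operations: strip an optional `:latest` tag (any other tag is rejected) and drop any path prefix, leaving the binary name to look up on the `PATH`. A standalone sketch of those rules; the function name is mine, not the module's:

```go
package main

import (
	"fmt"
	"strings"
)

// binaryFromImage derives a local binary name from an image-style
// reference of the form "[<path>/]<binary>[:latest]": an explicit tag
// must be "latest", and any path prefix is discarded.
func binaryFromImage(image string) (string, error) {
	binary := image

	// Check if the version is "latest" or not specified.
	if idx := strings.IndexByte(binary, ':'); idx != -1 {
		if binary[idx+1:] != "latest" {
			return "", fmt.Errorf(`version must be blank or "latest", got: %q`, binary[idx+1:])
		}
		binary = binary[:idx]
	}

	// Trim the path if present.
	if idx := strings.LastIndexByte(binary, '/'); idx != -1 {
		binary = binary[idx+1:]
	}

	return binary, nil
}

func main() {
	for _, img := range []string{"ollama", "prefix-path/ollama:latest", "ollama:0.5.7"} {
		b, err := binaryFromImage(img)
		fmt.Printf("%q -> %q, err: %v\n", img, b, err)
	}
}
```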
---
 modules/ollama/options.go | 17 +++++++++++++++--
 1 file changed, 15 insertions(+), 2 deletions(-)

diff --git a/modules/ollama/options.go b/modules/ollama/options.go
index 7095a3ec14..1cf29453fe 100644
--- a/modules/ollama/options.go
+++ b/modules/ollama/options.go
@@ -39,9 +39,22 @@ func withGpu() testcontainers.CustomizeRequestOption {
 }

 // WithUseLocal starts a local Ollama process with the given environment in
-// KEY=VALUE for instead of a Docker container which can be more performant
+// format KEY=VALUE instead of a Docker container, which can be more performant
 // as it has direct access to the GPU.
-// By default OLLAMA_HOST is set to localhost:0 to avoid port conflicts.
+// By default `OLLAMA_HOST=localhost:0` is set to avoid port conflicts.
+//
+// When using this option, the container request will be validated to ensure
+// that only the options that are compatible with the local process are used.
+//
+// Supported fields are:
+// - [testcontainers.GenericContainerRequest.Started] must be set to true
+// - [testcontainers.GenericContainerRequest.ExposedPorts] must be set to ["11434/tcp"]
+// - [testcontainers.ContainerRequest.WaitingFor] should not be changed from the default
+// - [testcontainers.ContainerRequest.Image] used to determine the local process binary [<path>/]<binary>[:latest] if not blank.
+// - [testcontainers.ContainerRequest.Env] applied to all local process executions
+// - [testcontainers.GenericContainerRequest.Logger] is unused
+//
+// Any other leaf field not set to the type's zero value will result in an error.
 func WithUseLocal(envKeyValues ...string) *localProcess {
 	sessionID := testcontainers.SessionID()
 	return &localProcess{

From 9be63096ef9c1cb69c812b71ca04b14bdfd11fea Mon Sep 17 00:00:00 2001
From: Steven Hartland
Date: Fri, 20 Dec 2024 17:59:54 +0000
Subject: [PATCH 40/42] docs(ollama): update local process site docs

Update local process site docs to match recent changes.
---
 docs/modules/ollama.md | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

diff --git a/docs/modules/ollama.md b/docs/modules/ollama.md
index bffe63648e..18cb08b47a 100644
--- a/docs/modules/ollama.md
+++ b/docs/modules/ollama.md
@@ -71,17 +71,17 @@ All the container methods are available when using the local Ollama binary, but
 Please consider the following differences when using the local Ollama binary:

 - The local Ollama binary will create a log file in the current working directory, identified by the session ID. E.g. `local-ollama-<session-id>.log`. It's possible to set the log file name using the `OLLAMA_LOGFILE` environment variable. So if you're running Ollama yourself, from the Ollama app, or the standalone binary, you could use this environment variable to set the same log file name.
-  - For the Ollama app, the default log file resides in the `$HOME/.ollama/logs/server.log`.
-  - For the standalone binary, you should start it redirecting the logs to a file. E.g. `ollama serve > /tmp/ollama.log 2>&1`.
-- `ConnectionString` returns the connection string to connect to the local Ollama binary started by the module instead of the container, which maps to `127.0.0.1:11434`.
-- `ContainerIP` returns `127.0.0.1`.
-- `ContainerIPs` returns `["127.0.0.1"]`.
-- `CopyToContainer`, `CopyDirToContainer`, `CopyFileToContainer` and `CopyFileFromContainer` don't perform any action.
+    - For the Ollama app, the default log file resides in the `$HOME/.ollama/logs/server.log`.
+    - For the standalone binary, you should start it redirecting the logs to a file. E.g. `ollama serve > /tmp/ollama.log 2>&1`.
- `ConnectionString` returns the connection string to connect to the local Ollama binary started by the module instead of the container.
- `ContainerIP` returns the bound host IP `127.0.0.1` by default.
- `ContainerIPs` returns the bound host IP `["127.0.0.1"]` by default.
- `CopyToContainer`, `CopyDirToContainer`, `CopyFileToContainer` and `CopyFileFromContainer` return an error if called.
- `GetLogProductionErrorChannel` returns a nil channel.
- `Endpoint` returns the endpoint to connect to the local Ollama binary started by the module instead of the container.
- `Exec` passes the command to the local Ollama binary started by the module instead of inside the container. The first argument must be the command to execute (the Ollama binary), and the second argument is the list of arguments; otherwise, an error is returned.
- `GetContainerID` returns the container ID of the local Ollama binary started by the module instead of the container, which maps to `local-ollama-<session-id>`.
- `Host` returns the bound host IP `127.0.0.1` by default.
- `Inspect` returns a ContainerJSON with the state of the local Ollama binary started by the module.
- `IsRunning` returns true if the local Ollama binary process started by the module is running.
- `Logs` returns the logs from the local Ollama binary started by the module instead of the container.
- `MappedPort` returns the port mapping for the local Ollama binary started by the module instead of the container.
- `Start` starts the local Ollama binary process.
- `State` returns the current state of the local Ollama binary process, `stopped` or `running`.
- `Stop` stops the local Ollama binary process.
- `Terminate` calls the `Stop` method and then removes the log file.

@@ -95,7 +95,8 @@ The local Ollama binary will create a log file in the current working directory,

 !!!info
     The local Ollama binary will use the `OLLAMA_HOST` environment variable to set the host and port to listen on.
-    If the environment variable is not set, it will use the default host `127.0.0.1` and port `11434`.
+    If the environment variable is not set, it will default to `localhost:0`
+    which binds to a loopback address on an ephemeral port to avoid port conflicts.

 {% include "../features/common_functional_options.md" %}

From 4c3a06ce4ac58faf6414835a9515bcc8685086d1 Mon Sep 17 00:00:00 2001
From: Steven Hartland
Date: Thu, 2 Jan 2025 12:16:03 +0000
Subject: [PATCH 41/42] chore: refactor to support TerminateOption

Refactor Terminate to support testcontainers.TerminateOption.
---
 modules/ollama/local.go | 32 ++++++++++++++++++++------------
 1 file changed, 20 insertions(+), 12 deletions(-)

diff --git a/modules/ollama/local.go b/modules/ollama/local.go
index 371cbb60c5..5811dc7bb6 100644
--- a/modules/ollama/local.go
+++ b/modules/ollama/local.go
@@ -213,7 +213,7 @@ func (c *localProcess) Start(ctx context.Context) error {
 	// Run the ollama serve command in background.
 	if err = cmd.Start(); err != nil {
-		return fmt.Errorf("start ollama serve: %w", errors.Join(err, c.cleanupLog()))
+		return fmt.Errorf("start ollama serve: %w", errors.Join(err, c.cleanup()))
 	}

 	// Past this point, the process was started successfully.
@@ -548,36 +548,44 @@ func (c *localProcess) Stop(ctx context.Context, d *time.Duration) error {

 // Terminate implements testcontainers.Container interface for the local Ollama binary.
 // It stops the local Ollama process, removing the log file.
-func (c *localProcess) Terminate(ctx context.Context) error {
+func (c *localProcess) Terminate(ctx context.Context, opts ...testcontainers.TerminateOption) error {
+	options := testcontainers.NewTerminateOptions(ctx, opts...)
 	// First try to stop gracefully.
- if err := c.Stop(ctx, &defaultStopTimeout); !c.isCleanupSafe(err) { + if err := c.Stop(options.Context(), options.StopTimeout()); !c.isCleanupSafe(err) { return fmt.Errorf("stop: %w", err) } + var errs []error if c.IsRunning() { // Still running, force kill. if err := c.cmd.Process.Kill(); !c.isCleanupSafe(err) { - return fmt.Errorf("kill: %w", err) + // Best effort so we can continue with the cleanup. + errs = append(errs, fmt.Errorf("kill: %w", err)) } - // Wait for the process to exit so capture any error. + // Wait for the process to exit so we can capture any error. c.wg.Wait() } - c.mtx.Lock() - exitErr := c.exitErr - c.mtx.Unlock() + errs = append(errs, c.cleanup(), options.Cleanup()) - return errors.Join(exitErr, c.cleanupLog()) + return errors.Join(errs...) } -// cleanupLog closes the log file and removes it. -func (c *localProcess) cleanupLog() error { +// cleanup performs all clean up, closing and removing the log file if set. +func (c *localProcess) cleanup() error { + c.mtx.Lock() + defer c.mtx.Unlock() + if c.logFile == nil { - return nil + return c.exitErr } var errs []error + if c.exitErr != nil { + errs = append(errs, fmt.Errorf("exit: %w", c.exitErr)) + } + if err := c.logFile.Close(); err != nil { errs = append(errs, fmt.Errorf("close log: %w", err)) } From 39e7af4eccfaa9c144714668850bc6be28a76d9a Mon Sep 17 00:00:00 2001 From: Steven Hartland Date: Thu, 2 Jan 2025 13:25:44 +0000 Subject: [PATCH 42/42] fix: remove unused var --- modules/ollama/local.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/modules/ollama/local.go b/modules/ollama/local.go index 5811dc7bb6..5751ceee07 100644 --- a/modules/ollama/local.go +++ b/modules/ollama/local.go @@ -43,9 +43,6 @@ var ( _ testcontainers.Container = (*localProcess)(nil) _ testcontainers.ContainerCustomizer = (*localProcess)(nil) - // defaultStopTimeout is the default timeout for stopping the local Ollama process. - defaultStopTimeout = time.Second * 5 - // zeroTime is the zero time value. zeroTime time.Time )
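Taken together, the series yields the following user-facing flow. A hedged usage sketch, assuming an `ollama` binary on the `PATH` and a testcontainers-go version that includes these patches; the printed URL format is an assumption based on the module's connection-string behaviour:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/testcontainers/testcontainers-go/modules/ollama"
)

func main() {
	ctx := context.Background()

	// Run the local ollama binary instead of a container; the image name
	// only determines which binary to look up on the PATH.
	ctr, err := ollama.Run(ctx, "ollama/ollama", ollama.WithUseLocal("OLLAMA_DEBUG=true"))
	if err != nil {
		log.Fatalf("run: %v", err)
	}
	// Terminate stops the serve process and removes its log file.
	defer func() {
		if err := ctr.Terminate(ctx); err != nil {
			log.Printf("terminate: %v", err)
		}
	}()

	url, err := ctr.ConnectionString(ctx)
	if err != nil {
		log.Fatalf("connection string: %v", err)
	}
	fmt.Println("ollama serving at", url) // e.g. http://127.0.0.1:<ephemeral-port>
}
```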