File: Broken/Externals/ollama.py

Broken.Externals.ollama

BrokenOllama

Bases: ExternalModelsBase

Source code in Broken/Externals/ollama.py
class BrokenOllama(ExternalModelsBase):
    model: Annotated[str, Option("--model", "-m",
        help="[bold green](🟢 Basic)[/] Any valid model name from https://ollama.com/library")] = \
        Field("qwen2")

    def install(self):
        if bool(shutil.which("ollama")):
            return

        log.warning("Ollama binary [green]'ollama'[/] wasn't found on PATH, installing..")

        if BrokenPlatform.OnMacOS:
            raise RuntimeError("Ollama installation on macOS is untested, please get it at their website")
            # Unreachable until macOS support is tested:
            # url = "https://github.com/ollama/ollama/releases/latest/download/Ollama-darwin.zip"

        elif BrokenPlatform.OnWindows:
            url = "https://github.com/ollama/ollama/releases/latest/download/ollama-windows-amd64.zip"

        elif BrokenPlatform.OnLinux:
            log.warning("")
            log.warning("The installation on Linux is slightly non-trivial, and it's better to use their official script")
            log.warning("• Please, get it at their website https://ollama.com/download/linux")
            log.warning("• Hint: run [green]'curl -fsSL https://ollama.com/install.sh | sh'[/]")
            log.warning("• Alternatively, install from your distro's package manager")
            exit(0)

        BrokenPath.get_external(url)

    def _load_model(self):
        self.install()

        # Download the model if it isn't found (external call for progress bars)
        if shell("ollama", "show", self.model, echo=False, stdout=subprocess.DEVNULL).returncode != 0:
            if shell("ollama", "pull", self.model).returncode != 0:
                raise RuntimeError(f"Couldn't pull model {self.model}")

    def prompt(self,
        prompt: str,
        *,
        system: str="",
        temperature: float=0.6,
        # top_k: int=10,
        # top_p: float=0.3,
    ) -> str:
        self.load_model()

        with Halo(f"Ollama model ({self.model}) is thinking.."):
            return ollama.generate(
                model=self.model,
                prompt=prompt,
                system=system,
                keep_alive=1,
                options=dict(
                    num_tokens=(2**10)*(2**4),
                    temperature=temperature,
                    num_ctx=4096,
                    # top_k=top_k,
                    # top_p=top_p,
                )
            )["response"]
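
A minimal usage sketch (not part of the source above; the import path is assumed from the file location Broken/Externals/ollama.py): instantiate the class, optionally pick a model tag, and call prompt(). The first call is expected to pull the model through the ollama CLI if it isn't present locally.

from Broken.Externals.ollama import BrokenOllama

llm = BrokenOllama(model="qwen2")  # any tag from https://ollama.com/library
answer = llm.prompt(
    "Summarize what the Ollama project does in one sentence.",
    system="You are a concise assistant.",
    temperature=0.6,
)
print(answer)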

model

model: Annotated[
    str,
    Option(
        "--model",
        "-m",
        help="[bold green](🟢 Basic)[/] Any valid model name from https://ollama.com/library",
    ),
] = Field("qwen2")
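
The field doubles as a pydantic Field and a Typer Option, so the model tag can be set either in code or through a CLI flag. A hedged sketch follows; the flag names come from the annotation above, while the CLI entry point itself is hypothetical:

# In code: override the default "qwen2" with any tag from https://ollama.com/library
llm = BrokenOllama(model="llama3.1")

# On a CLI wired to this class (hypothetical entry point, real flag names):
#   your-cli ... --model llama3.1
#   your-cli ... -m llama3.1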

install

install()
Source code in Broken/Externals/ollama.py
def install(self):
    if bool(shutil.which("ollama")):
        return

    log.warning("Ollama binary [green]'ollama'[/] wasn't found on PATH, installing..")

    if BrokenPlatform.OnMacOS:
        raise RuntimeError("Ollama installation on macOS is untested, please get it at their website")
        # Unreachable until macOS support is tested:
        # url = "https://github.com/ollama/ollama/releases/latest/download/Ollama-darwin.zip"

    elif BrokenPlatform.OnWindows:
        url = "https://github.com/ollama/ollama/releases/latest/download/ollama-windows-amd64.zip"

    elif BrokenPlatform.OnLinux:
        log.warning("")
        log.warning("The installation on Linux is slightly non-trivial, and it's better to use their official script")
        log.warning("• Please, get it at their website https://ollama.com/download/linux")
        log.warning("• Hint: run [green]'curl -fsSL https://ollama.com/install.sh | sh'[/]")
        log.warning("• Alternatively, install from your distro's package manager")
        exit(0)

    BrokenPath.get_external(url)
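
A hedged sketch of calling install() directly (it is also called from _load_model(), so an explicit call is optional); the per-platform behavior in the comments mirrors the source above:

llm = BrokenOllama()
llm.install()  # no-op when 'ollama' is already on PATH
# Windows: downloads the release zip via BrokenPath.get_external()
# macOS:   raises RuntimeError (untested), install from ollama.com instead
# Linux:   prints the official install-script hints and exits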

prompt

prompt(
    prompt: str,
    *,
    system: str = "",
    temperature: float = 0.6
) -> str
Source code in Broken/Externals/ollama.py
def prompt(self,
    prompt: str,
    *,
    system: str="",
    temperature: float=0.6,
    # top_k: int=10,
    # top_p: float=0.3,
) -> str:
    self.load_model()

    with Halo(f"Ollama model ({self.model}) is thinking.."):
        return ollama.generate(
            model=self.model,
            prompt=prompt,
            system=system,
            keep_alive=1,
            options=dict(
                num_tokens=(2**10)*(2**4),
                temperature=temperature,
                num_ctx=4096,
                # top_k=top_k,
                # top_p=top_p,
            )
        )["response"]
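
A short hedged sketch of repeated calls (the question strings are illustrative): each call goes through load_model(), which presumably dispatches to _load_model() in the base class, so after the first pull the 'ollama show' check succeeds and later prompts skip the download.

llm = BrokenOllama(model="qwen2")
for question in ("What is CUDA?", "What does Vulkan do?"):
    # Each call re-checks the model (a cheap 'ollama show') and then generates
    print(llm.prompt(question, system="Answer in one short paragraph."))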