from promplate.llm import * #

promplate.llm.base #

Configurable #

Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
class Configurable:
    def __init__(self, **config):
        for key, val in config.items():
            setattr(self, key, val)

    @property
    def _config(self):
        return {k: v for k, v in self.__dict__.items() if not k.startswith("_")}

__init__ #

__init__(**config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
def __init__(self, **config):
    for key, val in config.items():
        setattr(self, key, val)
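
Configurable stores arbitrary keyword arguments as attributes; the _config property then exposes every attribute whose name does not start with an underscore. A minimal sketch of how it behaves (the subclass name MyConfig is hypothetical):

from promplate.llm.base import Configurable

class MyConfig(Configurable):
    pass

c = MyConfig(model="gpt-3.5-turbo", temperature=0.5)
print(c.model)    # gpt-3.5-turbo
print(c._config)  # {'model': 'gpt-3.5-turbo', 'temperature': 0.5}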

Complete #

Bases: Protocol

Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
class Complete(Protocol):
    def __call__(self, prompt, /, **config) -> str: ...

__call__ #

__call__(prompt, /, **config) -> str
Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
def __call__(self, prompt, /, **config) -> str: ...
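
Because Complete is a Protocol, any callable that takes the prompt as its first positional-only argument and returns a str conforms; no subclassing is required. A minimal sketch (fake_complete is hypothetical):

def fake_complete(prompt, /, **config) -> str:
    # a real implementation would call a model here
    return f"echo: {prompt}"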

Generate #

Bases: Protocol

Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
class Generate(Protocol):
    def __call__(self, prompt, /, **config) -> Iterable[str]: ...

__call__ #

__call__(prompt, /, **config) -> Iterable[str]
Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
def __call__(self, prompt, /, **config) -> Iterable[str]: ...
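
Generate is the streaming counterpart: the callable returns an Iterable[str] of chunks. A minimal sketch (fake_generate is hypothetical):

def fake_generate(prompt, /, **config):
    # yield the prompt back one word at a time
    yield from prompt.split()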

AsyncComplete #

Bases: Protocol

Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
class AsyncComplete(Protocol):
    def __call__(self, prompt, /, **config) -> Awaitable[str]: ...

__call__ #

__call__(prompt, /, **config) -> Awaitable[str]
Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
def __call__(self, prompt, /, **config) -> Awaitable[str]: ...

AsyncGenerate #

Bases: Protocol

Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
class AsyncGenerate(Protocol):
    def __call__(self, prompt, /, **config) -> AsyncIterable[str]: ...

__call__ #

__call__(prompt, /, **config) -> AsyncIterable[str]
Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
def __call__(self, prompt, /, **config) -> AsyncIterable[str]: ...
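
AsyncComplete and AsyncGenerate mirror the synchronous protocols: calling an async def function returns an Awaitable[str], and calling an async generator function returns an AsyncIterable[str]. Minimal sketches (both function names are hypothetical):

import asyncio

async def fake_async_complete(prompt, /, **config) -> str:
    await asyncio.sleep(0)  # stand-in for network latency
    return f"echo: {prompt}"

async def fake_async_generate(prompt, /, **config):
    for word in prompt.split():
        await asyncio.sleep(0)
        yield word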

LLM #

Bases: Protocol

Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
class LLM(Protocol):
    @partial(cast, Complete | AsyncComplete)
    def complete(self, prompt, /, **config) -> str | Awaitable[str]: ...

    @partial(cast, Generate | AsyncGenerate)
    def generate(self, prompt, /, **config) -> Iterable[str] | AsyncIterable[str]: ...

complete #

complete(prompt, /, **config) -> str | Awaitable[str]
Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
@partial(cast, Complete | AsyncComplete)
def complete(self, prompt, /, **config) -> str | Awaitable[str]: ...

generate #

generate(prompt, /, **config) -> Iterable[str] | AsyncIterable[str]
Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
@partial(cast, Generate | AsyncGenerate)
def generate(self, prompt, /, **config) -> Iterable[str] | AsyncIterable[str]: ...
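
The partial(cast, ...) decorators are a typing device: at runtime, cast returns its second argument unchanged, so the methods keep their union return types for static checkers without any runtime wrapping. Any object with matching complete and generate methods satisfies LLM structurally; a minimal synchronous sketch (EchoLLM is hypothetical):

from promplate.llm.base import LLM

class EchoLLM:
    def complete(self, prompt, /, **config) -> str:
        return prompt

    def generate(self, prompt, /, **config):
        yield from prompt.split()

llm: LLM = EchoLLM()  # accepted by static checkers via structural typing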

promplate.llm.openai #

meta module-attribute #

meta = metadata('promplate')

Message #

Bases: TypedDict

Source code in .venv/lib/python3.12/site-packages/promplate/prompt/chat.py
class Message(TypedDict):
    role: Role
    content: str
    name: NotRequired[str]

role instance-attribute #

role: Role

content instance-attribute #

content: str

name instance-attribute #

name: NotRequired[str]
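
Message is a plain TypedDict, so ordinary dict literals conform to it; name is optional thanks to NotRequired. For example, with a role value such as "user":

from promplate.prompt.chat import Message

message: Message = {"role": "user", "content": "Hello!"}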

The protocol and base classes re-exported here (Configurable, Complete, Generate, AsyncComplete, AsyncGenerate, LLM) are documented above under promplate.llm.base.

TextComplete #

Bases: ClientConfig

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
class TextComplete(ClientConfig):
    def __call__(self, text: str, /, **config):
        config = self._run_config | config | {"prompt": text}
        result = self._client.completions.create(**config, stream=False)
        return result.choices[0].text

__call__ #

__call__(text: str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
def __call__(self, text: str, /, **config):
    config = self._run_config | config | {"prompt": text}
    result = self._client.completions.create(**config, stream=False)
    return result.choices[0].text
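
The constructor accepts the same parameters as openai's Client (see ClientConfig under v1 below), and per-call keyword arguments are merged over any config previously attached with bind. A hedged usage sketch (the API key placeholder and model name are illustrative):

from promplate.llm.openai import TextComplete

complete = TextComplete(api_key="sk-...")
text = complete("Once upon a time", model="gpt-3.5-turbo-instruct", max_tokens=16)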

AsyncTextComplete #

Bases: AsyncClientConfig

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
class AsyncTextComplete(AsyncClientConfig):
    async def __call__(self, text: str, /, **config):
        config = self._run_config | config | {"prompt": text}
        result = await self._aclient.completions.create(**config, stream=False)
        return result.choices[0].text

__call__ async #

__call__(text: str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
async def __call__(self, text: str, /, **config):
    config = self._run_config | config | {"prompt": text}
    result = await self._aclient.completions.create(**config, stream=False)
    return result.choices[0].text

TextGenerate #

Bases: ClientConfig

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
class TextGenerate(ClientConfig):
    def __call__(self, text: str, /, **config):
        config = self._run_config | config | {"prompt": text}
        stream = self._client.completions.create(**config, stream=True)
        for event in stream:
            with suppress(AttributeError, IndexError):
                yield event.choices[0].text

__call__ #

__call__(text: str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
def __call__(self, text: str, /, **config):
    config = self._run_config | config | {"prompt": text}
    stream = self._client.completions.create(**config, stream=True)
    for event in stream:
        with suppress(AttributeError, IndexError):
            yield event.choices[0].text

AsyncTextGenerate #

Bases: AsyncClientConfig

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
class AsyncTextGenerate(AsyncClientConfig):
    async def __call__(self, text: str, /, **config):
        config = self._run_config | config | {"prompt": text}
        stream = await self._aclient.completions.create(**config, stream=True)
        async for event in stream:
            with suppress(AttributeError, IndexError):
                yield event.choices[0].text

__call__ async #

__call__(text: str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
async def __call__(self, text: str, /, **config):
    config = self._run_config | config | {"prompt": text}
    stream = await self._aclient.completions.create(**config, stream=True)
    async for event in stream:
        with suppress(AttributeError, IndexError):
            yield event.choices[0].text

ChatComplete #

Bases: ClientConfig

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
class ChatComplete(ClientConfig):
    def __call__(self, messages: list[Message] | str, /, **config):
        messages = ensure(messages)
        config = self._run_config | config | {"messages": messages}
        result = self._client.chat.completions.create(**config, stream=False)
        return result.choices[0].message.content or ""

__call__ #

__call__(messages: list[Message] | str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
def __call__(self, messages: list[Message] | str, /, **config):
    messages = ensure(messages)
    config = self._run_config | config | {"messages": messages}
    result = self._client.chat.completions.create(**config, stream=False)
    return result.choices[0].message.content or ""

AsyncChatComplete #

Bases: AsyncClientConfig

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
class AsyncChatComplete(AsyncClientConfig):
    async def __call__(self, messages: list[Message] | str, /, **config):
        messages = ensure(messages)
        config = self._run_config | config | {"messages": messages}
        result = await self._aclient.chat.completions.create(**config, stream=False)
        return result.choices[0].message.content or ""

__call__ async #

__call__(messages: list[Message] | str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
async def __call__(self, messages: list[Message] | str, /, **config):
    messages = ensure(messages)
    config = self._run_config | config | {"messages": messages}
    result = await self._aclient.chat.completions.create(**config, stream=False)
    return result.choices[0].message.content or ""

ChatGenerate #

Bases: ClientConfig

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
class ChatGenerate(ClientConfig):
    def __call__(self, messages: list[Message] | str, /, **config):
        messages = ensure(messages)
        config = self._run_config | config | {"messages": messages}
        stream = self._client.chat.completions.create(**config, stream=True)
        for event in stream:
            with suppress(AttributeError, IndexError):
                yield event.choices[0].delta.content or ""

__call__ #

__call__(messages: list[Message] | str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
def __call__(self, messages: list[Message] | str, /, **config):
    messages = ensure(messages)
    config = self._run_config | config | {"messages": messages}
    stream = self._client.chat.completions.create(**config, stream=True)
    for event in stream:
        with suppress(AttributeError, IndexError):
            yield event.choices[0].delta.content or ""

AsyncChatGenerate #

Bases: AsyncClientConfig

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
class AsyncChatGenerate(AsyncClientConfig):
    async def __call__(self, messages: list[Message] | str, /, **config):
        messages = ensure(messages)
        config = self._run_config | config | {"messages": messages}
        stream = await self._aclient.chat.completions.create(**config, stream=True)
        async for event in stream:
            with suppress(AttributeError, IndexError):
                yield event.choices[0].delta.content or ""

__call__ async #

__call__(messages: list[Message] | str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
async def __call__(self, messages: list[Message] | str, /, **config):
    messages = ensure(messages)
    config = self._run_config | config | {"messages": messages}
    stream = await self._aclient.chat.completions.create(**config, stream=True)
    async for event in stream:
        with suppress(AttributeError, IndexError):
            yield event.choices[0].delta.content or ""

SyncTextOpenAI #

Bases: ClientConfig, LLM

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
class SyncTextOpenAI(ClientConfig, LLM):
    complete = TextComplete.__call__
    generate = TextGenerate.__call__

complete class-attribute instance-attribute #

complete = __call__

generate class-attribute instance-attribute #

generate = __call__

AsyncTextOpenAI #

Bases: AsyncClientConfig, LLM

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
class AsyncTextOpenAI(AsyncClientConfig, LLM):
    complete = AsyncTextComplete.__call__
    generate = AsyncTextGenerate.__call__

complete class-attribute instance-attribute #

complete = __call__

generate class-attribute instance-attribute #

generate = __call__

SyncChatOpenAI #

Bases: ClientConfig, LLM

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
class SyncChatOpenAI(ClientConfig, LLM):
    complete = ChatComplete.__call__
    generate = ChatGenerate.__call__

complete class-attribute instance-attribute #

complete = __call__

generate class-attribute instance-attribute #

generate = __call__
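
SyncChatOpenAI bundles the chat complete and generate callables behind the LLM protocol, and bind (inherited from Config, documented under v1 below) attaches run config such as the model. A hedged usage sketch:

from promplate.llm.openai import SyncChatOpenAI

llm = SyncChatOpenAI(api_key="sk-...").bind(model="gpt-3.5-turbo")
print(llm.complete([{"role": "user", "content": "Hi!"}]))            # single completion
for chunk in llm.generate([{"role": "user", "content": "Hi!"}]):     # streaming
    print(chunk, end="")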

AsyncChatOpenAI #

Bases: AsyncClientConfig, LLM

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
class AsyncChatOpenAI(AsyncClientConfig, LLM):
    complete = AsyncChatComplete.__call__
    generate = AsyncChatGenerate.__call__

complete class-attribute instance-attribute #

complete = __call__

generate class-attribute instance-attribute #

generate = __call__

ensure #

ensure(text_or_list: list[Message] | str) -> list[Message]
Source code in .venv/lib/python3.12/site-packages/promplate/prompt/chat.py
def ensure(text_or_list: list[Message] | str) -> list[Message]:
    return parse_chat_markup(text_or_list) if isinstance(text_or_list, str) else text_or_list
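
ensure normalizes the prompt: a list of Message dicts passes through unchanged, while a string is parsed into messages by parse_chat_markup. For example:

from promplate.prompt.chat import ensure

messages = ensure([{"role": "user", "content": "Hi"}])  # returned as-is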

v0 #

meta module-attribute #

meta = metadata('promplate')

The re-exported Configurable, Complete, Generate, AsyncComplete, AsyncGenerate, and LLM are documented above under promplate.llm.base.

Config #

Bases: Configurable

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
class Config(Configurable):
    def __init__(
        self,
        model: str,
        temperature: float | int | None = None,
        top_p: float | int | None = None,
        stop: str | list[str] | None = None,
        max_tokens: int | None = None,
        api_key: str | None = None,
        api_base: str | None = None,
        **other_config,
    ):
        self.model = model
        self.temperature = temperature
        self.top_p = top_p
        self.stop = stop
        self.max_tokens = max_tokens
        self.api_key = api_key
        self.api_base = api_base

        for key, val in other_config.items():
            setattr(self, key, val)

    def __setattr__(self, *_): ...

    def __getattr__(self, _): ...

model instance-attribute #

model = model

temperature instance-attribute #

temperature = temperature

top_p instance-attribute #

top_p = top_p

stop instance-attribute #

stop = stop

max_tokens instance-attribute #

max_tokens = max_tokens

api_key instance-attribute #

api_key = api_key

api_base instance-attribute #

api_base = api_base

__init__ #

__init__(model: str, temperature: float | int | None = None, top_p: float | int | None = None, stop: str | list[str] | None = None, max_tokens: int | None = None, api_key: str | None = None, api_base: str | None = None, **other_config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
def __init__(
    self,
    model: str,
    temperature: float | int | None = None,
    top_p: float | int | None = None,
    stop: str | list[str] | None = None,
    max_tokens: int | None = None,
    api_key: str | None = None,
    api_base: str | None = None,
    **other_config,
):
    self.model = model
    self.temperature = temperature
    self.top_p = top_p
    self.stop = stop
    self.max_tokens = max_tokens
    self.api_key = api_key
    self.api_base = api_base

    for key, val in other_config.items():
        setattr(self, key, val)

__setattr__ #

__setattr__(*_)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
def __setattr__(self, *_): ...

__getattr__ #

__getattr__(_)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
def __getattr__(self, _): ...

TextComplete #

Bases: Config, Complete

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
class TextComplete(Config, Complete):
    def __call__(self, text: str, /, **config):
        config = self._config | config | {"stream": False, "prompt": text}
        result: Any = Completion.create(**config)
        return result["choices"][0]["text"]

__call__ #

__call__(text: str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
def __call__(self, text: str, /, **config):
    config = self._config | config | {"stream": False, "prompt": text}
    result: Any = Completion.create(**config)
    return result["choices"][0]["text"]

AsyncTextComplete #

Bases: Config, AsyncComplete

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
class AsyncTextComplete(Config, AsyncComplete):
    async def __call__(self, text: str, /, **config):
        config = self._config | config | {"stream": False, "prompt": text}
        result: Any = await Completion.acreate(**config)
        return result["choices"][0]["text"]

__call__ async #

__call__(text: str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
async def __call__(self, text: str, /, **config):
    config = self._config | config | {"stream": False, "prompt": text}
    result: Any = await Completion.acreate(**config)
    return result["choices"][0]["text"]

TextGenerate #

Bases: Config, Generate

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
class TextGenerate(Config, Generate):
    def __call__(self, text: str, /, **config):
        config = self._config | config | {"stream": True, "prompt": text}
        stream: Any = Completion.create(**config)
        for event in stream:
            yield event["choices"][0]["text"]

__call__ #

__call__(text: str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
def __call__(self, text: str, /, **config):
    config = self._config | config | {"stream": True, "prompt": text}
    stream: Any = Completion.create(**config)
    for event in stream:
        yield event["choices"][0]["text"]

AsyncTextGenerate #

Bases: Config, AsyncGenerate

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
class AsyncTextGenerate(Config, AsyncGenerate):
    async def __call__(self, text: str, /, **config):
        config = self._config | config | {"stream": True, "prompt": text}
        stream: Any = await Completion.acreate(**config)
        async for event in stream:
            yield event["choices"][0]["text"]

__call__ async #

__call__(text: str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
async def __call__(self, text: str, /, **config):
    config = self._config | config | {"stream": True, "prompt": text}
    stream: Any = await Completion.acreate(**config)
    async for event in stream:
        yield event["choices"][0]["text"]

ChatComplete #

Bases: Config, Complete

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
class ChatComplete(Config, Complete):
    def __call__(self, messages: list[Message] | str, /, **config):
        messages = ensure(messages)
        config = self._config | config | {"stream": False, "messages": messages}
        result: Any = ChatCompletion.create(**config)
        return result["choices"][0]["message"]["content"]

__call__ #

__call__(messages: list[Message] | str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
def __call__(self, messages: list[Message] | str, /, **config):
    messages = ensure(messages)
    config = self._config | config | {"stream": False, "messages": messages}
    result: Any = ChatCompletion.create(**config)
    return result["choices"][0]["message"]["content"]

AsyncChatComplete #

Bases: Config, AsyncComplete

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
class AsyncChatComplete(Config, AsyncComplete):
    async def __call__(self, messages: list[Message] | str, /, **config):
        messages = ensure(messages)
        config = self._config | config | {"stream": False, "messages": messages}
        result: Any = await ChatCompletion.acreate(**config)
        return result["choices"][0]["message"]["content"]

__call__ async #

__call__(messages: list[Message] | str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
async def __call__(self, messages: list[Message] | str, /, **config):
    messages = ensure(messages)
    config = self._config | config | {"stream": False, "messages": messages}
    result: Any = await ChatCompletion.acreate(**config)
    return result["choices"][0]["message"]["content"]

ChatGenerate #

Bases: Config, Generate

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
class ChatGenerate(Config, Generate):
    def __call__(self, messages: list[Message] | str, /, **config):
        messages = ensure(messages)
        config = self._config | config | {"stream": True, "messages": messages}
        stream: Any = ChatCompletion.create(**config)
        for event in stream:
            delta: dict = event["choices"][0]["delta"]
            yield delta.get("content", "")

__call__ #

__call__(messages: list[Message] | str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
def __call__(self, messages: list[Message] | str, /, **config):
    messages = ensure(messages)
    config = self._config | config | {"stream": True, "messages": messages}
    stream: Any = ChatCompletion.create(**config)
    for event in stream:
        delta: dict = event["choices"][0]["delta"]
        yield delta.get("content", "")

AsyncChatGenerate #

Bases: Config, AsyncGenerate

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
class AsyncChatGenerate(Config, AsyncGenerate):
    async def __call__(self, messages: list[Message] | str, /, **config):
        messages = ensure(messages)
        config = self._config | config | {"stream": True, "messages": messages}
        stream: Any = await ChatCompletion.acreate(**config)
        async for event in stream:
            delta: dict = event["choices"][0]["delta"]
            yield delta.get("content", "")

__call__ async #

__call__(messages: list[Message] | str, /, **config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
async def __call__(self, messages: list[Message] | str, /, **config):
    messages = ensure(messages)
    config = self._config | config | {"stream": True, "messages": messages}
    stream: Any = await ChatCompletion.acreate(**config)
    async for event in stream:
        delta: dict = event["choices"][0]["delta"]
        yield delta.get("content", "")

v1 #

P module-attribute #

P = ParamSpec('P')

T module-attribute #

T = TypeVar('T')

__all__ module-attribute #

__all__ = ('TextComplete', 'AsyncTextComplete', 'TextGenerate', 'AsyncTextGenerate', 'ChatComplete', 'AsyncChatComplete', 'ChatGenerate', 'AsyncChatGenerate', 'SyncTextOpenAI', 'AsyncTextOpenAI', 'SyncChatOpenAI', 'AsyncChatOpenAI')

The re-exported Configurable, Complete, Generate, AsyncComplete, AsyncGenerate, and LLM are documented above under promplate.llm.base.

Config #

Bases: Configurable

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
class Config(Configurable):
    def __init__(self, **config):
        super().__init__(**config)
        self._run_config = {}

    def bind(self, **run_config):
        obj = copy(self)
        obj._run_config = self._run_config | run_config
        return obj

    @cached_property
    def _user_agent(self):
        from openai.version import VERSION

        return get_user_agent(self, ("OpenAI", VERSION))

    @property
    def _config(self):  # type: ignore
        ua_header = {"User-Agent": self._user_agent}
        config = dict(super()._config)
        config["default_headers"] = config.get("default_headers", {}) | ua_header
        return MappingProxyType(config)

    @cached_property
    def _client(self):
        if "http_client" in self._config:
            return Client(**self._config)
        else:
            return Client(**self._config, http_client=_get_client())

    @cached_property
    def _aclient(self):
        if "http_client" in self._config:
            return AsyncClient(**self._config)
        else:
            return AsyncClient(**self._config, http_client=_get_aclient())

__init__ #

__init__(**config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
def __init__(self, **config):
    super().__init__(**config)
    self._run_config = {}

bind #

bind(**run_config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
def bind(self, **run_config):
    obj = copy(self)
    obj._run_config = self._run_config | run_config
    return obj
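
bind returns a shallow copy whose _run_config has the new entries merged in, so a base configuration can be specialized without mutating the original. For example:

from promplate.llm.openai import ChatComplete

base = ChatComplete(api_key="sk-...")
fast = base.bind(model="gpt-3.5-turbo", temperature=0)
# base keeps its own (empty) run config; fast carries the bound one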

ClientConfig #

Bases: Config

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
class ClientConfig(Config):
    @same_params_as(Client)
    def __init__(self, **config): ...

__init__ #

__init__(**config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
@same_params_as(Client)
def __init__(self, **config): ...

AsyncClientConfig #

Bases: Config

Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
class AsyncClientConfig(Config):
    @same_params_as(AsyncClient)
    def __init__(self, **config): ...

__init__ #

__init__(**config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
@same_params_as(AsyncClient)
def __init__(self, **config): ...

TextComplete, AsyncTextComplete, TextGenerate, AsyncTextGenerate, ChatComplete, AsyncChatComplete, ChatGenerate, AsyncChatGenerate, SyncTextOpenAI, AsyncTextOpenAI, SyncChatOpenAI, and AsyncChatOpenAI are defined in this module and documented above under promplate.llm.openai.

same_params_as #

same_params_as(_: Callable[P, Any]) -> Callable[[Callable[..., None]], Callable[P, None]]
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
def same_params_as(_: Callable[P, Any]) -> Callable[[Callable[..., None]], Callable[P, None]]: ...
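
same_params_as exists for static typing: via ParamSpec it stamps the parameter list of the given callable (here openai's Client or AsyncClient) onto the decorated __init__, which is why ClientConfig and AsyncClientConfig accept exactly the client constructor's arguments. A minimal sketch of the pattern, not necessarily the library's exact runtime body:

from typing import Any, Callable, ParamSpec

P = ParamSpec("P")

def same_params_as(_: Callable[P, Any]) -> Callable[[Callable[..., None]], Callable[P, None]]:
    # identity decorator at runtime; only the annotations matter to type checkers
    return lambda f: f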