from promplate.llm import * #
promplate.llm.base #
Configurable #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
AsyncGenerate #
Bases: Protocol
__call__ #
__call__(prompt, /, **config) -> AsyncIterable[str]
Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
promplate.llm.openai #
Message #
Configurable #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
AsyncGenerate #
Bases: Protocol
__call__ #
__call__(prompt, /, **config) -> AsyncIterable[str]
Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
LLM #
TextComplete #
Bases: ClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
AsyncTextComplete #
Bases: AsyncClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
TextGenerate #
Bases: ClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
AsyncTextGenerate #
Bases: AsyncClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
ChatComplete #
Bases: ClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
__call__ #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
AsyncChatComplete #
Bases: AsyncClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
__call__ async #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
ChatGenerate #
Bases: ClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
__call__ #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
AsyncChatGenerate #
Bases: AsyncClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
__call__ async #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
SyncTextOpenAI #
Bases: ClientConfig, LLM
AsyncTextOpenAI #
Bases: AsyncClientConfig, LLM
SyncChatOpenAI #
Bases: ClientConfig, LLM
AsyncChatOpenAI #
Bases: AsyncClientConfig, LLM
ensure #
v0 #
Configurable #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
AsyncGenerate #
Bases: Protocol
__call__ #
__call__(prompt, /, **config) -> AsyncIterable[str]
Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
LLM #
Config #
Bases: Configurable
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
model instance-attribute #
temperature instance-attribute #
top_p instance-attribute #
stop instance-attribute #
max_tokens instance-attribute #
api_key instance-attribute #
api_base instance-attribute #
__init__ #
__init__(model: str, temperature: float | int | None = None, top_p: float | int | None = None, stop: str | list[str] | None = None, max_tokens: int | None = None, api_key: str | None = None, api_base: str | None = None, **other_config)
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
__setattr__ #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
TextComplete #
AsyncTextComplete #
Bases: Config, AsyncComplete
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
TextGenerate #
AsyncTextGenerate #
Bases: Config, AsyncGenerate
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
ChatComplete #
AsyncChatComplete #
Bases: Config, AsyncComplete
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
__call__ async #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
ChatGenerate #
Bases: Config, Generate
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
__call__ #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
AsyncChatGenerate #
Bases: Config, AsyncGenerate
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
__call__ async #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v0.py
v1 #
__all__ module-attribute #
Configurable #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
AsyncGenerate #
Bases: Protocol
__call__ #
__call__(prompt, /, **config) -> AsyncIterable[str]
Source code in .venv/lib/python3.12/site-packages/promplate/llm/base.py
LLM #
Config #
Bases: Configurable
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
__init__ #
TextComplete #
Bases: ClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
AsyncTextComplete #
Bases: AsyncClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
TextGenerate #
Bases: ClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
AsyncTextGenerate #
Bases: AsyncClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
ChatComplete #
Bases: ClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
__call__ #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
AsyncChatComplete #
Bases: AsyncClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
__call__ async #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
ChatGenerate #
Bases: ClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
__call__ #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
AsyncChatGenerate #
Bases: AsyncClientConfig
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
__call__ async #
Source code in .venv/lib/python3.12/site-packages/promplate/llm/openai/v1.py
SyncTextOpenAI #
Bases: ClientConfig, LLM
AsyncTextOpenAI #
Bases: AsyncClientConfig, LLM
SyncChatOpenAI #
Bases: ClientConfig, LLM
AsyncChatOpenAI #
Bases: AsyncClientConfig, LLM