Skip to content

Commit 43ede87

Browse files
committed
feat: creates stubs
1 parent 32840dc commit 43ede87

10 files changed

Lines changed: 137 additions & 8 deletions

File tree

flowchat/__init__.pyi

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
from .types import *
2+
from .autodedent import autodedent as autodedent
3+
from .chain import Chain as Chain

flowchat/autodedent.pyi

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
from typing import Any
2+
3+
def autodedent(*text_lines: Any) -> str: ...  # joins the given lines into one dedented prompt string — presumably; see autodedent.py for the implementation

flowchat/chain.py

Lines changed: 7 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,10 @@
22
from .private._private_helpers import encode_image, wrap_stream_and_count, async_wrap_stream_and_count
33
from .types import *
44
from datetime import datetime
5+
from openai.types.chat.chat_completion import ChatCompletion
56
from typing import List, Optional, Union, Callable, Any, Generator, AsyncGenerator
67
from typing_extensions import Unpack
8+
from openai import Stream, AsyncStream
79
import json
810
import logging
911
import openai
@@ -125,7 +127,8 @@ def _ask(
125127
return None
126128

127129
if stream and isinstance(completion, Stream):
128-
return wrap_stream_and_count(completion, model, self._add_token_count, plain_text_stream)
130+
return wrap_stream_and_count(
131+
completion, model, self._add_token_count, plain_text_stream) # type: ignore
129132

130133
return self._post_completion(completion, model, json_schema)
131134

@@ -152,7 +155,8 @@ async def async_ask(
152155
return None
153156

154157
if stream and isinstance(completion, AsyncStream):
155-
return async_wrap_stream_and_count(completion, model, self._add_token_count, plain_text_stream)
158+
return async_wrap_stream_and_count(
159+
completion, model, self._add_token_count, plain_text_stream) # type: ignore
156160

157161
return self._post_completion(completion, model, json_schema)
158162

@@ -253,7 +257,7 @@ def link(self, modifier: Union[Callable[[str | Any | None], str], str], model: s
253257
)
254258
return self
255259

256-
def setup_pull(self, asynchronous: bool = False, json_schema: Optional[dict[Any, Any]] = None, **params: Unpack[RequestParams]):
260+
def setup_pull(self, asynchronous: bool = False, json_schema: Optional[dict[Any, Any]] = None, **params: Unpack[RequestParams]) -> Any:
257261
params['model'] = params.get('model', self.model)
258262

259263
if len(self.user_prompt) == 0:

flowchat/chain.pyi

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
from .types import *
2+
from .autodedent import autodedent as autodedent
3+
from .private._private_helpers import async_wrap_stream_and_count as async_wrap_stream_and_count, encode_image as encode_image, wrap_stream_and_count as wrap_stream_and_count
4+
from _typeshed import Incomplete
5+
from typing import Any, AsyncGenerator, Callable, Generator
6+
from typing_extensions import Unpack
7+
8+
class Chain:
    """Type stub for the fluent prompt-chaining interface (implementation in chain.py).

    Most methods return ``Chain`` so calls can be chained. Generated by stubgen;
    ``Incomplete`` marks attributes whose types could not be inferred.
    """
    # Attribute types not inferred by stubgen — see chain.py for actual values.
    client: Incomplete
    asyncClient: Incomplete
    model: Incomplete
    system: Incomplete
    user_prompt: Incomplete
    model_response: Incomplete
    raw_model_response: Incomplete
    usage: Incomplete
    detailed_usage: Incomplete
    def __init__(self, model: str, api_key: str = '', environ_key: str = 'OPENAI_API_KEY') -> None: ...
    # Low-level request entry point (async); sync counterpart `_ask` is private and excluded from the stub.
    async def async_ask(self, system: Message | None, user_messages: list[Message], json_schema: dict[Any, Any] | None = None, stream: bool = False, plain_text_stream: bool = False, **params: Unpack[RequestParams]) -> Any: ...
    def unhook(self) -> Chain: ...
    def anchor(self, system_prompt: str) -> Chain: ...
    def transform(self, function: Callable[[Any], Any]) -> Chain: ...
    # NOTE(review): `images` defaults to None but its annotation does not include None — confirm against chain.py.
    def link(self, modifier: Callable[[str | Any | None], str] | str, model: str | None = None, assistant: bool = False, images: str | Any | list[str | Any] | ImageFormat = None) -> Chain: ...
    def setup_pull(self, asynchronous: bool = False, json_schema: dict[Any, Any] | None = None, **params: Unpack[RequestParams]) -> Any: ...
    def pull(self, json_schema: dict[Any, Any] | None = None, **params: Unpack[RequestParams]) -> Chain: ...
    async def async_pull(self, json_schema: dict[Any, Any] | None = None, **params: Unpack[RequestParams]) -> Chain: ...
    def stream(self, plain_text_stream: bool = False, **params: Unpack[RequestParams]) -> Generator[str | Any | None, None, None]: ...
    async def async_stream(self, plain_text_stream: bool = False, **params: Unpack[RequestParams]) -> AsyncGenerator[str | Any | None, None]: ...
    def last(self) -> Any: ...
    # Token-usage accounting helpers.
    def token_usage(self) -> Usage: ...
    def detailed_token_usage(self) -> list[DetailedUsage]: ...
    def reset_token_usage(self) -> Chain: ...
    def subscribe_token_usage(self, usage_object: Usage) -> Chain: ...
    def subscribe_detailed_token_usage(self, detailed_usage_object: list[DetailedUsage]) -> Chain: ...
    # Logging helpers (chainable).
    def log(self) -> Chain: ...
    def log_tokens(self) -> Chain: ...
    def log_detailed_tokens(self) -> Chain: ...

flowchat/private/__init__.pyi

Whitespace-only changes.

flowchat/private/_private_helpers.py

Lines changed: 5 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,18 +1,19 @@
1-
from typing import Callable
1+
from typing import Callable, Generator, AsyncGenerator
2+
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
23
from ..types import *
34
from io import BytesIO
45
from PIL.Image import Image as PILImage
56
import base64
67

78

8-
def encode_image(image: PILImage, format_type: str = "PNG"):
9+
def encode_image(image: "PILImage", format_type: str = "PNG") -> str:
    """Serialize a PIL image to a base64 ``data:`` URL.

    Args:
        image: The PIL image to encode (any object with a ``save(buffer, format=...)`` method).
        format_type: Image format forwarded to ``Image.save`` (e.g. "PNG", "JPEG").

    Returns:
        A ``data:image/<format>;base64,...`` string suitable for embedding
        in an API request or HTML.
    """
    buffered = BytesIO()
    image.save(buffered, format=format_type)
    img_str = base64.b64encode(buffered.getvalue())
    # Bug fix: the MIME subtype was hardcoded to "png" even when a different
    # format_type (e.g. "JPEG") was requested, mislabeling the payload.
    return f"data:image/{format_type.lower()};base64,{img_str.decode('utf-8')}"
1314

1415

15-
def wrap_stream_and_count(generator: StreamCompletion, model: str, callback: Callable[[int, int, str], None], plain_text_stream: bool = False):
16+
def wrap_stream_and_count(generator: StreamCompletion, model: str, callback: Callable[[int, int, str], None], plain_text_stream: bool = False) -> Generator[str | ChatCompletionChunk | None, None, None]:
1617
for response in generator:
1718
if len(response.choices) == 0:
1819
usage = response.usage
@@ -23,7 +24,7 @@ def wrap_stream_and_count(generator: StreamCompletion, model: str, callback: Cal
2324
yield content if plain_text_stream else response
2425

2526

26-
async def async_wrap_stream_and_count(generator: AsyncStreamCompletion, model: str, callback: Callable[[int, int, str], None], plain_text_stream: bool = False):
27+
async def async_wrap_stream_and_count(generator: AsyncStreamCompletion, model: str, callback: Callable[[int, int, str], None], plain_text_stream: bool = False) -> AsyncGenerator[str | ChatCompletionChunk | None, None]:
2728
async for response in generator:
2829
if len(response.choices) == 0:
2930
usage = response.usage
Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
from ..types import *
2+
from PIL.Image import Image as PILImage
3+
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
4+
from typing import AsyncGenerator, Callable, Generator
5+
6+
def encode_image(image: PILImage, format_type: str = 'PNG') -> str: ...  # returns a base64 "data:" URL of the image (see _private_helpers.py)
7+
def wrap_stream_and_count(generator: StreamCompletion, model: str, callback: Callable[[int, int, str], None], plain_text_stream: bool = False) -> Generator[str | ChatCompletionChunk | None, None, None]: ...  # yields plain text or raw chunks; callback presumably receives token counts — see _private_helpers.py
8+
async def async_wrap_stream_and_count(generator: AsyncStreamCompletion, model: str, callback: Callable[[int, int, str], None], plain_text_stream: bool = False) -> AsyncGenerator[str | ChatCompletionChunk | None, None]: ...

flowchat/types.pyi

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,70 @@
1+
from PIL.Image import Image as PILImage
2+
from _typeshed import Incomplete
3+
from datetime import datetime
4+
from openai import AsyncStream, Stream
5+
from openai.types.chat.chat_completion_chunk import ChatCompletionChunk
6+
from typing import Any, Literal, NotRequired
7+
from typing_extensions import TypedDict
8+
9+
# Aliases for the OpenAI streaming completion iterators (sync and async).
StreamCompletion = Stream[ChatCompletionChunk]
AsyncStreamCompletion = AsyncStream[ChatCompletionChunk]
CreateResponse: Incomplete  # type not inferred by stubgen — see types.py
12+
13+
class Message(TypedDict):
    """A chat message: a role (e.g. 'system'/'user') plus its content."""
    role: str
    content: str | list[Any]  # plain text, or a list of structured content parts
16+
17+
class ResponseFormat(TypedDict):
    """OpenAI ``response_format`` payload selecting text or JSON-object output."""
    type: Literal['text', 'json_object']
19+
20+
class ImageFormat(TypedDict):
    """An image attachment: a URL or PIL image plus encoding and detail options."""
    url: str | PILImage
    format_type: str  # presumably forwarded to encode_image when url is a PIL image — verify
    detail: Literal['low', 'high']
24+
25+
class StreamOptions(TypedDict):
    """Streaming options forwarded to the OpenAI API."""
    include_usage: bool
27+
28+
class Function(TypedDict):
    """A tool-function description: required name, optional description/parameters."""
    name: str
    description: NotRequired[str]
    parameters: NotRequired[Any]  # presumably a JSON-schema parameter spec — verify against API usage
32+
33+
class Tool(TypedDict):
    """A tool entry pairing a type tag with its function description."""
    type: str
    function: Function
36+
37+
class ToolChoiceFunction(TypedDict):
    """Names the function a ``ToolChoice`` refers to."""
    name: str
39+
40+
class ToolChoice(TypedDict):
    """Explicit tool selection: a type tag plus the target function."""
    type: str
    function: ToolChoiceFunction
43+
44+
class RequestParams(TypedDict, total=False):
    """Optional keyword parameters forwarded to chat-completion requests.

    All keys are optional via ``total=False``; the per-field ``NotRequired``
    wrappers are redundant with that but kept as emitted by stubgen.
    """
    model: NotRequired[str]
    frequency_penalty: NotRequired[float | int]
    logit_bias: NotRequired[dict[Any, Any]]
    logprobs: NotRequired[bool]
    top_logprobs: NotRequired[int]
    max_tokens: NotRequired[float | int]
    n: NotRequired[float | int]
    presence_penalty: NotRequired[float | int]
    response_format: NotRequired[ResponseFormat]
    seed: NotRequired[int]
    stop: NotRequired[str | list[str]]
    stream_options: NotRequired[StreamOptions]
    temperature: NotRequired[float | int]
    top_p: NotRequired[float | int]
    tools: NotRequired[list[Tool]]
    tool_choice: NotRequired[str | ToolChoice]
    user: NotRequired[str]
62+
63+
class Usage(TypedDict):
    """Aggregate token counts for prompt and completion."""
    prompt_tokens: int
    completion_tokens: int
66+
67+
class DetailedUsage(TypedDict):
    """Token usage for a single request: model name, counts, and a timestamp."""
    model: str
    usage: Usage
    time: datetime

setup.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@
44
this_directory = Path(__file__).parent
55
long_description = (this_directory / "README.md").read_text()
66

7-
VERSION = '1.3.3'
7+
VERSION = '1.4.0'
88
DESCRIPTION = 'Streamlining the process of multi-prompting LLMs with chains'
99

1010
setup(

stubgen.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
"""Regenerate the flowchat type stubs (.pyi files) using mypy's stubgen."""
import subprocess

# check=True so a stubgen failure raises instead of passing silently.
subprocess.run(["stubgen", "-p", "flowchat", "-o", "."], check=True)

0 commit comments

Comments (0)