101 changes: 101 additions & 0 deletions sdk/ai/azure-ai-projects/azure/ai/projects/_patch.pyi
@@ -0,0 +1,101 @@
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
"""Type stub for _patch.py.

Overrides get_openai_client() return type so that evals.create() accepts
Azure-specific grader types in addition to the standard OpenAI graders.
"""

from typing import Any, Iterable, Union, Optional
from httpx import Timeout
from openai import NotGiven, Omit, OpenAI as OpenAIClient
from openai._types import Body, Query, Headers
from openai.resources.evals.evals import Evals
from openai.resources.evals.runs.runs import Runs
from openai.types.evals.create_eval_jsonl_run_data_source_param import CreateEvalJSONLRunDataSourceParam
from openai.types.evals.create_eval_completions_run_data_source_param import CreateEvalCompletionsRunDataSourceParam
from openai.types.evals.run_create_params import DataSourceCreateEvalResponsesRunDataSource
from openai.types.evals.run_create_response import RunCreateResponse
from openai.types.eval_create_params import (
    DataSourceConfigCustom,
    DataSourceConfigLogs,
    DataSourceConfigStoredCompletions,
    TestingCriterionLabelModel,
    TestingCriterionTextSimilarity,
    TestingCriterionPython,
    TestingCriterionScoreModel,
)
from openai.types.graders.string_check_grader_param import StringCheckGraderParam
from openai.types.eval_create_response import EvalCreateResponse
from openai.types.shared_params.metadata import Metadata
from ._client import AIProjectClient as AIProjectClientGenerated
from .models import (
    AzureAIBenchmarkPreviewEvalRunDataSource,
    AzureAIDataSourceConfig,
    AzureAIResponsesEvalRunDataSource,
    EvalCsvRunDataSource,
    EvalGraderAzureAIEvaluator,
    RedTeamEvalRunDataSource,
    TargetCompletionEvalRunDataSource,
    TracesPreviewEvalRunDataSource,
)

class _AzureEvalRuns(Runs):
    def create(
        self,
        eval_id: str,
        *,
        data_source: Union[
            CreateEvalJSONLRunDataSourceParam,
            CreateEvalCompletionsRunDataSourceParam,
            DataSourceCreateEvalResponsesRunDataSource,
            AzureAIBenchmarkPreviewEvalRunDataSource,
            AzureAIResponsesEvalRunDataSource,
            EvalCsvRunDataSource,
            RedTeamEvalRunDataSource,
            TargetCompletionEvalRunDataSource,
            TracesPreviewEvalRunDataSource,
        ],  # <=== Azure extension here
        metadata: Optional[Metadata] | Omit = ...,
        name: str | Omit = ...,
        extra_headers: Headers | None = ...,
        extra_query: Query | None = ...,
        extra_body: Body | None = ...,
        timeout: float | Timeout | None | NotGiven = ...,
    ) -> RunCreateResponse: ...

class _AzureEvals(Evals):
    def create(
        self,
        *,
        data_source_config: Union[
            DataSourceConfigCustom, DataSourceConfigLogs, DataSourceConfigStoredCompletions, AzureAIDataSourceConfig
        ],  # <=== Azure extension here
        testing_criteria: Iterable[
            Union[
                TestingCriterionLabelModel,
                StringCheckGraderParam,
                TestingCriterionTextSimilarity,
                TestingCriterionPython,
                TestingCriterionScoreModel,
                EvalGraderAzureAIEvaluator,  # <=== Azure extension here
            ]
        ],
        metadata: Optional[Metadata] | Omit | None = ...,
        name: str | Omit = ...,
        extra_headers: Headers | None = None,
        extra_query: Query | None = None,
        extra_body: Body | None = None,
        timeout: float | Timeout | NotGiven | None = ...,
    ) -> EvalCreateResponse: ...
    @property
    def runs(self) -> _AzureEvalRuns: ...

class OpenAI(OpenAIClient):
    @property
    def evals(self) -> _AzureEvals: ...

class AIProjectClient(AIProjectClientGenerated):
    def get_openai_client(self, **kwargs: Any) -> OpenAI: ...
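
An illustrative usage sketch of what this stub enables at the call site (not part of this PR): the endpoint and dictionary contents below are placeholder assumptions, and the data source config and grader use standard OpenAI evals shapes; comments note where the Azure-specific dictionaries introduced by this change would also be accepted by the widened unions.

```python
# Illustrative sketch only; endpoint and dict contents are assumptions.
from azure.ai.projects import AIProjectClient
from azure.identity import DefaultAzureCredential

project_client = AIProjectClient(
    endpoint="https://<your-project-endpoint>",  # placeholder
    credential=DefaultAzureCredential(),
)

# With the stub above, this is typed as the patched OpenAI class, whose
# .evals.create() accepts the Azure grader / data source unions.
openai_client = project_client.get_openai_client()

eval_object = openai_client.evals.create(
    name="sample-eval",
    # Standard OpenAI "custom" data source config; an AzureAIDataSourceConfig
    # dict would also type-check here after this change.
    data_source_config={
        "type": "custom",
        "item_schema": {
            "type": "object",
            "properties": {"answer": {"type": "string"}},
        },
        "include_sample_schema": True,
    },
    # Standard string-check grader; an EvalGraderAzureAIEvaluator dict would
    # also be accepted by the widened testing_criteria union.
    testing_criteria=[
        {
            "type": "string_check",
            "name": "exact-match",
            "input": "{{sample.output_text}}",
            "reference": "{{item.answer}}",
            "operation": "eq",
        }
    ],
)
print(eval_object.id)
```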
28 changes: 27 additions & 1 deletion sdk/ai/azure-ai-projects/azure/ai/projects/models/_patch.py
@@ -9,6 +9,20 @@
"""

from typing import Final, FrozenSet, List, Dict, Mapping, Optional, Any, Tuple
from ._patch_evaluation_typeddicts import (
    AzureAIBenchmarkPreviewEvalRunDataSource,
    AzureAIDataSourceConfig,
    AzureAIModelTarget,
    AzureAIResponsesEvalRunDataSource,
    EvalCsvFileIdSource,
    EvalCsvRunDataSource,
    EvalGraderAzureAIEvaluator,
    ModelSamplingParams,
    RedTeamEvalRunDataSource,
    ResponseRetrievalItemGenerationParams,
    TargetCompletionEvalRunDataSource,
    TracesPreviewEvalRunDataSource,
)
from azure.core.polling import LROPoller, AsyncLROPoller, PollingMethod, AsyncPollingMethod
from azure.core.polling.base_polling import (
    LROBasePolling,
@@ -346,9 +360,21 @@ def from_continuation_token(


__all__: List[str] = [
    "AsyncUpdateMemoriesLROPoller",
    "AzureAIBenchmarkPreviewEvalRunDataSource",
    "AzureAIDataSourceConfig",
    "AzureAIModelTarget",
    "AzureAIResponsesEvalRunDataSource",
    "CustomCredential",
    "EvalCsvFileIdSource",
    "EvalCsvRunDataSource",
    "EvalGraderAzureAIEvaluator",
    "ModelSamplingParams",
    "RedTeamEvalRunDataSource",
    "ResponseRetrievalItemGenerationParams",
    "TargetCompletionEvalRunDataSource",
    "TracesPreviewEvalRunDataSource",
    "UpdateMemoriesLROPoller",
"AsyncUpdateMemoriesLROPoller",
] # Add all objects you want publicly available to users at this package level
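
With the `__all__` entries above in place, the new evaluation TypedDicts should be importable directly from the package's public models namespace; a quick sanity-check sketch (only the names are taken from this diff):

```python
# The Azure eval data-source / grader TypedDicts are re-exported publicly
# via the __all__ additions above, so user code can import them directly
# from azure.ai.projects.models.
from azure.ai.projects.models import (
    AzureAIDataSourceConfig,
    EvalGraderAzureAIEvaluator,
    TracesPreviewEvalRunDataSource,
)

print(AzureAIDataSourceConfig, EvalGraderAzureAIEvaluator, TracesPreviewEvalRunDataSource)
```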

