class AnalyseArtifactsAPI(ls.LitAPI):
    """API endpoint for artifact analysis.

    Provides a LitServe API for analyzing ML artifacts (datasets, training,
    deepchecks, model checkpoints) and returning diagnostic results.
    """

    def setup(self, device: str) -> None:
        """Setup the API endpoint.

        Initializes logging and creates the artifact analysis coordinator.

        Args:
            device: Device specification (unused, kept for LitAPI compatibility).
        """
        # Logging setup is best-effort: a failure here must not prevent the
        # server from starting, so the error is reported and swallowed.
        try:
            setup_dspy_logging()
        except Exception:
            print(f"Error setting up DSPy logging: {traceback.format_exc()}")
        self.coordinator = ArtifactAnalysisCoordinator(
            llm_config=LLMConfig.load_from_env()
        )

    def decode_request(self, request: APIRequest) -> AgentContext:
        """Decode API request into AgentContext.

        Args:
            request: APIRequest containing artifacts and configuration.

        Returns:
            AgentContext with artifacts and settings.

        Raises:
            HTTPException: If request decoding fails (status 400).
        """
        try:
            context_fields = dict(
                dataset_artifacts=request.dataset_artifacts,
                training_artifacts=request.training_artifacts,
                deepchecks_artifacts=request.deepchecks_artifacts,
                model_checkpoint_artifacts=request.model_checkpoint_artifacts,
                dataset_name=request.dataset_name,
                language=request.language,
            )
            return AgentContext(**context_fields)
        except Exception as e:
            # Malformed input is the client's fault -> 400, not 500.
            raise HTTPException(
                status_code=400,
                detail=f"Error decoding request: {e}",
            )

    def predict(self, request_ctx: AgentContext) -> APIResponse:
        """Run artifact analysis and return results.

        Args:
            request_ctx: AgentContext containing artifacts to analyze.

        Returns:
            APIResponse with analysis results from all agents.

        Raises:
            HTTPException: If analysis fails (status 500).
        """
        try:
            analysis = self.coordinator.run(request_ctx)
            return APIResponse(
                agent_results=analysis.get_agent_results(),
                summary=analysis.summary,
                additional_outputs=analysis.additional_outputs,
                error_messages=analysis.get_error_messages(),
                dataset_name=request_ctx.dataset_name,
            )
        except Exception:
            # Surface the full traceback to the caller for debuggability.
            raise HTTPException(status_code=500, detail=traceback.format_exc())
def decode_request(self, request: APIRequest) -> AgentContext:
    """Decode API request into AgentContext.

    Args:
        request: APIRequest containing artifacts and configuration.

    Returns:
        AgentContext with artifacts and settings.

    Raises:
        HTTPException: If request decoding fails (status 400).
    """
    try:
        return AgentContext(
            dataset_artifacts=request.dataset_artifacts,
            training_artifacts=request.training_artifacts,
            deepchecks_artifacts=request.deepchecks_artifacts,
            model_checkpoint_artifacts=request.model_checkpoint_artifacts,
            dataset_name=request.dataset_name,
            language=request.language,
        )
    except Exception as e:
        # A request that cannot be decoded is a client error -> 400.
        raise HTTPException(
            status_code=400,
            detail=f"Error decoding request: {e}",
        )
def predict(self, request_ctx: AgentContext) -> APIResponse:
    """Run artifact analysis and return results.

    Args:
        request_ctx: AgentContext containing artifacts to analyze.

    Returns:
        APIResponse with analysis results from all agents.

    Raises:
        HTTPException: If analysis fails (status 500).
    """
    try:
        outcome = self.coordinator.run(request_ctx)
    except Exception:
        # Any coordinator failure is a server-side error; include the
        # traceback so the caller can diagnose it.
        raise HTTPException(status_code=500, detail=traceback.format_exc())
    try:
        return APIResponse(
            agent_results=outcome.get_agent_results(),
            summary=outcome.summary,
            additional_outputs=outcome.additional_outputs,
            error_messages=outcome.get_error_messages(),
            dataset_name=request_ctx.dataset_name,
        )
    except Exception:
        raise HTTPException(status_code=500, detail=traceback.format_exc())
setup(device)
Setup the API endpoint.
Initializes logging and creates the artifact analysis coordinator.
Parameters:
Name
Type
Description
Default
device
str
Device specification (unused, kept for LitAPI compatibility).
required
Source code in deepfix-server/src/deepfix_server/api.py
def setup(self, device: str) -> None:
    """Setup the API endpoint.

    Initializes logging and creates the artifact analysis coordinator.

    Args:
        device: Device specification (unused, kept for LitAPI compatibility).
    """
    # Best-effort logging setup: report failures but keep the server booting.
    try:
        setup_dspy_logging()
    except Exception:
        print(f"Error setting up DSPy logging: {traceback.format_exc()}")
    config = LLMConfig.load_from_env()
    self.coordinator = ArtifactAnalysisCoordinator(llm_config=config)
Coordinators
ArtifactAnalysisCoordinator
Main orchestrator that coordinates specialized analyzer agents.
Source code in deepfix-server/src/deepfix_server/coordinators.py
class LLMConfig(BaseModel):
    """Configuration for LLM provider settings.

    Attributes:
        api_key: Optional API key for the LLM provider.
        base_url: Optional base URL for the LLM API endpoint.
        model_name: Name of the LLM model to use.
        temperature: Sampling temperature for text generation. Defaults to 0.7.
        max_tokens: Maximum number of tokens to generate. Defaults to 8000.
        cache: Whether to cache LLM requests. Defaults to True.
        track_usage: Whether to track LLM usage. Defaults to True.
    """

    api_key: Optional[str] = Field(default=None, description="API key for the LLM provider")
    base_url: Optional[str] = Field(default=None, description="Base URL for the LLM API")
    # Annotation widened to Optional[str]: the default has always been None,
    # so a bare `str` annotation was inconsistent with the actual default.
    model_name: Optional[str] = Field(default=None, description="Model name to use for the LLM")
    temperature: float = Field(default=0.7, description="Sampling temperature for text generation")
    max_tokens: int = Field(default=8000, description="Maximum tokens to generate in the response")
    cache: bool = Field(default=True, description="Cache request")
    track_usage: bool = Field(default=True, description="Track usage")

    @classmethod
    def load_from_env(cls, env_file: Optional[str] = None) -> "LLMConfig":
        """Load LLM configuration from environment variables.

        Reads the following environment variables:
        - DEEPFIX_LLM_API_KEY
        - DEEPFIX_LLM_BASE_URL
        - DEEPFIX_LLM_MODEL_NAME
        - DEEPFIX_LLM_TEMPERATURE
        - DEEPFIX_LLM_MAX_TOKENS
        - DEEPFIX_LLM_CACHE
        - DEEPFIX_LLM_TRACK_USAGE

        Unset numeric/boolean variables fall back to the field defaults
        instead of raising (the previous implementation crashed with a
        TypeError when DEEPFIX_LLM_TEMPERATURE or DEEPFIX_LLM_MAX_TOKENS
        was missing).

        Args:
            env_file: Optional path to .env file to load.

        Returns:
            LLMConfig instance populated from environment variables.
        """
        if env_file is not None:
            load_dotenv(env_file)

        def _env_bool(name: str, default: bool) -> bool:
            # os.getenv returns a string; bool("false") is True, so the
            # text must be parsed rather than relying on truthiness of a
            # non-empty string (the old `bool(os.getenv(...))` made every
            # set value True and every unset value False).
            raw = os.getenv(name)
            if raw is None:
                return default
            return raw.strip().lower() in ("1", "true", "yes", "on")

        return cls(
            api_key=os.getenv("DEEPFIX_LLM_API_KEY"),
            base_url=os.getenv("DEEPFIX_LLM_BASE_URL"),
            model_name=os.getenv("DEEPFIX_LLM_MODEL_NAME"),
            temperature=float(os.getenv("DEEPFIX_LLM_TEMPERATURE", "0.7")),
            max_tokens=int(os.getenv("DEEPFIX_LLM_MAX_TOKENS", "8000")),
            cache=_env_bool("DEEPFIX_LLM_CACHE", True),
            track_usage=_env_bool("DEEPFIX_LLM_TRACK_USAGE", True),
        )
load_from_env(env_file=None) (classmethod)
Load LLM configuration from environment variables.
Reads the following environment variables:
- DEEPFIX_LLM_API_KEY
- DEEPFIX_LLM_BASE_URL
- DEEPFIX_LLM_MODEL_NAME
- DEEPFIX_LLM_TEMPERATURE
- DEEPFIX_LLM_MAX_TOKENS
- DEEPFIX_LLM_CACHE
- DEEPFIX_LLM_TRACK_USAGE