Coverage for codexa/client/accessor.py: 89% (37 statements)
coverage.py v7.9.2, created at 2025-08-10 07:53 +0000
from openai import OpenAI

from codexa.core.errors import CodexaAccessorError


class RemoteAIAccessor:
    """Class for interacting with remote LLM APIs.

    Uses the DeepSeek R1 free model by default.
    """

    def __init__(
        self,
        api_key: str,
        prompt: str,
        base_url: str = "https://openrouter.ai/api/v1",
        model: str = "deepseek/deepseek-r1:free",
    ) -> None:
        if not api_key:
            raise CodexaAccessorError("API key is required")
        if not prompt:
            raise CodexaAccessorError("Accessor requires setup prompt")
        self.__api_key = api_key
        self.__base_url = base_url
        self.__model = model
        self.__client = OpenAI(api_key=self.__api_key, base_url=self.__base_url)
        self.__setup_prompt = prompt

    @property
    def setup_prompt(self) -> str:
        """Return the setup prompt."""
        return self.__setup_prompt

    def make_request(self, message: str, timeout: float = 60.0) -> str:
        """Make a request to the LLM API.

        Args:
            message (str): Interaction message
            timeout (float, optional): Request timeout (s), defaults to 60.0.

        Raises:
            CodexaAccessorError: If the LLM API call fails

        Returns:
            str: LLM response text
        """
        response = self.__client.chat.completions.create(
            model=self.__model,
            messages=[
                {"role": "system", "content": self.__setup_prompt},
                {"role": "user", "content": message},
            ],
            stream=False,
            timeout=timeout,
        )
        content = response.choices[0].message.content
        if not content:
            # Surface the model's refusal reason, if any, in the error.
            reason = response.choices[0].message.refusal
            raise CodexaAccessorError(
                message=f"Failed to generate code: {reason}",
                help_text="Please try re-running the command",
            )
        return content
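

# A minimal usage sketch, not part of the original module: it assumes an
# OpenRouter key in an OPENROUTER_API_KEY environment variable, and the
# helper name below is hypothetical.
def _demo_make_request() -> None:
    import os

    accessor = RemoteAIAccessor(
        api_key=os.environ["OPENROUTER_API_KEY"],  # assumed env var
        prompt="You are a concise assistant.",
    )
    # Raises CodexaAccessorError if the model returns no content.
    print(accessor.make_request("Reply with the single word: pong", timeout=30.0))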


class ReportScanner(RemoteAIAccessor):
    """Class for generating test report summary."""

    def __init__(self, api_key: str):
        self.__setup_prompt = """Take on the role of a Senior QA Engineer.

I need to implement testing in Python. I am using Pytest as my test harness.
Your primary task is to read the Pytest execution output and write a summary
report.

Key requirements:
1. Report should be written in a way that is easy to understand.
2. Report must provide actionable steps for fixing the issues.
3. Report should be written in Markdown syntax, properly formatted.

Sections that the report must include:
1. Summary of the test run
2. Per error, a summary of the error and the steps to fix it
3. Any other relevant information

Please consider:
- Test writing best practices
- ISTQB Tester guidelines

From this step forward, I will provide you with the execution output and you
will write the report. Minimize chat responses and focus on the report. Provide
ONLY the summary; do not write additional comments or greetings.
"""
        super().__init__(api_key, prompt=self.__setup_prompt)

    def analyze_tests(self, test_output: str, timeout: float = 60.0) -> str:
        """Read the test execution output and generate a report.

        Args:
            test_output (str): Pytest execution output
            timeout (float, optional): Request timeout (s), defaults to 60.0.

        Returns:
            str: Generated test summary report
        """
        message = f"Generate a report for the following test output:\n\n{test_output}"
        return self.make_request(message, timeout)
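

# A minimal usage sketch, not part of the original module: it captures a
# pytest run with subprocess and feeds the combined output to ReportScanner.
# The helper name and the OPENROUTER_API_KEY variable are assumptions.
def _demo_analyze_tests() -> None:
    import os
    import subprocess

    # Capture both streams; pytest writes its summary to stdout.
    result = subprocess.run(
        ["pytest", "--tb=short"],
        capture_output=True,
        text=True,
    )
    scanner = ReportScanner(api_key=os.environ["OPENROUTER_API_KEY"])
    print(scanner.analyze_tests(result.stdout + result.stderr))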


class RepoAnalyzer(RemoteAIAccessor):
    """Class for analyzing the repository."""

    def __init__(self, api_key: str):
        self.__setup_prompt = """# Overview

Take on the role of a Senior Software Engineer,
specializing in automated testing, code review, and test strategy.

You will be given a `git diff` between the current working tree and a remote reference
(usually the main branch). Your task is to analyze the diff and identify what areas of
the codebase have changed, and what testing actions are necessary based on those changes.

Your objective is to **guide the author on what tests are required or should be updated**.

## Goals

- Review the changed files and modified code in the diff.
- Identify which functions, classes, or modules have been added, modified, or removed.
- Detect if new logic paths, branches, conditions, or data flows are introduced.
- Determine whether the existing tests need to be updated or if new tests should be created.
- Recommend specific **types of tests** required (e.g., unit, integration, regression, edge cases).
- If test files are included in the diff, comment on their adequacy and suggest improvements.
- Flag if changes to existing test cases might break or misrepresent the new behavior.
- If no changes in test files are detected, but logic changes exist, point out the test coverage gap.

## Output Format

Respond in **Markdown** with the following sections:

- Summary
- Areas Requiring Tests
- Suggested Tests
- Risks

Follow proper Markdownlint formatting and syntax, and be sure to add blank lines
after headers. Do not put triple-backticks (```) in the output; format the
response as if it were to be pasted directly into a Markdown file.

## Additional Notes

- Be precise and concise; prefer bullet points where helpful.
- Favor actionable suggestions over verbose explanations.
- You are not writing the tests; you are identifying and planning them.
"""
        super().__init__(api_key, prompt=self.__setup_prompt)

    def compare_diff(self, diff: str, timeout: float = 60.0) -> str:
        """Assess the repository changes and generate a report.

        Args:
            diff (str): Repository changes
            timeout (float, optional): Request timeout (s), defaults to 60.0.

        Returns:
            str: Generated repository analysis report
        """
        message = f"Prepare an analysis and report for this diff:\n\n{diff}"
        return self.make_request(message, timeout)
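

# A minimal usage sketch, not part of the original module: it diffs the
# working tree against origin/main (an assumed remote reference) and prints
# the analysis. OPENROUTER_API_KEY is an assumed environment variable.
if __name__ == "__main__":
    import os
    import subprocess

    diff = subprocess.run(
        ["git", "diff", "origin/main"],
        capture_output=True,
        text=True,
        check=True,
    ).stdout
    analyzer = RepoAnalyzer(api_key=os.environ["OPENROUTER_API_KEY"])
    print(analyzer.compare_diff(diff))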