| 1 | package org.egothor.methodatlas.ai; | |
| 2 | ||
import java.net.URI;
import java.net.http.HttpRequest;
import java.net.http.HttpResponse;
import java.util.List;
import java.util.Objects;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;
| 10 | ||
| 11 | /** | |
| 12 | * {@link AiProviderClient} implementation for a locally running | |
| 13 | * <a href="https://ollama.ai/">Ollama</a> inference service. | |
| 14 | * | |
| 15 | * <p> | |
| 16 | * This client submits taxonomy-guided classification prompts to the Ollama HTTP | |
| 17 | * API and converts the returned model response into the internal | |
| 18 | * {@link AiClassSuggestion} representation used by the MethodAtlas AI | |
| 19 | * subsystem. | |
| 20 | * </p> | |
| 21 | * | |
| 22 | * <h2>Operational Responsibilities</h2> | |
| 23 | * | |
| 24 | * <ul> | |
| 25 | * <li>verifying local Ollama availability</li> | |
| 26 | * <li>constructing chat-style inference requests</li> | |
| 27 | * <li>injecting the system prompt and taxonomy-guided user prompt</li> | |
| 28 | * <li>executing HTTP requests against the Ollama API</li> | |
| 29 | * <li>extracting and normalizing JSON classification results</li> | |
| 30 | * </ul> | |
| 31 | * | |
| 32 | * <p> | |
| 33 | * The client uses the Ollama {@code /api/chat} endpoint for inference and the | |
| 34 | * {@code /api/tags} endpoint as a lightweight availability probe. | |
| 35 | * </p> | |
| 36 | * | |
| 37 | * <p> | |
| 38 | * This implementation is intended primarily for local, offline, or | |
| 39 | * privacy-preserving inference scenarios where source code should not be sent | |
| 40 | * to an external provider. | |
| 41 | * </p> | |
| 42 | * | |
| 43 | * @see AiProviderClient | |
| 44 | * @see AiProviderFactory | |
| 45 | * @see AiSuggestionEngine | |
| 46 | */ | |
| 47 | public final class OllamaClient implements AiProviderClient { | |
| 48 | /** | |
| 49 | * System prompt used to enforce deterministic, machine-readable model output. | |
| 50 | * | |
| 51 | * <p> | |
| 52 | * The prompt instructs the model to behave as a strict classification engine | |
| 53 | * and to return JSON only, without markdown fences or explanatory prose, so | |
| 54 | * that the response can be parsed automatically. | |
| 55 | * </p> | |
| 56 | */ | |
| 57 | private static final String SYSTEM_PROMPT = """ | |
| 58 | You are a precise software security classification engine. | |
| 59 | You classify JUnit 5 tests and return strict JSON only. | |
| 60 | Never include markdown fences, explanations, or extra text. | |
| 61 | """; | |
| 62 | ||
| 63 | private final AiOptions options; | |
| 64 | private final HttpSupport httpSupport; | |
| 65 | ||
| 66 | /** | |
| 67 | * Creates a new Ollama client with no rate-limit notification. | |
| 68 | * | |
| 69 | * <p>Rate-limit pauses are handled transparently. Use | |
| 70 | * {@link #OllamaClient(AiOptions, RateLimitListener)} when callers need | |
| 71 | * to be notified of such pauses.</p> | |
| 72 | * | |
| 73 | * @param options AI runtime configuration | |
| 74 | */ | |
| 75 | public OllamaClient(AiOptions options) { | |
| 76 | this(options, (w, a, m) -> {}); | |
| 77 | } | |
| 78 | ||
| 79 | /** | |
| 80 | * Creates a new Ollama client that notifies {@code rateLimitListener} | |
| 81 | * before each rate-limit sleep. | |
| 82 | * | |
| 83 | * @param options AI runtime configuration | |
| 84 | * @param rateLimitListener callback invoked before each HTTP 429 | |
| 85 | * pause; must not be {@code null} | |
| 86 | * @see RateLimitListener | |
| 87 | */ | |
| 88 | public OllamaClient(AiOptions options, RateLimitListener rateLimitListener) { | |
| 89 | this.options = options; | |
| 90 | this.httpSupport = new HttpSupport(options.timeout(), options.maxRetries(), rateLimitListener); | |
| 91 | } | |
| 92 | ||
| 93 | /** | |
| 94 | * Determines whether the configured Ollama service is reachable. | |
| 95 | * | |
| 96 | * <p> | |
| 97 | * The method performs a lightweight availability probe against the | |
| 98 | * {@code /api/tags} endpoint. If the endpoint responds successfully, the | |
| 99 | * provider is considered available. | |
| 100 | * </p> | |
| 101 | * | |
| 102 | * <p> | |
| 103 | * Any exception raised during the probe is treated as an indication that the | |
| 104 | * provider is unavailable. | |
| 105 | * </p> | |
| 106 | * | |
| 107 | * @return {@code true} if the Ollama service is reachable; {@code false} | |
| 108 | * otherwise | |
| 109 | */ | |
| 110 | @Override | |
| 111 | public boolean isAvailable() { | |
| 112 | try { | |
| 113 | URI uri = URI.create(options.baseUrl() + "/api/tags"); | |
| 114 | HttpRequest request = HttpRequest.newBuilder(uri).GET().timeout(options.timeout()).build(); | |
| 115 | ||
| 116 | httpSupport.httpClient().send(request, HttpResponse.BodyHandlers.discarding()); | |
| 117 | ||
| 118 |
1
1. isAvailable : replaced boolean return with false for org/egothor/methodatlas/ai/OllamaClient::isAvailable → KILLED |
return true; |
| 119 | } catch (Exception e) { | |
| 120 |
1
1. isAvailable : replaced boolean return with true for org/egothor/methodatlas/ai/OllamaClient::isAvailable → KILLED |
return false; |
| 121 | } | |
| 122 | } | |
| 123 | ||
| 124 | /** | |
| 125 | * Submits a classification request to the Ollama chat API for the specified | |
| 126 | * test class. | |
| 127 | * | |
| 128 | * <p> | |
| 129 | * The request consists of: | |
| 130 | * </p> | |
| 131 | * <ul> | |
| 132 | * <li>a system prompt enforcing strict JSON output</li> | |
| 133 | * <li>a user prompt containing the test class source and taxonomy text</li> | |
| 134 | * <li>provider options such as deterministic temperature settings</li> | |
| 135 | * </ul> | |
| 136 | * | |
| 137 | * <p> | |
| 138 | * The returned response is expected to contain a JSON object in the message | |
| 139 | * content field. That JSON text is extracted, deserialized into an | |
| 140 | * {@link AiClassSuggestion}, and then normalized before being returned. | |
| 141 | * </p> | |
| 142 | * | |
| 143 | * @param fqcn fully qualified class name being analyzed | |
| 144 | * @param classSource complete source code of the class being analyzed | |
| 145 | * @param taxonomyText taxonomy definition guiding classification | |
| 146 | * @param targetMethods deterministically extracted JUnit test methods that must | |
| 147 | * be classified | |
| 148 | * @return normalized AI classification result | |
| 149 | * | |
| 150 | * @throws AiSuggestionException if the request fails, if the provider returns | |
| 151 | * invalid content, or if response deserialization | |
| 152 | * fails | |
| 153 | */ | |
| 154 | @Override | |
| 155 | public AiClassSuggestion suggestForClass(String fqcn, String classSource, String taxonomyText, | |
| 156 | List<PromptBuilder.TargetMethod> targetMethods) throws AiSuggestionException { | |
| 157 | try { | |
| 158 | String prompt = PromptBuilder.build(fqcn, classSource, taxonomyText, targetMethods, options.confidence()); | |
| 159 | ||
| 160 | ChatRequest payload = new ChatRequest(options.modelName(), | |
| 161 | List.of(new Message("system", SYSTEM_PROMPT), new Message("user", prompt)), false, | |
| 162 | new Options(0.0)); | |
| 163 | ||
| 164 | String requestBody = httpSupport.objectMapper().writeValueAsString(payload); | |
| 165 | URI uri = URI.create(options.baseUrl() + "/api/chat"); | |
| 166 | ||
| 167 | HttpRequest request = httpSupport.jsonPost(uri, requestBody, options.timeout()).build(); | |
| 168 | String responseBody = httpSupport.postJson(request); | |
| 169 | ChatResponse response = httpSupport.objectMapper().readValue(responseBody, ChatResponse.class); | |
| 170 | ||
| 171 |
6
1. suggestForClass : removed conditional - replaced equality check with true → SURVIVED 2. suggestForClass : removed conditional - replaced equality check with true → SURVIVED 3. suggestForClass : removed conditional - replaced equality check with false → SURVIVED 4. suggestForClass : removed conditional - replaced equality check with true → KILLED 5. suggestForClass : removed conditional - replaced equality check with false → KILLED 6. suggestForClass : removed conditional - replaced equality check with false → KILLED |
if (response.message() == null || response.message().content() == null || response.message().content().isBlank()) { |
| 172 | throw new AiSuggestionException("Ollama returned no message content"); | |
| 173 | } | |
| 174 | ||
| 175 | String json = JsonText.extractFirstJsonObject(response.message().content()); | |
| 176 | AiClassSuggestion suggestion = httpSupport.objectMapper().readValue(json, AiClassSuggestion.class); | |
| 177 |
1
1. suggestForClass : replaced return value with null for org/egothor/methodatlas/ai/OllamaClient::suggestForClass → KILLED |
return normalize(suggestion); |
| 178 | ||
| 179 | } catch (Exception e) { // NOPMD | |
| 180 | throw new AiSuggestionException("Ollama suggestion failed for " + fqcn, e); | |
| 181 | } | |
| 182 | } | |
| 183 | ||
| 184 | /** | |
| 185 | * Normalizes a provider response into the application's internal result | |
| 186 | * invariants. | |
| 187 | * | |
| 188 | * <p> | |
| 189 | * The method ensures that collection-valued fields are never {@code null} and | |
| 190 | * removes malformed method entries that do not define a usable method name. | |
| 191 | * </p> | |
| 192 | * | |
| 193 | * @param input raw suggestion returned by the provider | |
| 194 | * @return normalized suggestion | |
| 195 | */ | |
| 196 | private static AiClassSuggestion normalize(AiClassSuggestion input) { | |
| 197 |
2
1. normalize : removed conditional - replaced equality check with false → SURVIVED 2. normalize : removed conditional - replaced equality check with true → KILLED |
List<AiMethodSuggestion> methods = input.methods() == null ? List.of() : input.methods(); |
| 198 |
2
1. normalize : removed conditional - replaced equality check with true → SURVIVED 2. normalize : removed conditional - replaced equality check with false → KILLED |
List<String> classTags = input.classTags() == null ? List.of() : input.classTags(); |
| 199 | ||
| 200 | List<AiMethodSuggestion> normalizedMethods = methods.stream() | |
| 201 |
7
1. lambda$normalize$1 : removed conditional - replaced equality check with true → SURVIVED 2. lambda$normalize$1 : removed conditional - replaced equality check with false → KILLED 3. lambda$normalize$1 : removed conditional - replaced equality check with true → KILLED 4. lambda$normalize$1 : removed conditional - replaced equality check with true → KILLED 5. lambda$normalize$1 : replaced boolean return with true for org/egothor/methodatlas/ai/OllamaClient::lambda$normalize$1 → KILLED 6. lambda$normalize$1 : removed conditional - replaced equality check with false → KILLED 7. lambda$normalize$1 : removed conditional - replaced equality check with false → KILLED |
.filter(method -> method != null && method.methodName() != null && !method.methodName().isBlank()) |
| 202 |
1
1. lambda$normalize$2 : replaced return value with null for org/egothor/methodatlas/ai/OllamaClient::lambda$normalize$2 → KILLED |
.map(method -> new AiMethodSuggestion(method.methodName(), method.securityRelevant(), |
| 203 |
2
1. lambda$normalize$2 : removed conditional - replaced equality check with true → SURVIVED 2. lambda$normalize$2 : removed conditional - replaced equality check with false → KILLED |
method.displayName(), method.tags() == null ? List.of() : method.tags(), method.reason(), |
| 204 | method.confidence(), method.interactionScore())) | |
| 205 | .toList(); | |
| 206 | ||
| 207 |
1
1. normalize : replaced return value with null for org/egothor/methodatlas/ai/OllamaClient::normalize → KILLED |
return new AiClassSuggestion(input.className(), input.classSecurityRelevant(), classTags, input.classReason(), |
| 208 | normalizedMethods); | |
| 209 | } | |
| 210 | ||
| 211 | /** | |
| 212 | * Request payload sent to the Ollama chat API. | |
| 213 | * | |
| 214 | * <p> | |
| 215 | * This record models the JSON structure expected by the {@code /api/chat} | |
| 216 | * endpoint. | |
| 217 | * </p> | |
| 218 | * | |
| 219 | * @param model model identifier used for inference | |
| 220 | * @param messages ordered chat messages sent to the model | |
| 221 | * @param stream whether streaming responses are requested | |
| 222 | * @param options provider-specific inference options | |
| 223 | */ | |
| 224 | private record ChatRequest(String model, List<Message> messages, boolean stream, Options options) { | |
| 225 | } | |
| 226 | ||
| 227 | /** | |
| 228 | * Chat message sent to the Ollama API. | |
| 229 | * | |
| 230 | * @param role logical role of the message sender, such as {@code system} or | |
| 231 | * {@code user} | |
| 232 | * @param content textual message content | |
| 233 | */ | |
| 234 | private record Message(String role, String content) { | |
| 235 | } | |
| 236 | ||
| 237 | /** | |
| 238 | * Provider-specific inference options supplied to the Ollama API. | |
| 239 | * | |
| 240 | * <p> | |
| 241 | * Currently only the {@code temperature} sampling parameter is configured. | |
| 242 | * Temperature controls the randomness of model output: | |
| 243 | * </p> | |
| 244 | * | |
| 245 | * <ul> | |
| 246 | * <li>{@code 0.0} produces deterministic output</li> | |
| 247 | * <li>higher values increase variation and creativity</li> | |
| 248 | * </ul> | |
| 249 | * | |
| 250 | * <p> | |
| 251 | * The MethodAtlas AI integration explicitly sets {@code temperature} to | |
| 252 | * {@code 0.0} in order to obtain stable, repeatable classification results and | |
| 253 | * strictly formatted JSON output suitable for automated parsing. | |
| 254 | * </p> | |
| 255 | * | |
| 256 | * <p> | |
| 257 | * Allowing stochastic sampling would significantly increase the probability | |
| 258 | * that the model produces explanatory text, formatting variations, or malformed | |
| 259 | * JSON responses, which would break the downstream deserialization pipeline. | |
| 260 | * </p> | |
| 261 | * | |
| 262 | * @param temperature sampling temperature controlling response randomness | |
| 263 | */ | |
| 264 | private record Options(@JsonProperty("temperature") Double temperature) { | |
| 265 | } | |
| 266 | ||
| 267 | /** | |
| 268 | * Partial response model returned by the Ollama chat API. | |
| 269 | * | |
| 270 | * <p> | |
| 271 | * Only the fields required by this client are modeled. Unknown properties are | |
| 272 | * ignored to maintain compatibility with future API extensions. | |
| 273 | * </p> | |
| 274 | * | |
| 275 | * @param message the response message payload | |
| 276 | */ | |
| 277 | @JsonIgnoreProperties(ignoreUnknown = true) | |
| 278 | private record ChatResponse(ResponseMessage message) { | |
| 279 | } | |
| 280 | ||
| 281 | /** | |
| 282 | * Message payload returned within an Ollama chat response. | |
| 283 | * | |
| 284 | * <p> | |
| 285 | * The client reads the {@code content} component and expects it to contain the | |
| 286 | * JSON classification result generated by the model. | |
| 287 | * </p> | |
| 288 | * | |
| 289 | * @param content the textual content of the message | |
| 290 | */ | |
| 291 | @JsonIgnoreProperties(ignoreUnknown = true) | |
| 292 | private record ResponseMessage(String content) { | |
| 293 | } | |
| 294 | } | |
Mutations | ||
| 118 |
1.1 |
|
| 120 |
1.1 |
|
| 171 |
1.1 2.2 3.3 4.4 5.5 6.6 |
|
| 177 |
1.1 |
|
| 197 |
1.1 2.2 |
|
| 198 |
1.1 2.2 |
|
| 201 |
1.1 2.2 3.3 4.4 5.5 6.6 7.7 |
|
| 202 |
1.1 |
|
| 203 |
1.1 2.2 |
|
| 207 |
1.1 |