Skip to content

Commit 1264aec

Browse files
committed
Fix extension checks
Merged latest version from main branch
1 parent 8678ade commit 1264aec

File tree

3 files changed

+28
-11
lines changed

3 files changed

+28
-11
lines changed

.github/workflows/extension-ai-analysis.yml

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,9 @@ jobs:
7070
7171
- name: Run AI analysis
7272
env:
73-
NEBULA_API_KEY: ${{ secrets.NEBULA_API_KEY }}
73+
INFERENCE_URL: ${{ vars.INFERENCE_URL }}
74+
INFERENCE_MODEL: ${{ vars.INFERENCE_MODEL }}
75+
INFERENCE_API_KEY: ${{ secrets.INFERENCE_API_KEY }}
7476
id: ai-analysis
7577
run: |
7678
RESULT=$(python scripts/extension_ai_analysis.py \

scripts/check_description_files.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -105,9 +105,10 @@ def check_json_file_format(extension_name, metadata, extension_file_path):
105105
extension_name, check_name,
106106
f"Invalid JSON format: {str(e)}")
107107
# Force using LF-only line endings
108-
with open(extension_file_path, 'r', encoding='utf-8') as f:
108+
# Must open in binary mode to detect line endings
109+
with open(extension_file_path, 'rb') as f:
109110
content = f.read()
110-
if '\r\n' in content or '\r' in content:
111+
if b'\r\n' in content or b'\r' in content:
111112
raise ExtensionCheckError(
112113
extension_name, check_name,
113114
"File contains non-LF line endings (CR or CRLF). Please convert to LF-only line endings.")

scripts/extension_ai_analysis.py

Lines changed: 22 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -15,13 +15,20 @@
1515
import subprocess
1616
import shutil
1717

18-
# Use Nebula Block API endpoint for chat completions.
19-
# It offers capable models for free with an OpenAI-compatible API.
20-
INFERENCE_URL = "https://inference.nebulablock.com/v1/chat/completions"
21-
INFERENCE_MODEL = "mistralai/Mistral-Small-3.2-24B-Instruct-2506"
22-
INFERENCE_RESPONSE_PER_MINUTE_LIMIT = 4 # slow down to not exceed token per minute (tpm) limit of 60k
23-
INFERENCE_API_KEY = os.getenv("NEBULA_API_KEY")
24-
INFERENCE_MAX_CHARACTERS = 100000 # max characters in all files provided to the model, approximately 25k tokens (limit is 32k)
18+
19+
# Get inference server configuration from environment variables
20+
INFERENCE_URL = os.getenv("INFERENCE_URL")
21+
if not INFERENCE_URL:
22+
raise ValueError("INFERENCE_URL environment variable is not set. Please set it before running the script.")
23+
INFERENCE_MODEL = os.getenv("INFERENCE_MODEL")
24+
if not INFERENCE_MODEL:
25+
raise ValueError("INFERENCE_MODEL environment variable is not set. Please set it before running the script.")
26+
INFERENCE_API_KEY = os.getenv("INFERENCE_API_KEY")
27+
if not INFERENCE_API_KEY:
28+
raise ValueError("INFERENCE_API_KEY environment variable is not set. Please set it before running the script.")
29+
30+
INFERENCE_RESPONSE_PER_MINUTE_LIMIT = 10 # slow down to not exceed token per minute (tpm) limit
31+
INFERENCE_MAX_CHARACTERS = 400000 # max characters in all files provided to the model, approximately 100k tokens
2532

2633
QUESTIONS = [
2734
["Is there a EXTENSION_DESCRIPTION variable in the CMakeLists.txt file that describes what the extension does in a few sentences that can be understood by a person knowledgeable in medical image computing?", ["cmake"]],
@@ -130,7 +137,9 @@ def collect_analyzed_files(folder):
130137
def ask_question(system_msg, question):
131138
headers = {
132139
"Content-Type": "application/json",
133-
"Authorization": f"Bearer {INFERENCE_API_KEY}"
140+
"Authorization": f"Bearer {INFERENCE_API_KEY}",
141+
"HTTP-Referer": "slicer.org", # Optional. Site URL for rankings on openrouter.ai.
142+
"X-Title": "3D Slicer", # Optional. Site title for rankings on openrouter.ai.
134143
}
135144

136145
messages = [
@@ -157,6 +166,11 @@ def ask_question(system_msg, question):
157166
try:
158167
answer = response.json()["choices"][0]["message"]["content"]
159168
except Exception as e:
169+
import traceback
170+
traceback.print_exc()
171+
print(f"Request data: {data}", file=sys.stderr)
172+
print(f"Response status code: {response.status_code}", file=sys.stderr)
173+
print(f"Response content: {response.text}", file=sys.stderr)
160174
raise RuntimeError(f"Error or unexpected response: {response.json()['error']['message']}")
161175

162176
return answer

0 commit comments

Comments
 (0)