Skip to content

Commit 335d414

Browse files
Remove unused functions and clean up error handling
Removed the unused get_cache_info function from cache_manager.py and its references in __init__.py. Also removed the unused _handle_llm_error function from consensus.py and inlined its logic for clearer error handling. Cleaned up imports in annotate.py.
1 parent 12ac807 commit 335d414

4 files changed

Lines changed: 6 additions & 48 deletions

File tree

python/mllmcelltype/__init__.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
"""mLLMCelltype: A Python module for cell type annotation using various LLMs."""
22

33
from .annotate import annotate_clusters, get_model_response
4-
from .cache_manager import get_cache_info
54
from .consensus import (
65
check_consensus,
76
interactive_consensus_annotation,
@@ -50,7 +49,6 @@
5049
"load_from_cache",
5150
"clear_cache",
5251
"get_cache_stats",
53-
"get_cache_info",
5452
"format_results",
5553
# Prompts
5654
"create_prompt",

python/mllmcelltype/annotate.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,6 @@
22

33
from __future__ import annotations
44

5-
import json
6-
import re
75
import time
86
from typing import Optional, Union
97

python/mllmcelltype/cache_manager.py

Lines changed: 2 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,6 @@
55
66
Functions:
77
clear_mllmcelltype_cache(): Interactive cache clearing
8-
get_cache_info(): Get basic information about current cache (delegates to get_cache_stats)
98
clear_cache_cli(): Command-line interface for cache management
109
"""
1110

@@ -36,24 +35,6 @@ def clear_mllmcelltype_cache():
3635
print("No cache directory found.")
3736

3837

39-
def get_cache_info():
40-
"""Get basic information about the current cache state.
41-
42-
This is a convenience wrapper around get_cache_stats(detailed=False).
43-
For detailed statistics including provider counts and timestamps,
44-
use get_cache_stats() instead.
45-
46-
Returns:
47-
dict: Cache info with keys: exists, path, count, size, size_mb
48-
(also includes file_count and total_size for backward compatibility)
49-
"""
50-
stats = get_cache_stats(detailed=False)
51-
# Add backward-compatible keys
52-
stats["file_count"] = stats["count"]
53-
stats["total_size"] = stats["size"]
54-
return stats
55-
56-
5738
def clear_cache_cli():
5839
"""Command-line interface for cache management."""
5940
import sys
@@ -70,9 +51,9 @@ def clear_cache_cli():
7051
print(f"\nCleared {removed} cache files.")
7152
elif len(sys.argv) > 1 and sys.argv[1] == "--info":
7253
# Show cache info
73-
info = get_cache_info()
54+
info = get_cache_stats(detailed=False)
7455
print(f"\nCache directory: {info['path']}")
75-
print(f"Number of cache files: {info['file_count']}")
56+
print(f"Number of cache files: {info['count']}")
7657
print(f"Total cache size: {info['size_mb']:.2f} MB")
7758
else:
7859
# Interactive mode

python/mllmcelltype/consensus.py

Lines changed: 4 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -48,26 +48,6 @@ def _get_api_key(provider: str, api_keys: Optional[dict[str, str]] = None) -> Op
4848
return load_api_key(provider)
4949

5050

51-
def _handle_llm_error(
52-
error: Exception, context: str, attempt: int = 0, max_attempts: int = 1
53-
) -> None:
54-
"""Handle LLM API call errors consistently.
55-
56-
Args:
57-
error: The exception that occurred
58-
context: Context description (e.g., "Qwen attempt", "Claude fallback")
59-
attempt: Current attempt number (0-based)
60-
max_attempts: Maximum number of attempts
61-
"""
62-
if attempt < max_attempts - 1:
63-
write_log(f"Error on {context} {attempt + 1}: {str(error)}", level="warning")
64-
write_log("Waiting before next attempt...")
65-
else:
66-
write_log(f"Error on {context}: {str(error)}", level="warning")
67-
if "attempt" in context.lower():
68-
write_log(f"All {context.split()[0]} retry attempts failed")
69-
70-
7151
def _call_llm_with_retry(
7252
prompt: str,
7353
provider: str,
@@ -119,11 +99,12 @@ def _call_llm_with_retry(
11999
KeyError,
120100
json.JSONDecodeError,
121101
) as e:
122-
_handle_llm_error(e, f"{provider} attempt", attempt, max_retries)
123102
if attempt < max_retries - 1:
103+
write_log(f"Error on {provider} attempt {attempt + 1}/{max_retries}: {str(e)}", level="warning")
124104
time.sleep(5 * (2**attempt))
125105
else:
126-
write_log(f"falling back to {fallback_provider}")
106+
write_log(f"All {provider} retry attempts failed: {str(e)}", level="warning")
107+
write_log(f"Falling back to {fallback_provider}")
127108

128109
# Try fallback provider
129110
if api_keys:
@@ -147,7 +128,7 @@ def _call_llm_with_retry(
147128
KeyError,
148129
json.JSONDecodeError,
149130
) as e:
150-
_handle_llm_error(e, f"{fallback_provider} fallback")
131+
write_log(f"Error on {fallback_provider} fallback: {str(e)}", level="warning")
151132
else:
152133
write_log(f"No {fallback_provider} API key found, falling back to simple consensus")
153134

0 commit comments

Comments (0)