```python
from agentic_brain.assertions import (
    AssertionRunner,
    assert_contains,
    assert_json_valid,
)

# 1. Create runner
runner = AssertionRunner()

# 2. Add assertions
runner.add_assertion(assert_contains("success"))

# 3. Validate output
report = runner.validate('{"status": "success"}')
print(f"✓ {report.passed}/{report.total} passed")
```

| Use Case | Validator | Example |
|---|---|---|
| Check substring | `assert_contains()` | `assert_contains("success")` |
| Forbid substring | `assert_not_contains()` | `assert_not_contains("error")` |
| Length bounds | `assert_length()` | `assert_length(10, 500)` |
| Regex pattern | `assert_matches()` | `assert_matches(r"^\d{3}")` |
| Valid JSON | `assert_json_valid()` | `assert_json_valid()` |
| JSON field exists | `assert_json_field_exists()` | `assert_json_field_exists("data.id")` |
| Value in list | `assert_in_list()` | `assert_in_list(["yes", "no"])` |
| Number in range | `assert_numeric_range()` | `assert_numeric_range(0, 100)` |
| Not empty | `assert_non_empty()` | `assert_non_empty()` |
| Word count | `assert_word_count_range()` | `assert_word_count_range(10, 100)` |
| Valid URL | `assert_valid_url()` | `assert_valid_url()` |
| Custom logic | `custom_assertion()` | `custom_assertion(my_func, "desc")` |
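These validators compose on a single runner. A minimal sketch combining the regex and word-count validators, with argument shapes taken from the table above:

```python
from agentic_brain.assertions import (
    AssertionRunner,
    assert_matches,
    assert_word_count_range,
)

# Hypothetical check: answer must open with "Summary:" and stay brief
runner = AssertionRunner()
runner.add_assertions(
    assert_matches(r"^Summary:"),
    assert_word_count_range(5, 100),
)
report = runner.validate("Summary: the run completed with no failures.")
print(report.all_passed)
```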
```python
runner = AssertionRunner()
runner.add_assertion(assert_contains("confidence:"))

# Automatically retries up to 3 times with feedback
output = runner.validate_with_retry(
    llm_call=lambda: llm.generate("Rate 0-100:"),
    max_retries=3
)
```
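`validate_with_retry` owns the retry loop for you. If you need manual control over how feedback reaches the model, an equivalent loop can be built from `validate()` and `format_feedback()`; a rough sketch, assuming `llm.generate` takes a plain prompt string:

```python
prompt = "Rate 0-100:"
output = None
for _ in range(3):
    output = llm.generate(prompt)
    report = runner.validate(output)
    if report.all_passed:
        break
    # Fold the failure feedback into the next prompt
    prompt = f"Rate 0-100:\n\nPrevious attempt failed:\n{report.format_feedback()}"
```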
```python
from agentic_brain.assertions import AssertionSeverity

# SOFT - Warning, continues processing
runner.add_assertion(Assertion(
    condition, "Warning",
    severity=AssertionSeverity.SOFT
))

# HARD - Error, triggers retry (default)
runner.add_assertion(Assertion(
    condition, "Error",
    severity=AssertionSeverity.HARD
))

# CRITICAL - Fatal, raises immediately
runner.add_assertion(Assertion(
    condition, "Fatal",
    severity=AssertionSeverity.CRITICAL
))
```
```python
from agentic_brain.assertions import with_assertions

# Automatically validates return value
@with_assertions(
    assert_contains("success"),
    assert_json_valid(),
    max_retries=2
)
def get_response():
    return llm.generate("your prompt")

result = get_response()  # Validates with retries
```
```python
report = runner.validate(output)

# Statistics
print(f"Passed: {report.passed}/{report.total}")
print(f"Rate: {report.success_rate:.1f}%")

# Failures
failed = report.get_failed_assertions()
print(f"Failed {len(failed)} assertions")

# Feedback for retry
feedback = report.format_feedback()
print(feedback)

# Check status
if report.has_critical_failures:
    print("Critical failure!")
if report.should_retry:
    print("Should retry")
```
```python
from agentic_brain.assertions import AssertionError

try:
    output = runner.validate_with_retry(llm_call)
except AssertionError as e:
    if e.retries_exhausted:
        print("Max retries exceeded")
    else:
        print(f"Critical failure: {e.message}")

    # Get detailed report
    if e.report:
        print(e.report.format_feedback())
```
```python
runner = AssertionRunner()
runner.add_assertions(
    assert_contains("success"),
    assert_not_contains("error"),
    assert_length(10, 1000),
    assert_json_valid(),
    assert_json_field_exists("status"),
)
report = runner.validate(output)
```
"""Check if text is professional."""
unprofessional = ["lol", "omg", "wtf"]
return not any(word in text.lower() for word in unprofessional)
runner.add_assertion(
custom_assertion(
is_professional_tone,
"Professional tone",
feedback="Maintain professional language"
)
)# Track multiple validations
```python
# Track multiple validations
for output in outputs:
    runner.validate(output)

stats = runner.get_stats()
print(f"Total validations: {stats['validations']}")
print(f"Passed: {stats['passed']}")
print(f"Failed: {stats['failed']}")
print(f"Retries: {stats['retries']}")

runner.reset_stats()
```
```python
runner = AssertionRunner()
runner.add_assertions(
    assert_json_valid(),
    assert_json_field_exists("status"),
    assert_json_field_exists("data"),
)
```
```python
runner = AssertionRunner()
runner.add_assertions(
    assert_contains("def "),
    assert_contains(":"),
    assert_not_contains("syntax error"),
)
```
```python
runner = AssertionRunner()
runner.add_assertions(
    assert_matches(r"^(Yes|No)"),
    assert_length(1, 100),
)
```
```python
runner = AssertionRunner()
runner.add_assertions(
    assert_numeric_range(0, 100),
    assert_contains("confidence"),
)
```
```
src/agentic_brain/assertions/
├── __init__.py          # Public API
├── core.py              # Assertion, Report, Severity
├── validators.py        # Built-in validators
└── runner.py            # AssertionRunner, decorators

tests/
└── test_assertions.py   # Comprehensive tests
```
```python
runner = AssertionRunner(name="validator")
runner.add_assertion(assertion)
runner.add_assertions(a1, a2, a3)
runner.remove_assertion("message")
runner.clear_assertions()
report = runner.validate(output)
output = runner.validate_with_retry(llm_call, max_retries=3)
stats = runner.get_stats()
runner.reset_stats()
```
```python
assertion = Assertion(
    condition=lambda x: "test" in x,
    message="Contains 'test'",
    severity=AssertionSeverity.HARD,
    retry_on_fail=True,
    feedback_prompt="Add 'test' to output"
)
```
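Once constructed, a custom `Assertion` is registered and evaluated like any built-in validator; a short usage sketch reusing the `assertion` defined above:

```python
runner = AssertionRunner()
runner.add_assertion(assertion)

report = runner.validate("a test string")   # contains "test": passes
report = runner.validate("no match here")   # fails the HARD assertion
print(report.should_retry)                  # True: HARD failures trigger retry
```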
```python
report = runner.validate(output)

# Properties
report.total, report.passed, report.failed
report.success_rate           # Float: 0-100
report.all_passed             # Bool
report.has_critical_failures  # Bool
report.has_hard_failures      # Bool
report.should_retry           # Bool

# Methods
report.get_failed_assertions()
report.get_failed_by_severity(severity)
report.format_feedback()      # String for retry
```
```python
def test_validation():
    runner = AssertionRunner()
    runner.add_assertion(assert_contains("test"))

    report = runner.validate("this is a test")
    assert report.all_passed

    report = runner.validate("no match")
    assert not report.all_passed
```
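Retry behavior can be tested the same way by stubbing the LLM call; a sketch, assuming `validate_with_retry` returns the first output that passes:

```python
def test_retry_recovers():
    runner = AssertionRunner()
    runner.add_assertion(assert_contains("test"))

    # Stub LLM: first attempt fails, second passes
    attempts = iter(["no match", "this is a test"])
    output = runner.validate_with_retry(
        llm_call=lambda: next(attempts),
        max_retries=3
    )
    assert "test" in output
```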
- Use feedback prompts to guide LLM retries:

  ```python
  assertion = Assertion(..., feedback_prompt="Be concise and clear")
  ```

- Chain assertions for complex requirements:

  ```python
  runner.add_assertions(a1, a2, a3, a4)
  ```

- Mix severity levels:

  ```python
  runner.add_assertions(
      Assertion(..., severity=CRITICAL),  # Must pass
      Assertion(..., severity=HARD),      # Should pass
      Assertion(..., severity=SOFT),      # Nice to have
  )
  ```

- Use decorators for automatic validation:

  ```python
  @with_assertions(assert_contains("success"))
  def my_function():
      return "success!"
  ```

- Catch and log failures:

  ```python
  try:
      output = runner.validate_with_retry(llm_call)
  except AssertionError as e:
      logger.error(f"Validation failed: {e.report}")
  ```
```python
import json

from agentic_brain.assertions import (
    AssertionRunner,
    assert_json_valid,
    assert_json_field_exists,
    assert_numeric_range,
    custom_assertion,
)

# Setup validation
runner = AssertionRunner("search_api")
runner.add_assertions(
    assert_json_valid("Response is JSON"),
    assert_json_field_exists("results"),
    assert_json_field_exists("total_count"),
    assert_numeric_range(0, 10000, "total_count reasonable"),
    custom_assertion(
        lambda x: json.loads(x)["status"] in ["ok", "no_results"],
        "Valid status"
    ),
)

# Generate with retry
def search_api(query):
    return llm.generate(f"""
    Call search API for: {query}
    Return JSON with: status, results (array), total_count (number)
    """)

try:
    result = runner.validate_with_retry(
        llm_call=lambda: search_api("python tutorials"),
        max_retries=3
    )
    data = json.loads(result)
    print(f"✓ Found {data['total_count']} results")
except AssertionError as e:
    print(f"✗ Search failed: {e.report.format_feedback()}")
```

- Assertions: <1ms each (mostly plain Python function calls)
- Reports: O(n) where n = number of assertions
- Retries: Proportional to max_retries × LLM latency
- Memory: Negligible - stores references, not copies
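If validation overhead ever matters, it is easy to measure directly; a quick sketch using only the standard library:

```python
import time

runner = AssertionRunner()
runner.add_assertion(assert_contains("success"))

n = 1_000
start = time.perf_counter()
for _ in range(n):
    runner.validate('{"status": "success"}')
elapsed = time.perf_counter() - start
print(f"{elapsed / n * 1000:.3f} ms per validation")
```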
| Issue | Solution |
|---|---|
| Assertion fails but shouldn't | Check case sensitivity and whitespace |
| Retries don't improve output | Make sure `feedback_prompt` gives the LLM actionable guidance |
| Too many retries | Lower `max_retries` or simplify assertions |
| Need custom validator | Use `custom_assertion(callable, message)` |
| Performance degradation | Reduce the number of assertions or lower `max_retries` |
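For the first issue, when case differences are expected, normalize inside a custom validator instead of relying on exact substring matching; a sketch built on the `custom_assertion` helper shown earlier:

```python
# Case-insensitive variant of assert_contains("success")
runner.add_assertion(
    custom_assertion(
        lambda text: "success" in text.lower(),
        "Contains 'success' (case-insensitive)"
    )
)
```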