Skip to content

Frontend API

Frontend analysis components for AST analysis, type inference, and optimization.

AST Analyzer

multigen.frontend.ast_analyzer

AST Analysis Framework for Static Python Code.

This module provides comprehensive AST analysis capabilities for the frontend layer, focusing on static analysis of Python code that can be converted to C.

ASTAnalyzer

Bases: NodeVisitor

Enhanced AST analyzer for static Python code analysis.

Source code in src/multigen/frontend/ast_analyzer.py
class ASTAnalyzer(ast.NodeVisitor):
    """Enhanced AST analyzer for static Python code analysis.

    Visits a parsed module and accumulates an ``AnalysisResult`` describing
    functions, variables, call graph edges, and complexity, while flagging
    constructs that prevent conversion of the code to C (missing type
    annotations, non-range iterators, ...).
    """

    def __init__(self) -> None:
        self.log = log.config(self.__class__.__name__)
        self.result = AnalysisResult()
        # Name of the function whose body is currently being visited
        # (None at module level).
        self.current_function: Optional[str] = None
        self.current_scope = "global"
        self.type_hints: dict[str, TypeInfo] = {}
        # Classification assigned to each visited AST node.
        self.node_types: dict[ast.AST, NodeType] = {}

    def analyze(self, source_code: str) -> AnalysisResult:
        """Analyze Python source code and return analysis results.

        Syntax errors are reported in ``result.errors`` instead of being
        raised; in that case ``convertible`` is False and the confidence
        is zeroed.
        """
        try:
            tree = ast.parse(source_code)
            self.visit(tree)
            self._finalize_analysis()
            return self.result
        except SyntaxError as e:
            self.result.errors.append(f"Syntax error: {e}")
            self.result.convertible = False
            self.result.conversion_confidence = 0.0
            return self.result

    def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
        """Analyze function definitions: signature, parameters, body, complexity."""
        self.node_types[node] = NodeType.FUNCTION_DEF

        # end_lineno can be None on synthetic/partial trees; fall back to 1.
        func_info = FunctionInfo(name=node.name, line_count=node.end_lineno - node.lineno + 1 if node.end_lineno else 1)

        # Analyze return type annotation; a missing annotation is treated as
        # "void" rather than an error.
        if node.returns:
            func_info.return_type = self._extract_type_info(node.returns)
        else:
            func_info.return_type = TypeInfo("void")

        # Analyze parameters: every parameter must carry a type annotation
        # for the function to remain convertible.
        for arg in node.args.args:
            if arg.annotation:
                type_info = self._extract_type_info(arg.annotation)
                param_info = VariableInfo(
                    name=arg.arg, type_info=type_info, scope=node.name, is_parameter=True, is_declared=True
                )
                func_info.parameters.append(param_info)
                func_info.local_variables[arg.arg] = param_info
            else:
                self.result.errors.append(f"Parameter '{arg.arg}' in function '{node.name}' lacks type annotation")
                self.result.convertible = False

        # Register the function before walking its body so that nested
        # visitors (e.g. visit_Assign) can attach local variables to it.
        self.result.functions[node.name] = func_info

        # Visit the body with this function as the active scope, and restore
        # the previous scope even if a child visitor raises.
        old_function = self.current_function
        old_scope = self.current_scope
        self.current_function = node.name
        self.current_scope = node.name
        try:
            for stmt in node.body:
                self.visit(stmt)

            # Calculate complexity over the full function subtree.
            func_info.complexity = self._calculate_function_complexity(node)
        finally:
            self.current_function = old_function
            self.current_scope = old_scope

    def visit_AnnAssign(self, node: ast.AnnAssign) -> None:
        """Analyze annotated assignments (explicit variable declarations)."""
        self.node_types[node] = NodeType.VARIABLE_DEF

        # Only simple names are recorded; attribute/subscript targets are
        # left to generic_visit.
        if isinstance(node.target, ast.Name):
            var_name = node.target.id
            type_info = self._extract_type_info(node.annotation)

            var_info = VariableInfo(
                name=var_name,
                type_info=type_info,
                scope=self.current_scope,
                is_declared=True,
                first_assignment_line=node.lineno,
            )

            if self.current_function:
                self.result.functions[self.current_function].local_variables[var_name] = var_info
            else:
                self.result.global_variables[var_name] = var_info

        self.generic_visit(node)

    def visit_Assign(self, node: ast.Assign) -> None:
        """Analyze regular (unannotated) assignments."""
        self.node_types[node] = NodeType.ASSIGNMENT

        for target in node.targets:
            if isinstance(target, ast.Name):
                var_name = target.id

                # Check whether the variable was already declared in scope.
                var_info = self._get_variable_info(var_name)
                if not var_info:
                    # Local variables may have their type inferred later;
                    # global variables still require explicit annotations.
                    if self.current_function:
                        # Placeholder for flow-sensitive type inference.
                        # scope keeps the literal "local" sentinel expected by
                        # the inference pass (not the function name).
                        var_info = VariableInfo(
                            name=var_name,
                            type_info=None,  # Will be inferred
                            is_parameter=False,
                            scope="local",
                            first_assignment_line=node.lineno,  # mirror visit_AnnAssign
                        )
                        var_info.is_modified = True
                        var_info.usage_count = 1
                        self.result.functions[self.current_function].local_variables[var_name] = var_info
                    else:
                        # Global variables still require explicit annotation.
                        self.result.errors.append(
                            f"Global variable '{var_name}' used without type annotation declaration"
                        )
                        self.result.convertible = False
                else:
                    var_info.is_modified = True
                    var_info.usage_count += 1

        self.generic_visit(node)

    def visit_Call(self, node: ast.Call) -> None:
        """Analyze function calls, recording call-graph edges for direct calls."""
        self.node_types[node] = NodeType.FUNCTION_CALL

        # Only simple-name calls (f(...)) are recorded; attribute calls
        # (obj.m(...)) are not tracked here.
        if isinstance(node.func, ast.Name):
            func_name = node.func.id
            if self.current_function:
                self.result.functions[self.current_function].calls_made.append(func_name)

        self.generic_visit(node)

    def visit_Return(self, node: ast.Return) -> None:
        """Tag return statements."""
        self.node_types[node] = NodeType.RETURN
        self.generic_visit(node)

    def visit_If(self, node: ast.If) -> None:
        """Tag if statements."""
        self.node_types[node] = NodeType.IF_STMT
        self.generic_visit(node)

    def visit_While(self, node: ast.While) -> None:
        """Tag while loops."""
        self.node_types[node] = NodeType.WHILE_LOOP
        self.generic_visit(node)

    def visit_For(self, node: ast.For) -> None:
        """Analyze for loops; only range() iterators convert cleanly to C."""
        self.node_types[node] = NodeType.FOR_LOOP

        # Warn (not error) on direct calls to anything other than range().
        if isinstance(node.iter, ast.Call) and isinstance(node.iter.func, ast.Name):
            if node.iter.func.id != "range":
                self.result.warnings.append(
                    f"For loop at line {node.lineno} uses non-range iterator, may require special handling"
                )

        self.generic_visit(node)

    def visit_Name(self, node: ast.Name) -> None:
        """Analyze name references, counting reads of known variables."""
        self.node_types[node] = NodeType.NAME

        if isinstance(node.ctx, ast.Load):
            # A read of a tracked variable bumps its usage count.
            var_info = self._get_variable_info(node.id)
            if var_info:
                var_info.usage_count += 1

        self.generic_visit(node)

    def _extract_type_info(self, annotation: ast.expr) -> TypeInfo:
        """Extract type information from a type annotation expression.

        Handles plain names, string constants, and the generic forms
        ``list[...]`` and ``Optional[...]``; anything else maps to "unknown".
        """
        if isinstance(annotation, ast.Name):
            return TypeInfo(annotation.id)
        elif isinstance(annotation, ast.Constant):
            return TypeInfo(str(annotation.value))
        elif isinstance(annotation, ast.Subscript):
            # Handle generic types like list[int]
            if isinstance(annotation.value, ast.Name):
                base_type = annotation.value.id
                if base_type == "list":
                    return TypeInfo("list", c_equivalent="*")
                elif base_type == "Optional":
                    # Optional[T] is T with the nullable flag set.
                    inner_type = self._extract_type_info(annotation.slice)
                    inner_type.is_nullable = True
                    return inner_type

        # Fallback for complex annotations
        return TypeInfo("unknown")

    def _get_variable_info(self, var_name: str) -> Optional[VariableInfo]:
        """Look up a variable: current function's locals first, then globals."""
        if self.current_function:
            func_info = self.result.functions.get(self.current_function)
            if func_info and var_name in func_info.local_variables:
                return func_info.local_variables[var_name]

        return self.result.global_variables.get(var_name)

    def _calculate_function_complexity(self, node: ast.FunctionDef) -> StaticComplexity:
        """Score a function's complexity from its control flow and calls."""
        complexity_score: float = 0.0

        # Weighted count over all descendant nodes: branches/loops 1.0,
        # calls 0.5, comprehensions 2.0.
        for child in ast.walk(node):
            if isinstance(child, (ast.If, ast.While, ast.For)):
                complexity_score += 1.0
            elif isinstance(child, ast.Call):
                complexity_score += 0.5
            elif isinstance(child, (ast.ListComp, ast.DictComp, ast.GeneratorExp)):
                complexity_score += 2.0  # Comprehensions are complex

        # Map score to complexity level
        if complexity_score <= 1:
            return StaticComplexity.TRIVIAL
        elif complexity_score <= 3:
            return StaticComplexity.SIMPLE
        elif complexity_score <= 6:
            return StaticComplexity.MODERATE
        elif complexity_score <= 10:
            return StaticComplexity.COMPLEX
        else:
            return StaticComplexity.UNSUPPORTED

    def _finalize_analysis(self) -> None:
        """Finalize the analysis with overall assessments."""
        # Overall complexity is the maximum over all analyzed functions.
        max_complexity = StaticComplexity.TRIVIAL
        for func_info in self.result.functions.values():
            if func_info.complexity.value > max_complexity.value:
                max_complexity = func_info.complexity

        self.result.complexity = max_complexity

        # Confidence: errors penalize heavily (0.3 each), warnings mildly
        # (0.1 each, floored at 0.7); no issues leaves the default 1.0.
        error_count = len(self.result.errors)
        warning_count = len(self.result.warnings)

        if error_count > 0:
            self.result.conversion_confidence = max(0.0, 1.0 - (error_count * 0.3))
        elif warning_count > 0:
            self.result.conversion_confidence = max(0.7, 1.0 - (warning_count * 0.1))

        # Determine if code is convertible
        # Only actual errors block conversion; high complexity is a warning, not a blocker
        if error_count > 0:
            self.result.convertible = False

analyze(source_code)

Analyze Python source code and return analysis results.

Source code in src/multigen/frontend/ast_analyzer.py
def analyze(self, source_code: str) -> AnalysisResult:
    """Analyze Python source code and return analysis results."""
    try:
        tree = ast.parse(source_code)
        self.visit(tree)
        self._finalize_analysis()
        return self.result
    except SyntaxError as e:
        # Unparsable input is reported, not raised: record the error and
        # mark the result as unconvertible with zero confidence.
        self.result.errors.append(f"Syntax error: {e}")
        self.result.convertible = False
        self.result.conversion_confidence = 0.0
        return self.result

visit_AnnAssign(node)

Analyze annotated assignments (variable declarations).

Source code in src/multigen/frontend/ast_analyzer.py
def visit_AnnAssign(self, node: ast.AnnAssign) -> None:
    """Analyze annotated assignments (variable declarations)."""
    self.node_types[node] = NodeType.VARIABLE_DEF

    # Only simple name targets are recorded as declarations; attribute or
    # subscript targets fall through to generic_visit.
    if isinstance(node.target, ast.Name):
        var_name = node.target.id
        type_info = self._extract_type_info(node.annotation)

        var_info = VariableInfo(
            name=var_name,
            type_info=type_info,
            scope=self.current_scope,
            is_declared=True,
            first_assignment_line=node.lineno,
        )

        # Attach to the enclosing function's locals, or globals at module level.
        if self.current_function:
            self.result.functions[self.current_function].local_variables[var_name] = var_info
        else:
            self.result.global_variables[var_name] = var_info

    self.generic_visit(node)

visit_Assign(node)

Analyze regular assignments.

Source code in src/multigen/frontend/ast_analyzer.py
def visit_Assign(self, node: ast.Assign) -> None:
    """Analyze regular assignments."""
    self.node_types[node] = NodeType.ASSIGNMENT

    for target in node.targets:
        if isinstance(target, ast.Name):
            var_name = target.id

            # Check if variable is declared
            var_info = self._get_variable_info(var_name)
            if not var_info:
                # Create placeholder for type inference
                # Local variables can be inferred, global variables still require annotations
                if self.current_function:
                    # Create placeholder variable info for local variable
                    # Type will be inferred later by flow-sensitive analysis
                    var_info = VariableInfo(
                        name=var_name,
                        type_info=None,  # Will be inferred
                        is_parameter=False,
                        scope="local",
                    )
                    var_info.is_modified = True
                    var_info.usage_count = 1
                    self.result.functions[self.current_function].local_variables[var_name] = var_info
                else:
                    # Global variables still require explicit annotation
                    self.result.errors.append(
                        f"Global variable '{var_name}' used without type annotation declaration"
                    )
                    self.result.convertible = False
            else:
                # Known variable: this assignment counts as a modification.
                var_info.is_modified = True
                var_info.usage_count += 1

    self.generic_visit(node)

visit_Call(node)

Analyze function calls.

Source code in src/multigen/frontend/ast_analyzer.py
def visit_Call(self, node: ast.Call) -> None:
    """Analyze function calls."""
    self.node_types[node] = NodeType.FUNCTION_CALL

    # Only direct name calls (f(...)) are recorded as call-graph edges;
    # attribute calls (obj.m(...)) are not tracked here.
    if isinstance(node.func, ast.Name):
        func_name = node.func.id
        if self.current_function:
            self.result.functions[self.current_function].calls_made.append(func_name)

    self.generic_visit(node)

visit_For(node)

Analyze for loops.

Source code in src/multigen/frontend/ast_analyzer.py
def visit_For(self, node: ast.For) -> None:
    """Analyze for loops."""
    self.node_types[node] = NodeType.FOR_LOOP

    # Check if it's a simple range-based loop; anything other than a direct
    # range() call gets a warning (not an error).
    if isinstance(node.iter, ast.Call) and isinstance(node.iter.func, ast.Name):
        if node.iter.func.id != "range":
            self.result.warnings.append(
                f"For loop at line {node.lineno} uses non-range iterator, may require special handling"
            )

    self.generic_visit(node)

visit_FunctionDef(node)

Analyze function definitions.

Source code in src/multigen/frontend/ast_analyzer.py
def visit_FunctionDef(self, node: ast.FunctionDef) -> None:
    """Analyze function definitions."""
    self.node_types[node] = NodeType.FUNCTION_DEF

    # end_lineno may be None on synthetic trees; fall back to a count of 1.
    func_info = FunctionInfo(name=node.name, line_count=node.end_lineno - node.lineno + 1 if node.end_lineno else 1)

    # Analyze return type annotation; a missing annotation is treated as void.
    if node.returns:
        func_info.return_type = self._extract_type_info(node.returns)
    else:
        func_info.return_type = TypeInfo("void")

    # Analyze parameters: each must carry an annotation to stay convertible.
    for arg in node.args.args:
        if arg.annotation:
            type_info = self._extract_type_info(arg.annotation)
            param_info = VariableInfo(
                name=arg.arg, type_info=type_info, scope=node.name, is_parameter=True, is_declared=True
            )
            func_info.parameters.append(param_info)
            func_info.local_variables[arg.arg] = param_info
        else:
            self.result.errors.append(f"Parameter '{arg.arg}' in function '{node.name}' lacks type annotation")
            self.result.convertible = False

    # Add function to result first so body visitors can attach locals to it.
    self.result.functions[node.name] = func_info

    # Analyze function body with this function as the active scope.
    old_function = self.current_function
    old_scope = self.current_scope
    self.current_function = node.name
    self.current_scope = node.name

    for stmt in node.body:
        self.visit(stmt)

    # Calculate complexity
    func_info.complexity = self._calculate_function_complexity(node)

    self.current_function = old_function
    self.current_scope = old_scope

visit_If(node)

Analyze if statements.

Source code in src/multigen/frontend/ast_analyzer.py
def visit_If(self, node: ast.If) -> None:
    """Analyze if statements."""
    # Classification only; branches are handled by the generic walk.
    self.node_types[node] = NodeType.IF_STMT
    self.generic_visit(node)

visit_Name(node)

Analyze name references.

Source code in src/multigen/frontend/ast_analyzer.py
def visit_Name(self, node: ast.Name) -> None:
    """Analyze name references."""
    self.node_types[node] = NodeType.NAME

    if isinstance(node.ctx, ast.Load):
        # Variable usage: a read of a tracked variable bumps its count.
        var_info = self._get_variable_info(node.id)
        if var_info:
            var_info.usage_count += 1

    self.generic_visit(node)

visit_Return(node)

Analyze return statements.

Source code in src/multigen/frontend/ast_analyzer.py
def visit_Return(self, node: ast.Return) -> None:
    """Analyze return statements."""
    # Classification only; the returned expression is visited generically.
    self.node_types[node] = NodeType.RETURN
    self.generic_visit(node)

visit_While(node)

Analyze while loops.

Source code in src/multigen/frontend/ast_analyzer.py
def visit_While(self, node: ast.While) -> None:
    """Analyze while loops."""
    # Classification only; condition and body are visited generically.
    self.node_types[node] = NodeType.WHILE_LOOP
    self.generic_visit(node)

AnalysisResult dataclass

Result of AST analysis.

Source code in src/multigen/frontend/ast_analyzer.py
@dataclass
class AnalysisResult:
    """Result of AST analysis."""

    functions: dict[str, FunctionInfo] = field(default_factory=dict)  # keyed by function name
    global_variables: dict[str, VariableInfo] = field(default_factory=dict)  # keyed by variable name
    imports: list[str] = field(default_factory=list)  # imported module names
    complexity: StaticComplexity = StaticComplexity.SIMPLE  # max over all functions
    errors: list[str] = field(default_factory=list)  # conversion-blocking issues
    warnings: list[str] = field(default_factory=list)  # non-blocking issues
    convertible: bool = True  # False once any error is recorded
    conversion_confidence: float = 1.0  # 0.0-1.0, reduced per error/warning

FunctionInfo dataclass

Information about a function in static analysis.

Source code in src/multigen/frontend/ast_analyzer.py
@dataclass
class FunctionInfo:
    """Information about a function in static analysis."""

    name: str
    parameters: list[VariableInfo] = field(default_factory=list)  # annotated parameters, in order
    return_type: Optional[TypeInfo] = None  # "void" TypeInfo when unannotated
    local_variables: dict[str, VariableInfo] = field(default_factory=dict)  # params + locals, by name
    complexity: StaticComplexity = StaticComplexity.SIMPLE
    calls_made: list[str] = field(default_factory=list)  # names of directly-called functions
    line_count: int = 0  # source lines spanned by the def
    has_side_effects: bool = False

NodeType

Bases: Enum

Types of AST nodes we can analyze.

Source code in src/multigen/frontend/ast_analyzer.py
class NodeType(Enum):
    """Types of AST nodes we can analyze.

    Used by ASTAnalyzer to classify each visited node in ``node_types``.
    """

    FUNCTION_DEF = "function_def"
    CLASS_DEF = "class_def"
    VARIABLE_DEF = "variable_def"  # annotated assignment (declaration)
    ASSIGNMENT = "assignment"  # plain, unannotated assignment
    FUNCTION_CALL = "function_call"
    RETURN = "return"
    IF_STMT = "if_stmt"
    WHILE_LOOP = "while_loop"
    FOR_LOOP = "for_loop"
    BINARY_OP = "binary_op"
    UNARY_OP = "unary_op"
    CONSTANT = "constant"
    NAME = "name"

StaticComplexity

Bases: Enum

Complexity levels for static analysis.

Source code in src/multigen/frontend/ast_analyzer.py
class StaticComplexity(Enum):
    """Complexity levels for static analysis.

    Ordered by value so that higher means harder to convert; the overall
    result takes the maximum over all functions.
    """

    TRIVIAL = 1  # Simple constants, basic operations
    SIMPLE = 2  # Function calls, basic control flow
    MODERATE = 3  # Complex control flow, multiple variables
    COMPLEX = 4  # Advanced patterns, nested structures
    UNSUPPORTED = 5  # Dynamic features not convertible

TypeInfo dataclass

Information about a type in the static analysis.

Source code in src/multigen/frontend/ast_analyzer.py
@dataclass
class TypeInfo:
    """Information about a type in the static analysis.

    On construction, ``c_equivalent`` is derived from ``name`` unless the
    caller supplied one explicitly.
    """

    name: str
    python_type: Optional[type] = None
    c_equivalent: Optional[str] = None
    is_mutable: bool = True
    is_nullable: bool = False
    constraints: list[str] = field(default_factory=list)

    def __post_init__(self) -> None:
        """Fill in the C equivalent when none was provided."""
        self.c_equivalent = self.c_equivalent or self._map_to_c_type()

    def _map_to_c_type(self) -> str:
        """Return the C counterpart of this Python type name ("void*" if unknown)."""
        known = {
            "int": "int",
            "float": "double",
            "bool": "bool",
            "str": "char*",
            "NoneType": "void",
        }
        return known.get(self.name, "void*")

__post_init__()

Set up C equivalent mappings.

Source code in src/multigen/frontend/ast_analyzer.py
def __post_init__(self) -> None:
    """Set up C equivalent mappings."""
    # Only derive a C type when the caller did not supply one explicitly.
    if not self.c_equivalent:
        self.c_equivalent = self._map_to_c_type()

VariableInfo dataclass

Information about a variable in static analysis.

Source code in src/multigen/frontend/ast_analyzer.py
@dataclass
class VariableInfo:
    """Information about a variable in static analysis."""

    name: str
    type_info: Optional[TypeInfo]  # None until inferred for unannotated locals
    scope: str  # enclosing function name, "global", or the "local" sentinel
    is_parameter: bool = False
    is_declared: bool = False  # True when introduced via an annotated assignment
    first_assignment_line: Optional[int] = None
    usage_count: int = 0  # number of recorded reads/writes
    is_modified: bool = False  # True once assigned after declaration

analyze_python_code(source_code)

Convenience function to analyze Python code.

Source code in src/multigen/frontend/ast_analyzer.py
def analyze_python_code(source_code: str) -> AnalysisResult:
    """Convenience wrapper: run a fresh ASTAnalyzer over *source_code*."""
    return ASTAnalyzer().analyze(source_code)

analyze_python_file(file_path)

Analyze a Python file.

Source code in src/multigen/frontend/ast_analyzer.py
def analyze_python_file(file_path: str) -> AnalysisResult:
    """Read *file_path* as UTF-8 and analyze its contents."""
    with open(file_path, encoding="utf-8") as source:
        return analyze_python_code(source.read())

Type Inference

multigen.frontend.type_inference

Type Inference System for Static Python Analysis.

This module provides advanced type inference capabilities that go beyond simple type annotations to infer types from context, usage patterns, and static analysis.

InferenceMethod

Bases: Enum

Methods used for type inference.

Source code in src/multigen/frontend/type_inference.py
class InferenceMethod(Enum):
    """Methods used for type inference.

    Each member names the kind of evidence that produced an inferred type.
    """

    ANNOTATION = "annotation"  # Explicit type annotation
    LITERAL = "literal"  # Inferred from literal value
    ASSIGNMENT = "assignment"  # Inferred from assignment
    OPERATION = "operation"  # Inferred from operations
    FUNCTION_RETURN = "function_return"  # Inferred from function return
    PARAMETER_USAGE = "parameter_usage"  # Inferred from parameter usage
    CONTEXT = "context"  # Inferred from surrounding context

InferenceResult dataclass

Result of type inference for a single expression.

Source code in src/multigen/frontend/type_inference.py
@dataclass
class InferenceResult:
    """Result of type inference for a single expression."""

    type_info: TypeInfo
    confidence: float  # 0.0 to 1.0
    method: InferenceMethod  # which kind of evidence produced this result
    evidence: list[str] = field(default_factory=list)  # human-readable justifications
    alternatives: list[TypeInfo] = field(default_factory=list)  # other candidate types

    @property
    def c_type(self) -> str:
        """Get C type equivalent from type_info."""
        # Falls back to the generic pointer type when no mapping exists.
        return self.type_info.c_equivalent or "void*"

    @property
    def python_type(self) -> str:
        """Get Python type name from type_info."""
        return self.type_info.name

c_type property

Get C type equivalent from type_info.

python_type property

Get Python type name from type_info.

TypeConstraint dataclass

A constraint on a type based on usage.

Source code in src/multigen/frontend/type_inference.py
@dataclass
class TypeConstraint:
    """A constraint on a type based on usage."""

    variable_name: str
    constraint_type: str  # "must_be", "cannot_be", "must_support"
    constraint_value: Any  # the type/capability the constraint refers to
    source_line: int  # line where the constraining usage occurs
    evidence: str  # human-readable description of the usage

TypeInferenceEngine

Advanced type inference engine for static Python code.

Source code in src/multigen/frontend/type_inference.py
class TypeInferenceEngine:
    """Advanced type inference engine for static Python code.

    Combines several strategies — literal inspection, annotation lookup,
    a binary-operator result table, built-in return types, and simple
    usage heuristics — to produce an ``InferenceResult`` (type plus a
    0.0–1.0 confidence and supporting evidence) for expressions and
    function signatures.
    """

    def __init__(self, enable_flow_sensitive: bool = True):
        self.log = log.config(self.__class__.__name__)
        # Cache of previously inferred variable types, keyed by variable name;
        # consulted by _infer_name_type with a reduced-confidence fallback.
        self.inferred_types: dict[str, InferenceResult] = {}
        # Accumulated type constraints (not populated by any method in this class).
        self.constraints: list[TypeConstraint] = []
        # When True, analyze_function_signature_enhanced prefers flow-sensitive analysis.
        self.enable_flow_sensitive = enable_flow_sensitive

        # Lazy import to avoid circular dependency
        self._flow_sensitive_inferencer: Optional[FlowSensitiveInferencer] = None
        # Result-type table for binary operations, keyed by the operands'
        # C-equivalent type names and the AST operator *class* (not instance).
        # NOTE(review): constant literals are built as TypeInfo("int"),
        # TypeInfo("float"), etc. — confirm TypeInfo derives matching
        # c_equivalent names ("int"/"double"/"char*") or these keys never match.
        self.binary_op_result_types = {
            # (left_type, operator, right_type) -> result_type
            ("int", ast.Add, "int"): "int",
            ("int", ast.Sub, "int"): "int",
            ("int", ast.Mult, "int"): "int",
            ("int", ast.Div, "int"): "double",  # Division always returns float in Python 3
            ("int", ast.FloorDiv, "int"): "int",
            ("int", ast.Mod, "int"): "int",
            ("int", ast.Pow, "int"): "int",
            ("double", ast.Add, "double"): "double",
            ("double", ast.Sub, "double"): "double",
            ("double", ast.Mult, "double"): "double",
            ("double", ast.Div, "double"): "double",
            ("double", ast.FloorDiv, "double"): "double",
            ("double", ast.Mod, "double"): "double",
            ("double", ast.Pow, "double"): "double",
            # Mixed int/float operations
            ("int", ast.Add, "double"): "double",
            ("double", ast.Add, "int"): "double",
            ("int", ast.Mult, "double"): "double",
            ("double", ast.Mult, "int"): "double",
            # String operations
            ("char*", ast.Add, "char*"): "char*",
            ("char*", ast.Mult, "int"): "char*",
            # Comparison operations
            ("int", ast.Lt, "int"): "bool",
            ("int", ast.Gt, "int"): "bool",
            ("int", ast.Eq, "int"): "bool",
            ("double", ast.Lt, "double"): "bool",
            ("char*", ast.Eq, "char*"): "bool",
        }

    def infer_expression_type(self, node: ast.expr, context: dict[str, TypeInfo]) -> InferenceResult:
        """Infer the type of an expression.

        Dispatches on the concrete AST node class; unsupported node kinds
        fall through to an "unknown" result with zero confidence.

        Args:
            node: Expression node to classify.
            context: Known variable types, mapping name -> TypeInfo.
        """
        if isinstance(node, ast.Constant):
            return self._infer_constant_type(node)
        elif isinstance(node, ast.Name):
            return self._infer_name_type(node, context)
        elif isinstance(node, ast.BinOp):
            return self._infer_binop_type(node, context)
        elif isinstance(node, ast.UnaryOp):
            return self._infer_unaryop_type(node, context)
        elif isinstance(node, ast.Call):
            return self._infer_call_type(node, context)
        elif isinstance(node, ast.Compare):
            return self._infer_compare_type(node, context)
        elif isinstance(node, ast.List):
            return self._infer_list_type(node, context)
        elif isinstance(node, ast.Tuple):
            return self._infer_tuple_type(node, context)
        else:
            # Unknown expression type
            return InferenceResult(
                type_info=TypeInfo("unknown"),
                confidence=0.0,
                method=InferenceMethod.CONTEXT,
                evidence=[f"Unknown expression type: {type(node).__name__}"],
            )

    def _infer_constant_type(self, node: ast.Constant) -> InferenceResult:
        """Infer type from constant literals (always confidence 1.0)."""
        value = node.value
        # bool must be tested before int: bool is a subclass of int in Python.
        if isinstance(value, bool):
            type_info = TypeInfo("bool")
        elif isinstance(value, int):
            type_info = TypeInfo("int")
        elif isinstance(value, float):
            # NOTE(review): named "float" here, while the binary-op table keys
            # use "double" — verify TypeInfo("float") yields c_equivalent "double".
            type_info = TypeInfo("float")
        elif isinstance(value, str):
            type_info = TypeInfo("str")
        elif value is None:
            type_info = TypeInfo("NoneType")
        else:
            type_info = TypeInfo("unknown")

        return InferenceResult(
            type_info=type_info,
            confidence=1.0,
            method=InferenceMethod.LITERAL,
            evidence=[f"Literal value: {repr(value)}"],
        )

    def _infer_name_type(self, node: ast.Name, context: dict[str, TypeInfo]) -> InferenceResult:
        """Infer type from a variable reference.

        Lookup order: explicit context (confidence 1.0), then this engine's
        inference cache (confidence reduced by 0.8), else unknown.
        """
        var_name = node.id

        if var_name in context:
            # NOTE(review): every context entry is reported as an explicit
            # annotation — confirm callers only populate context from annotations.
            return InferenceResult(
                type_info=context[var_name],
                confidence=1.0,
                method=InferenceMethod.ANNOTATION,
                evidence=[f"Variable '{var_name}' has explicit type annotation"],
            )

        # Try to infer from previous assignments or usage
        if var_name in self.inferred_types:
            prev_result = self.inferred_types[var_name]
            return InferenceResult(
                type_info=prev_result.type_info,
                confidence=prev_result.confidence * 0.8,  # Slightly lower confidence
                method=InferenceMethod.CONTEXT,
                evidence=[f"Previously inferred type for '{var_name}'"],
            )

        # Unknown variable
        return InferenceResult(
            type_info=TypeInfo("unknown"),
            confidence=0.0,
            method=InferenceMethod.CONTEXT,
            evidence=[f"Unknown variable: {var_name}"],
        )

    def _infer_binop_type(self, node: ast.BinOp, context: dict[str, TypeInfo]) -> InferenceResult:
        """Infer type from binary operations via the operator result table.

        Recursively infers both operand types, then looks up
        (left_c_type, operator class, right_c_type) in binary_op_result_types.
        """
        left_result = self.infer_expression_type(node.left, context)
        right_result = self.infer_expression_type(node.right, context)

        # Look up result type in our mapping
        op_type = type(node.op)
        left_c = left_result.type_info.c_equivalent if left_result.type_info.c_equivalent else "unknown"
        right_c = right_result.type_info.c_equivalent if right_result.type_info.c_equivalent else "unknown"
        key = (left_c, op_type, right_c)

        if key in self.binary_op_result_types:
            result_c_type = self.binary_op_result_types[key]
            result_type = TypeInfo("inferred", c_equivalent=result_c_type)
            # Result can be no more certain than the weaker operand; the 0.9
            # factor discounts for the table lookup itself.
            confidence = min(left_result.confidence, right_result.confidence) * 0.9

            return InferenceResult(
                type_info=result_type,
                confidence=confidence,
                method=InferenceMethod.OPERATION,
                evidence=[
                    f"Binary operation: {left_result.type_info.c_equivalent} "
                    f"{op_type.__name__} {right_result.type_info.c_equivalent}"
                ],
            )

        # Special handling for unknown operations
        return InferenceResult(
            type_info=TypeInfo("unknown"),
            confidence=0.0,
            method=InferenceMethod.OPERATION,
            evidence=[f"Unknown binary operation: {op_type.__name__}"],
        )

    def _infer_unaryop_type(self, node: ast.UnaryOp, context: dict[str, TypeInfo]) -> InferenceResult:
        """Infer type from unary operations.

        ``+x``/``-x`` preserve a numeric operand's type; ``not x`` is always
        bool; anything else (including ``~x``) is unknown.
        """
        operand_result = self.infer_expression_type(node.operand, context)

        # Most unary operations preserve the operand type
        if isinstance(node.op, ast.UAdd) or isinstance(node.op, ast.USub):
            # +x or -x preserves numeric type
            if operand_result.type_info.c_equivalent in ["int", "double"]:
                return InferenceResult(
                    type_info=operand_result.type_info,
                    confidence=operand_result.confidence,
                    method=InferenceMethod.OPERATION,
                    evidence=[f"Unary {type(node.op).__name__} preserves type"],
                )
        elif isinstance(node.op, ast.Not):
            # not x always returns bool
            return InferenceResult(
                type_info=TypeInfo("bool"),
                confidence=1.0,
                method=InferenceMethod.OPERATION,
                evidence=["Unary 'not' always returns bool"],
            )

        return InferenceResult(
            type_info=TypeInfo("unknown"),
            confidence=0.0,
            method=InferenceMethod.OPERATION,
            evidence=[f"Unknown unary operation: {type(node.op).__name__}"],
        )

    def _infer_call_type(self, node: ast.Call, context: dict[str, TypeInfo]) -> InferenceResult:
        """Infer type from function calls.

        Recognizes a small set of built-ins by name; calls to anything else
        (including attribute calls like ``obj.method()``) are unknown.
        """
        if isinstance(node.func, ast.Name):
            func_name = node.func.id

            # Built-in function type mappings
            builtin_returns = {
                "len": TypeInfo("int"),
                "str": TypeInfo("str"),
                "int": TypeInfo("int"),
                "float": TypeInfo("float"),
                "bool": TypeInfo("bool"),
                "abs": None,  # Depends on argument
                "max": None,  # Depends on arguments
                "min": None,  # Depends on arguments
                "sum": None,  # Depends on arguments
            }

            if func_name in builtin_returns:
                return_type = builtin_returns[func_name]
                if return_type:
                    return InferenceResult(
                        type_info=return_type,
                        confidence=1.0,
                        method=InferenceMethod.FUNCTION_RETURN,
                        evidence=[f"Built-in function '{func_name}' return type"],
                    )

            # For functions that return the type of their argument
            # (only the first argument is inspected).
            if func_name in ["abs", "max", "min"] and node.args:
                arg_result = self.infer_expression_type(node.args[0], context)
                return InferenceResult(
                    type_info=arg_result.type_info,
                    confidence=arg_result.confidence * 0.9,
                    method=InferenceMethod.FUNCTION_RETURN,
                    evidence=[f"Function '{func_name}' returns argument type"],
                )

        # Unknown function call
        return InferenceResult(
            type_info=TypeInfo("unknown"),
            confidence=0.0,
            method=InferenceMethod.FUNCTION_RETURN,
            evidence=["Unknown function call"],
        )

    def _infer_compare_type(self, node: ast.Compare, context: dict[str, TypeInfo]) -> InferenceResult:
        """Infer type from comparison operations."""
        # All comparisons return bool
        return InferenceResult(
            type_info=TypeInfo("bool"),
            confidence=1.0,
            method=InferenceMethod.OPERATION,
            evidence=["Comparison operations always return bool"],
        )

    def _infer_list_type(self, node: ast.List, context: dict[str, TypeInfo]) -> InferenceResult:
        """Infer type from list literals.

        Element type is taken from the first element only; heterogeneous
        lists are not detected here.
        """
        if not node.elts:
            # Empty list - can't infer element type
            return InferenceResult(
                type_info=TypeInfo("list", c_equivalent="void*"),
                confidence=0.5,
                method=InferenceMethod.LITERAL,
                evidence=["Empty list - unknown element type"],
            )

        # Infer element type from first element
        first_elem_result = self.infer_expression_type(node.elts[0], context)
        return InferenceResult(
            type_info=TypeInfo("list", c_equivalent=f"{first_elem_result.type_info.c_equivalent}*"),
            confidence=first_elem_result.confidence * 0.8,
            method=InferenceMethod.LITERAL,
            evidence=[f"List with {first_elem_result.type_info.c_equivalent} elements"],
        )

    def _infer_tuple_type(self, node: ast.Tuple, context: dict[str, TypeInfo]) -> InferenceResult:
        """Infer type from tuple literals.

        All elements are inferred; the overall confidence is the minimum
        element confidence times 0.9. The element types survive only in
        the evidence string — the TypeInfo is a generic "struct".
        """
        element_types = []
        min_confidence = 1.0

        for elem in node.elts:
            elem_result = self.infer_expression_type(elem, context)
            c_equiv = elem_result.type_info.c_equivalent if elem_result.type_info.c_equivalent else "unknown"
            element_types.append(c_equiv)
            min_confidence = min(min_confidence, elem_result.confidence)

        return InferenceResult(
            type_info=TypeInfo("tuple", c_equivalent="struct"),
            confidence=min_confidence * 0.9,
            method=InferenceMethod.LITERAL,
            evidence=[f"Tuple with elements: {', '.join(element_types)}"],
        )

    def analyze_function_signature(self, func_node: ast.FunctionDef) -> dict[str, InferenceResult]:
        """Analyze and infer types for a complete function signature.

        Returns:
            Mapping from parameter name to its InferenceResult; the return
            type is stored under the special key "__return__".
        """
        results = {}

        # Analyze parameters
        for arg in func_node.args.args:
            if arg.annotation:
                # Explicit annotation
                type_info = self._extract_type_from_annotation(arg.annotation)
                results[arg.arg] = InferenceResult(
                    type_info=type_info,
                    confidence=1.0,
                    method=InferenceMethod.ANNOTATION,
                    evidence=["Explicit type annotation"],
                )
            else:
                # Try to infer from usage
                usage_result = self._infer_parameter_from_usage(arg.arg, func_node)
                results[arg.arg] = usage_result

        # Analyze return type
        if func_node.returns:
            return_type_info = self._extract_type_from_annotation(func_node.returns)
            results["__return__"] = InferenceResult(
                type_info=return_type_info,
                confidence=1.0,
                method=InferenceMethod.ANNOTATION,
                evidence=["Explicit return type annotation"],
            )
        else:
            # Infer from return statements
            return_result = self._infer_return_type_from_statements(func_node)
            results["__return__"] = return_result

        return results

    def analyze_function_signature_enhanced(self, func_node: ast.FunctionDef) -> dict[str, InferenceResult]:
        """Enhanced function analysis with optional flow-sensitive inference."""
        if self.enable_flow_sensitive:
            return self._get_flow_sensitive_results(func_node)
        else:
            return self.analyze_function_signature(func_node)

    def _get_flow_sensitive_results(self, func_node: ast.FunctionDef) -> dict[str, InferenceResult]:
        """Get results from flow-sensitive analysis.

        Falls back to analyze_function_signature on any exception, so this
        never raises for analysis failures.
        """
        if self._flow_sensitive_inferencer is None:
            # Lazy import to avoid circular dependency
            from .flow_sensitive_inference import FlowSensitiveInferencer

            self._flow_sensitive_inferencer = FlowSensitiveInferencer(self)

        try:
            # Try flow-sensitive analysis first
            flow_results = self._flow_sensitive_inferencer.analyze_function_flow(func_node)
            self.log.debug(f"Flow-sensitive analysis completed for {func_node.name}")
            return flow_results
        except Exception as e:
            # Fall back to regular analysis if flow-sensitive fails
            self.log.warning(f"Flow-sensitive analysis failed for {func_node.name}: {e}")
            return self.analyze_function_signature(func_node)

    def _extract_type_from_annotation(self, annotation: ast.expr) -> TypeInfo:
        """Extract TypeInfo from AST annotation node.

        Handles plain names, string constants, and the generic forms
        list[...], dict[...], and Optional[...]; everything else is unknown.
        """
        if isinstance(annotation, ast.Name):
            return TypeInfo(annotation.id)
        elif isinstance(annotation, ast.Constant):
            # String annotation (forward reference), e.g. x: "MyType"
            return TypeInfo(str(annotation.value))
        elif isinstance(annotation, ast.Subscript):
            # Handle generic types
            if isinstance(annotation.value, ast.Name):
                base_type = annotation.value.id
                if base_type == "list":
                    return TypeInfo("list", c_equivalent="*")
                elif base_type == "dict":
                    return TypeInfo("dict", c_equivalent="*")
                elif base_type == "Optional":
                    # Optional[T] -> T with the nullable flag set.
                    inner_type = self._extract_type_from_annotation(annotation.slice)
                    inner_type.is_nullable = True
                    return inner_type

        return TypeInfo("unknown")

    def _infer_parameter_from_usage(self, param_name: str, func_node: ast.FunctionDef) -> InferenceResult:
        """Infer parameter type from its usage within the function.

        Only binary-operation operands are inspected; appearing in an
        Add/Sub operation is taken as weak evidence of an int.
        """
        # Analyze how the parameter is used
        usage_patterns = []

        for node in ast.walk(func_node):
            if isinstance(node, ast.BinOp):
                if isinstance(node.left, ast.Name) and node.left.id == param_name:
                    usage_patterns.append(f"Used in binary operation: {type(node.op).__name__}")
                elif isinstance(node.right, ast.Name) and node.right.id == param_name:
                    usage_patterns.append(f"Used in binary operation: {type(node.op).__name__}")

        # Simple heuristics for type inference
        if any("Add" in pattern or "Sub" in pattern for pattern in usage_patterns):
            return InferenceResult(
                type_info=TypeInfo("int"),  # Assume numeric
                confidence=0.7,
                method=InferenceMethod.PARAMETER_USAGE,
                evidence=usage_patterns,
            )

        return InferenceResult(
            type_info=TypeInfo("unknown"),
            confidence=0.0,
            method=InferenceMethod.PARAMETER_USAGE,
            evidence=["No clear usage pattern found"],
        )

    def _infer_return_type_from_statements(self, func_node: ast.FunctionDef) -> InferenceResult:
        """Infer return type from return statements in the function.

        No value-returning statements -> void; all agree -> that type with
        averaged confidence; disagreement -> unknown. Bare ``return`` (no
        value) statements are ignored rather than treated as None.
        """
        return_types = []

        for node in ast.walk(func_node):
            if isinstance(node, ast.Return) and node.value:
                # Found a return statement with a value
                # (empty context: no parameter types are available here).
                return_result = self.infer_expression_type(node.value, {})
                return_types.append(return_result)

        if not return_types:
            # No return statements found
            return InferenceResult(
                type_info=TypeInfo("void"),
                confidence=1.0,
                method=InferenceMethod.FUNCTION_RETURN,
                evidence=["No return statements found"],
            )

        # If all return types are the same, use that
        if len(set(rt.type_info.name for rt in return_types)) == 1:
            first_result = return_types[0]
            avg_confidence = sum(rt.confidence for rt in return_types) / len(return_types)
            return InferenceResult(
                type_info=first_result.type_info,
                confidence=avg_confidence,
                method=InferenceMethod.FUNCTION_RETURN,
                evidence=[f"Consistent return type across {len(return_types)} return statements"],
            )

        # Multiple different return types - use the most common or most confident
        return InferenceResult(
            type_info=TypeInfo("unknown"),
            confidence=0.0,
            method=InferenceMethod.FUNCTION_RETURN,
            evidence=["Inconsistent return types found"],
        )

analyze_function_signature(func_node)

Analyze and infer types for a complete function signature.

Source code in src/multigen/frontend/type_inference.py
def analyze_function_signature(self, func_node: ast.FunctionDef) -> dict[str, InferenceResult]:
    """Analyze and infer types for a complete function signature.

    Args:
        func_node: The function definition to analyze.

    Returns:
        Mapping from parameter name to its InferenceResult; the return
        type is stored under the special key "__return__".
    """
    results = {}

    # Analyze parameters
    for arg in func_node.args.args:
        if arg.annotation:
            # Explicit annotation
            type_info = self._extract_type_from_annotation(arg.annotation)
            results[arg.arg] = InferenceResult(
                type_info=type_info,
                confidence=1.0,
                method=InferenceMethod.ANNOTATION,
                evidence=["Explicit type annotation"],
            )
        else:
            # Try to infer from usage
            usage_result = self._infer_parameter_from_usage(arg.arg, func_node)
            results[arg.arg] = usage_result

    # Analyze return type
    if func_node.returns:
        return_type_info = self._extract_type_from_annotation(func_node.returns)
        results["__return__"] = InferenceResult(
            type_info=return_type_info,
            confidence=1.0,
            method=InferenceMethod.ANNOTATION,
            evidence=["Explicit return type annotation"],
        )
    else:
        # Infer from return statements
        return_result = self._infer_return_type_from_statements(func_node)
        results["__return__"] = return_result

    return results

analyze_function_signature_enhanced(func_node)

Enhanced function analysis with optional flow-sensitive inference.

Source code in src/multigen/frontend/type_inference.py
def analyze_function_signature_enhanced(self, func_node: ast.FunctionDef) -> dict[str, InferenceResult]:
    """Analyze a function signature, preferring flow-sensitive inference.

    When flow-sensitive analysis is disabled on this engine, falls back
    to the plain signature analysis.
    """
    if not self.enable_flow_sensitive:
        return self.analyze_function_signature(func_node)
    return self._get_flow_sensitive_results(func_node)

infer_expression_type(node, context)

Infer the type of an expression.

Source code in src/multigen/frontend/type_inference.py
def infer_expression_type(self, node: ast.expr, context: dict[str, TypeInfo]) -> InferenceResult:
    """Infer the type of an expression.

    Dispatches on the concrete AST node class; unsupported node kinds
    fall through to an "unknown" result with zero confidence.

    Args:
        node: Expression node to classify.
        context: Known variable types, mapping name -> TypeInfo.
    """
    if isinstance(node, ast.Constant):
        return self._infer_constant_type(node)
    elif isinstance(node, ast.Name):
        return self._infer_name_type(node, context)
    elif isinstance(node, ast.BinOp):
        return self._infer_binop_type(node, context)
    elif isinstance(node, ast.UnaryOp):
        return self._infer_unaryop_type(node, context)
    elif isinstance(node, ast.Call):
        return self._infer_call_type(node, context)
    elif isinstance(node, ast.Compare):
        return self._infer_compare_type(node, context)
    elif isinstance(node, ast.List):
        return self._infer_list_type(node, context)
    elif isinstance(node, ast.Tuple):
        return self._infer_tuple_type(node, context)
    else:
        # Unknown expression type
        return InferenceResult(
            type_info=TypeInfo("unknown"),
            confidence=0.0,
            method=InferenceMethod.CONTEXT,
            evidence=[f"Unknown expression type: {type(node).__name__}"],
        )

Intelligence Layer

Base Classes

multigen.frontend.base

Base classes for the Intelligence Layer analyzers, optimizers, and verifiers.

AnalysisContext dataclass

Context information for analysis operations.

Source code in src/multigen/frontend/base.py
@dataclass
class AnalysisContext:
    """Context information for analysis operations.

    Bundles the source under analysis with its parsed AST and the knobs
    (analysis/optimization level, target) that analyzers and optimizers read.
    """

    source_code: str  # Raw Python source being analyzed
    ast_node: ast.AST  # Parsed AST corresponding to source_code
    analysis_result: Optional[AnalysisResult] = None  # Prior frontend analysis, if available
    analysis_level: AnalysisLevel = AnalysisLevel.BASIC
    optimization_level: OptimizationLevel = OptimizationLevel.BASIC
    target_architecture: str = "x86_64"
    metadata: Optional[dict[str, Any]] = None  # Free-form extras; normalized to {} in __post_init__

    def __post_init__(self) -> None:
        """Initialize metadata dictionary if not provided."""
        # A mutable {} default is unsafe on a dataclass field, so it is set here.
        if self.metadata is None:
            self.metadata = {}

__post_init__()

Initialize metadata dictionary if not provided.

Source code in src/multigen/frontend/base.py
def __post_init__(self) -> None:
    """Initialize metadata dictionary if not provided."""
    # A mutable {} default is unsafe on a dataclass field, so it is set here.
    if self.metadata is None:
        self.metadata = {}

AnalysisLevel

Bases: Enum

Levels of analysis depth.

Source code in src/multigen/frontend/base.py
class AnalysisLevel(Enum):
    """Levels of analysis depth, from cheapest to most thorough."""

    BASIC = "basic"
    INTERMEDIATE = "intermediate"
    ADVANCED = "advanced"
    COMPREHENSIVE = "comprehensive"

AnalysisReport dataclass

Base class for analysis reports.

Source code in src/multigen/frontend/base.py
@dataclass
class AnalysisReport:
    """Base class for analysis reports.

    Carries the outcome of a single analyzer run: what was found, how
    confident the analyzer is in it, and how long the run took.
    """

    analyzer_name: str
    success: bool
    confidence: float  # 0.0 to 1.0
    findings: list[str]
    warnings: list[str]
    errors: list[str]
    metadata: dict[str, Any]
    execution_time_ms: float = 0.0

    def has_issues(self) -> bool:
        """Return True when at least one warning or error was recorded."""
        return bool(self.errors) or bool(self.warnings)

    def is_reliable(self, threshold: float = 0.7) -> bool:
        """Return True when the run succeeded with confidence >= threshold."""
        if not self.success:
            return False
        return self.confidence >= threshold

has_issues()

Check if the analysis found any issues.

Source code in src/multigen/frontend/base.py
def has_issues(self) -> bool:
    """Check if the analysis found any issues.

    Returns:
        True when at least one error or warning was recorded.
    """
    return len(self.errors) > 0 or len(self.warnings) > 0

is_reliable(threshold=0.7)

Check if the analysis is reliable based on confidence threshold.

Source code in src/multigen/frontend/base.py
def is_reliable(self, threshold: float = 0.7) -> bool:
    """Check if the analysis is reliable based on confidence threshold.

    Args:
        threshold: Minimum confidence required (inclusive).

    Returns:
        True only when the run succeeded and confidence >= threshold.
    """
    return self.success and self.confidence >= threshold

BaseAnalyzer

Bases: ABC

Base class for all analyzers in the intelligence layer.

Source code in src/multigen/frontend/base.py
class BaseAnalyzer(ABC):
    """Base class for all analyzers in the intelligence layer.

    Concrete analyzers implement ``analyze``; they may also override
    ``can_analyze`` to decline contexts they cannot handle. A per-instance
    dict is available for memoizing results.
    """

    def __init__(self, name: str, analysis_level: AnalysisLevel = AnalysisLevel.BASIC):
        self.log = log.config(self.__class__.__name__)
        self.name = name
        self.analysis_level = analysis_level
        # Per-instance memoization store, keyed by _get_cache_key output.
        self._cache: dict[str, Any] = {}

    @abstractmethod
    def analyze(self, context: AnalysisContext) -> AnalysisReport:
        """Run this analyzer over *context*.

        Args:
            context: The analysis context containing code and metadata

        Returns:
            AnalysisReport containing the results of the analysis
        """
        pass

    def can_analyze(self, context: AnalysisContext) -> bool:
        """Report whether this analyzer accepts *context*.

        The base implementation accepts every context; subclasses narrow it.

        Args:
            context: The analysis context to check

        Returns:
            True if this analyzer can process the context
        """
        return True

    def clear_cache(self) -> None:
        """Drop all memoized entries for this analyzer."""
        self._cache.clear()

    def _get_cache_key(self, context: AnalysisContext) -> str:
        """Build a stable cache key from the source text and analysis level."""
        import hashlib

        fingerprint = f"{context.source_code}:{self.analysis_level.value}"
        return hashlib.sha256(fingerprint.encode()).hexdigest()

analyze(context) abstractmethod

Perform analysis on the given context.

Parameters:

Name Type Description Default
context AnalysisContext

The analysis context containing code and metadata

required

Returns:

Type Description
AnalysisReport

AnalysisReport containing the results of the analysis

Source code in src/multigen/frontend/base.py
@abstractmethod
def analyze(self, context: AnalysisContext) -> AnalysisReport:
    """Perform analysis on the given context.

    Args:
        context: The analysis context containing code and metadata

    Returns:
        AnalysisReport containing the results of the analysis
    """
    pass  # abstract: concrete analyzers provide the implementation

can_analyze(context)

Check if this analyzer can handle the given context.

Parameters:

Name Type Description Default
context AnalysisContext

The analysis context to check

required

Returns:

Type Description
bool

True if this analyzer can process the context

Source code in src/multigen/frontend/base.py
def can_analyze(self, context: AnalysisContext) -> bool:
    """Check if this analyzer can handle the given context.

    The base implementation accepts every context; subclasses override
    to opt out of contexts they cannot process.

    Args:
        context: The analysis context to check

    Returns:
        True if this analyzer can process the context
    """
    return True

clear_cache()

Clear the analyzer's cache.

Source code in src/multigen/frontend/base.py
def clear_cache(self) -> None:
    """Clear the analyzer's cache.

    Removes all memoized entries; subsequent analyses recompute from scratch.
    """
    self._cache.clear()

BaseOptimizer

Bases: ABC

Base class for all optimizers in the intelligence layer.

Source code in src/multigen/frontend/base.py
class BaseOptimizer(ABC):
    """Base class for all optimizers in the intelligence layer.

    Optimizers can be toggled at runtime: a disabled optimizer reports via
    ``can_optimize`` that it will not process any context.
    """

    def __init__(self, name: str, optimization_level: OptimizationLevel = OptimizationLevel.BASIC):
        self.name = name
        self.optimization_level = optimization_level
        self._enabled = True  # toggled via enable()/disable()

    @abstractmethod
    def optimize(self, context: AnalysisContext) -> "OptimizationResult":
        """Transform the given context into an optimized form.

        Args:
            context: The analysis context to optimize

        Returns:
            OptimizationResult containing the optimized representation
        """
        pass

    def enable(self) -> None:
        """Allow this optimizer to run."""
        self._enabled = True

    def disable(self) -> None:
        """Prevent this optimizer from running."""
        self._enabled = False

    def can_optimize(self, context: AnalysisContext) -> bool:
        """Report whether this optimizer is currently willing to run.

        Args:
            context: The analysis context to check

        Returns:
            True unless the optimizer has been disabled
        """
        return self._enabled

can_optimize(context)

Check if this optimizer can handle the given context.

Parameters:

Name Type Description Default
context AnalysisContext

The analysis context to check

required

Returns:

Type Description
bool

True if this optimizer can process the context

Source code in src/multigen/frontend/base.py
def can_optimize(self, context: AnalysisContext) -> bool:
    """Check if this optimizer can handle the given context.

    Reflects only the enabled/disabled flag set by enable()/disable();
    the context itself is not inspected here.

    Args:
        context: The analysis context to check

    Returns:
        True if this optimizer can process the context
    """
    return self._enabled

disable()

Disable this optimizer.

Source code in src/multigen/frontend/base.py
def disable(self) -> None:
    """Disable this optimizer.

    After this call, can_optimize() returns False until enable() is called.
    """
    self._enabled = False

enable()

Enable this optimizer.

Source code in src/multigen/frontend/base.py
def enable(self) -> None:
    """Enable this optimizer.

    After this call, can_optimize() returns True until disable() is called.
    """
    self._enabled = True

optimize(context) abstractmethod

Perform optimization on the given context.

Parameters:

Name Type Description Default
context AnalysisContext

The analysis context to optimize

required

Returns:

Type Description
OptimizationResult

OptimizationResult containing the optimized representation

Source code in src/multigen/frontend/base.py
@abstractmethod
def optimize(self, context: AnalysisContext) -> "OptimizationResult":
    """Perform optimization on the given context.

    Args:
        context: The analysis context to optimize

    Returns:
        OptimizationResult containing the optimized representation
    """
    pass  # abstract: concrete optimizers provide the implementation

BaseVerifier

Bases: ABC

Base class for all verifiers in the intelligence layer.

Source code in src/multigen/frontend/base.py
class BaseVerifier(ABC):
    """Base class for all verifiers in the intelligence layer.

    Verifiers check that an optimization preserved the semantics of the
    original context; concrete subclasses implement ``verify``.
    """

    def __init__(self, name: str):
        # Human-readable verifier name, used for identification/reporting.
        self.name = name

    @abstractmethod
    def verify(self, context: AnalysisContext, optimized_result: OptimizationResult) -> "VerificationResult":
        """Verify the correctness of an optimization.

        Args:
            context: The original analysis context
            optimized_result: The result of optimization to verify

        Returns:
            VerificationResult containing verification status
        """
        pass  # abstract: concrete verifiers provide the implementation

verify(context, optimized_result) abstractmethod

Verify the correctness of an optimization.

Parameters:

Name Type Description Default
context AnalysisContext

The original analysis context

required
optimized_result OptimizationResult

The result of optimization to verify

required

Returns:

Type Description
VerificationResult

VerificationResult containing verification status

Source code in src/multigen/frontend/base.py
@abstractmethod
def verify(self, context: AnalysisContext, optimized_result: OptimizationResult) -> "VerificationResult":
    """Verify the correctness of an optimization.

    Args:
        context: The original analysis context
        optimized_result: The result of optimization to verify

    Returns:
        VerificationResult containing verification status
    """
    pass  # abstract: concrete verifiers provide the implementation

IntelligencePipeline

Main pipeline for coordinating analyzers, optimizers, and verifiers.

Source code in src/multigen/frontend/base.py
class IntelligencePipeline:
    """Main pipeline for coordinating analyzers, optimizers, and verifiers."""

    def __init__(self) -> None:
        # Each phase keeps its components in registration order.
        self.analyzers: list[BaseAnalyzer] = []
        self.optimizers: list[BaseOptimizer] = []
        self.verifiers: list[BaseVerifier] = []

    def add_analyzer(self, analyzer: BaseAnalyzer) -> None:
        """Register an analyzer with the pipeline."""
        self.analyzers.append(analyzer)

    def add_optimizer(self, optimizer: BaseOptimizer) -> None:
        """Register an optimizer with the pipeline."""
        self.optimizers.append(optimizer)

    def add_verifier(self, verifier: BaseVerifier) -> None:
        """Register a verifier with the pipeline."""
        self.verifiers.append(verifier)

    def process(self, context: AnalysisContext) -> dict[str, Any]:
        """Run the context through analysis, optimization, and verification.

        Args:
            context: The analysis context to process

        Returns:
            Dictionary collecting the reports and results of every phase.
        """
        results: dict[str, Any] = {
            "analysis_reports": [],
            "optimization_results": [],
            "verification_results": [],
            "final_success": False,
            "pipeline_metadata": {},
        }

        # Analysis phase: every analyzer that accepts the context contributes a report.
        results["analysis_reports"].extend(
            analyzer.analyze(context)
            for analyzer in self.analyzers
            if analyzer.can_analyze(context)
        )

        # Optimization phase: each optimization outcome is checked by every verifier.
        for optimizer in self.optimizers:
            if not optimizer.can_optimize(context):
                continue
            opt_result = optimizer.optimize(context)
            results["optimization_results"].append(opt_result)
            results["verification_results"].extend(
                verifier.verify(context, opt_result) for verifier in self.verifiers
            )

        # Aggregate the outcome across all three phases.
        results["final_success"] = self._evaluate_pipeline_success(results)
        return results

    def _evaluate_pipeline_success(self, results: dict[str, Any]) -> bool:
        """Evaluate if the pipeline execution was successful."""
        # A single failed report, invalid optimization, or unverified
        # result marks the whole run as unsuccessful.
        if not all(report.success for report in results["analysis_reports"]):
            return False
        if not all(opt.is_valid() for opt in results["optimization_results"]):
            return False
        return all(ver.is_verified() for ver in results["verification_results"])

add_analyzer(analyzer)

Add an analyzer to the pipeline.

Source code in src/multigen/frontend/base.py
def add_analyzer(self, analyzer: BaseAnalyzer) -> None:
    """Add an analyzer to the pipeline."""
    # Registration order is preserved; process() runs analyzers in this order.
    self.analyzers.append(analyzer)

add_optimizer(optimizer)

Add an optimizer to the pipeline.

Source code in src/multigen/frontend/base.py
def add_optimizer(self, optimizer: BaseOptimizer) -> None:
    """Add an optimizer to the pipeline."""
    # Registration order is preserved; process() runs optimizers in this order.
    self.optimizers.append(optimizer)

add_verifier(verifier)

Add a verifier to the pipeline.

Source code in src/multigen/frontend/base.py
def add_verifier(self, verifier: BaseVerifier) -> None:
    """Add a verifier to the pipeline."""
    # Every registered verifier is applied to every optimization result in process().
    self.verifiers.append(verifier)

process(context)

Process the context through the entire intelligence pipeline.

Parameters:

Name Type Description Default
context AnalysisContext

The analysis context to process

required

Returns:

Type Description
dict[str, Any]

Dictionary containing all analysis, optimization, and verification results

Source code in src/multigen/frontend/base.py
def process(self, context: AnalysisContext) -> dict[str, Any]:
    """Run the context through analysis, optimization, and verification.

    Args:
        context: The analysis context to process

    Returns:
        Dictionary collecting the reports and results of every phase.
    """
    results: dict[str, Any] = {
        "analysis_reports": [],
        "optimization_results": [],
        "verification_results": [],
        "final_success": False,
        "pipeline_metadata": {},
    }

    # Analysis phase: every analyzer that accepts the context contributes a report.
    results["analysis_reports"].extend(
        analyzer.analyze(context)
        for analyzer in self.analyzers
        if analyzer.can_analyze(context)
    )

    # Optimization phase: each optimization outcome is checked by every verifier.
    for optimizer in self.optimizers:
        if not optimizer.can_optimize(context):
            continue
        opt_result = optimizer.optimize(context)
        results["optimization_results"].append(opt_result)
        results["verification_results"].extend(
            verifier.verify(context, opt_result) for verifier in self.verifiers
        )

    # Aggregate the outcome across all three phases.
    results["final_success"] = self._evaluate_pipeline_success(results)
    return results

OptimizationLevel

Bases: Enum

Levels of optimization aggressiveness.

Source code in src/multigen/frontend/base.py
class OptimizationLevel(Enum):
    """Levels of optimization aggressiveness.

    Members are ordered: a higher ``value`` means a more aggressive
    level, so levels can be compared via ``.value``.
    """

    NONE = 0
    BASIC = 1
    MODERATE = 2
    AGGRESSIVE = 3
    MAXIMUM = 4

OptimizationResult dataclass

Result of an optimization operation.

Source code in src/multigen/frontend/base.py
@dataclass
class OptimizationResult:
    """Result of an optimization operation."""

    optimizer_name: str
    success: bool
    optimized_ast: Optional[ast.AST]
    transformations: list[str]
    performance_gain_estimate: float  # Estimated performance improvement factor
    safety_analysis: dict[str, bool]  # Safety checks passed
    metadata: dict[str, Any]
    execution_time_ms: float = 0.0

    def is_valid(self) -> bool:
        """A result is usable only when it succeeded and produced an AST."""
        if not self.success:
            return False
        return self.optimized_ast is not None

    def is_safe(self) -> bool:
        """Whether every recorded safety check came back truthy."""
        # all() over an empty mapping is True: no checks means nothing failed.
        return all(passed for passed in self.safety_analysis.values())

is_safe()

Check if all safety analyses passed.

Source code in src/multigen/frontend/base.py
def is_safe(self) -> bool:
    """Return True when every recorded safety check evaluated truthy."""
    # all() over an empty mapping is True: no checks means nothing failed.
    return all(passed for passed in self.safety_analysis.values())

is_valid()

Check if the optimization result is valid.

Source code in src/multigen/frontend/base.py
def is_valid(self) -> bool:
    """A result is usable only when it succeeded and produced an AST."""
    if not self.success:
        return False
    return self.optimized_ast is not None

VerificationResult dataclass

Result of a verification operation.

Source code in src/multigen/frontend/base.py
@dataclass
class VerificationResult:
    """Result of a verification operation."""

    verifier_name: str
    success: bool
    is_correct: bool
    proof_generated: bool
    confidence: float  # 0.0 to 1.0
    verification_details: dict[str, Any]
    execution_time_ms: float = 0.0

    def is_verified(self) -> bool:
        """Verified means the run both completed and judged the code correct."""
        if not self.success:
            return False
        return self.is_correct

is_verified()

Check if verification was successful and correct.

Source code in src/multigen/frontend/base.py
def is_verified(self) -> bool:
    """Verified means the run both completed and judged the code correct."""
    if not self.success:
        return False
    return self.is_correct

Example Usage

Using the AST analyzer:

from multigen.frontend.ast_analyzer import ASTAnalyzer

code = "def foo(x: int) -> int: return x * 2"
analyzer = ASTAnalyzer()
result = analyzer.analyze(code)

print(f"Functions: {result.functions}")
print(f"Type annotations: {result.type_annotations}")

Using the intelligence pipeline:

from multigen.frontend.base import (
    IntelligencePipeline,
    AnalysisContext,
)
import ast

pipeline = IntelligencePipeline()
code = "def example(): pass"
tree = ast.parse(code)
context = AnalysisContext(source_code=code, ast_node=tree)
results = pipeline.process(context)

See Also