Class swarmauri_standard.llms.CohereToolModel.CohereToolModel

CohereToolModel(**data)

Bases: LLMBase

A language model implementation for interacting with Cohere's API, specifically designed for tool-augmented conversations.

This class provides both synchronous and asynchronous methods for generating responses, handling tool calls, and managing conversations with the Cohere API. It supports streaming responses and batch processing of multiple conversations.

ATTRIBUTE DESCRIPTION
api_key

The API key for authenticating with Cohere's API

TYPE: SecretStr

allowed_models

List of supported Cohere model names

TYPE: List[str]

name

The default model name to use

TYPE: str

type

The type identifier for this model

TYPE: Literal['CohereToolModel']

resource

The resource type identifier

TYPE: str

timeout

Maximum time to wait for an API request to complete, in seconds

TYPE: float

Link to allowed models: https://docs.cohere.com/docs/models#command
Link to API keys: https://dashboard.cohere.com/api-keys

Initialize the CohereToolModel with the provided configuration.

PARAMETER DESCRIPTION
**data

Keyword arguments for configuring the model, including api_key

TYPE: Dict[str, Any] DEFAULT: {}

Source code in swarmauri_standard/llms/CohereToolModel.py
def __init__(self, **data: Dict[str, Any]) -> None:
    """
    Initialize the CohereToolModel with the provided configuration.

    Args:
        **data (Dict[str, Any]): Keyword arguments for configuring the model, including api_key
    """
    super().__init__(**data)
    headers = {
        "accept": "application/json",
        "content-type": "application/json",
        "authorization": f"Bearer {self.api_key.get_secret_value()}",
    }
    self._client = httpx.Client(
        headers=headers, base_url=self._BASE_URL, timeout=self.timeout
    )
    self._async_client = httpx.AsyncClient(
        headers=headers, base_url=self._BASE_URL, timeout=self.timeout
    )
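
A minimal construction sketch: it assumes the key is stored in a COHERE_API_KEY environment variable; Pydantic coerces the plain string into the SecretStr expected by api_key.

import os

from swarmauri_standard.llms.CohereToolModel import CohereToolModel

# Assumes COHERE_API_KEY is set in the environment.
llm = CohereToolModel(api_key=os.environ["COHERE_API_KEY"])

print(llm.name)            # default model, 'command-a-03-2025'
print(llm.allowed_models)  # supported Cohere model names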

api_key instance-attribute

api_key

allowed_models class-attribute instance-attribute

allowed_models = [
    "command-a-03-2025",
    "command-r7b-12-2024",
    "command-a-translate-08-2025",
    "command-a-reasoning-08-2025",
    "command-a-vision-07-2025",
    "command-r-plus-04-2024",
    "command-r-plus",
    "command-r-08-2024",
    "command-r-03-2024",
    "command-r",
    "command",
    "command-nightly",
    "command-light",
    "command-light-nightly",
]

name class-attribute instance-attribute

name = 'command-a-03-2025'

type class-attribute instance-attribute

type = 'CohereToolModel'

timeout class-attribute instance-attribute

timeout = 600.0

model_config class-attribute instance-attribute

model_config = ConfigDict(
    extra="allow", arbitrary_types_allowed=True
)

id class-attribute instance-attribute

id = Field(default_factory=generate_id)

members class-attribute instance-attribute

members = None

owners class-attribute instance-attribute

owners = None

host class-attribute instance-attribute

host = None

default_logger class-attribute

default_logger = None

logger class-attribute instance-attribute

logger = None

resource class-attribute instance-attribute

resource = Field(default=LLM.value, frozen=True)

version class-attribute instance-attribute

version = '0.1.0'

include_usage class-attribute instance-attribute

include_usage = True

BASE_URL class-attribute instance-attribute

BASE_URL = None

predict

predict(
    conversation,
    toolkit=None,
    temperature=0.3,
    max_tokens=1024,
)

Generate a response for a conversation synchronously.

PARAMETER DESCRIPTION
conversation

The conversation to generate a response for

TYPE: Conversation

toolkit

Optional toolkit containing available tools

TYPE: Optional[Toolkit] DEFAULT: None

temperature

Sampling temperature

TYPE: float DEFAULT: 0.3

max_tokens

Maximum number of tokens to generate

TYPE: int DEFAULT: 1024

RETURNS DESCRIPTION
Conversation

The updated conversation with the model's response

TYPE: Conversation

Source code in swarmauri_standard/llms/CohereToolModel.py
@retry_on_status_codes((429, 529), max_retries=1)
def predict(
    self,
    conversation: Conversation,
    toolkit: Optional[Toolkit] = None,
    temperature: float = 0.3,
    max_tokens: int = 1024,
) -> Conversation:
    """
    Generate a response for a conversation synchronously.

    Args:
        conversation (Conversation): The conversation to generate a response for
        toolkit (Optional[Toolkit]): Optional toolkit containing available tools
        temperature (float): Sampling temperature
        max_tokens (int): Maximum number of tokens to generate

    Returns:
        Conversation: The updated conversation with the model's response
    """
    conversation = self._ensure_conversation_has_message(conversation)
    formatted_messages = self._format_messages(conversation.history)
    tools = self._schema_convert_tools(toolkit.tools) if toolkit else None

    with DurationManager() as tool_timer:
        tool_payload = self._prepare_chat_payload(
            message=formatted_messages[-1]["message"],
            chat_history=(
                formatted_messages[:-1] if len(formatted_messages) > 1 else None
            ),
            tools=tools,
            force_single_step=True,
        )

        tool_response = self._client.post("/chat", json=tool_payload)
        tool_response.raise_for_status()
        tool_data = tool_response.json()

    tool_results = self._process_tool_calls(tool_data, toolkit)

    with DurationManager() as response_timer:
        response_payload = self._prepare_chat_payload(
            message=formatted_messages[-1]["message"],
            chat_history=(
                formatted_messages[:-1] if len(formatted_messages) > 1 else None
            ),
            tools=tools,
            tool_results=tool_results,
            temperature=temperature,
            force_single_step=True,
        )

        response = self._client.post("/chat", json=response_payload)
        response.raise_for_status()
        response_data = response.json()

        usage_data = response_data.get("usage", {})

    usage = self._prepare_usage_data(
        usage_data, tool_timer.duration, response_timer.duration
    )

    conversation.add_message(
        AgentMessage(content=response_data.get("text", ""), usage=usage)
    )
    return conversation
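
A usage sketch for predict, reusing the llm instance constructed earlier. The Conversation, HumanMessage, Toolkit, and AdditionTool import paths below, as well as the add_tool and get_last helpers, are assumptions about the surrounding swarmauri_standard layout rather than guarantees; adapt them to your installation.

from swarmauri_standard.conversations.Conversation import Conversation  # assumed path
from swarmauri_standard.messages.HumanMessage import HumanMessage       # assumed path
from swarmauri_standard.toolkits.Toolkit import Toolkit                 # assumed path
from swarmauri_standard.tools.AdditionTool import AdditionTool          # assumed path

conversation = Conversation()
conversation.add_message(HumanMessage(content="What is 512 + 671?"))

toolkit = Toolkit()
toolkit.add_tool(AdditionTool())  # add_tool is assumed; any registered tool works

conversation = llm.predict(conversation, toolkit=toolkit, temperature=0.3)
print(conversation.get_last().content)  # get_last is assumed; final answer after the tool call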

stream

stream(
    conversation,
    toolkit=None,
    temperature=0.3,
    max_tokens=1024,
)

Stream a response for a conversation synchronously.

PARAMETER DESCRIPTION
conversation

The conversation to generate a response for

TYPE: Conversation

toolkit

Optional toolkit containing available tools

TYPE: Optional[Toolkit] DEFAULT: None

temperature

Sampling temperature

TYPE: float DEFAULT: 0.3

max_tokens

Maximum number of tokens to generate

TYPE: int DEFAULT: 1024

RETURNS DESCRIPTION
Iterator[str]

Iterator[str]: An iterator yielding response chunks

Source code in swarmauri_standard/llms/CohereToolModel.py
@retry_on_status_codes((429, 529), max_retries=1)
def stream(
    self,
    conversation: Conversation,
    toolkit: Optional[Toolkit] = None,
    temperature: float = 0.3,
    max_tokens: int = 1024,
) -> Iterator[str]:
    """
    Stream a response for a conversation synchronously.

    Args:
        conversation (Conversation): The conversation to generate a response for
        toolkit (Optional[Toolkit]): Optional toolkit containing available tools
        temperature (float): Sampling temperature
        max_tokens (int): Maximum number of tokens to generate

    Returns:
        Iterator[str]: An iterator yielding response chunks
    """
    conversation = self._ensure_conversation_has_message(conversation)
    formatted_messages = self._format_messages(conversation.history)
    tools = self._schema_convert_tools(toolkit.tools) if toolkit else None

    # Handle tool call first
    tool_payload = self._prepare_chat_payload(
        message=formatted_messages[-1]["message"],
        chat_history=(
            formatted_messages[:-1] if len(formatted_messages) > 1 else None
        ),
        tools=tools,
        force_single_step=True,
    )
    with DurationManager() as prompt_timer:
        tool_response = self._client.post("/chat", json=tool_payload)
        tool_response.raise_for_status()
        tool_data = tool_response.json()

    tool_results = self._process_tool_calls(tool_data, toolkit)

    # Prepare streaming payload
    stream_payload = self._prepare_chat_payload(
        message=formatted_messages[-1]["message"],
        chat_history=(
            formatted_messages[:-1] if len(formatted_messages) > 1 else None
        ),
        tools=tools,
        tool_results=tool_results,
        temperature=temperature,
        force_single_step=True,
    )
    stream_payload["stream"] = True

    collected_content = []
    usage_data = {}
    with DurationManager() as completion_timer:
        with self._client.stream("POST", "/chat", json=stream_payload) as response:
            response.raise_for_status()
            for line in response.iter_lines():
                if line:
                    chunk = json.loads(line)
                    if "text" in chunk:
                        content = chunk["text"]
                        collected_content.append(content)
                        yield content
                    elif "usage" in chunk:
                        usage_data = chunk["usage"]

    full_content = "".join(collected_content)
    usage = self._prepare_usage_data(
        usage_data, prompt_timer.duration, completion_timer.duration
    )
    conversation.add_message(AgentMessage(content=full_content, usage=usage))
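
A streaming sketch, reusing the llm, conversation, and toolkit objects from the predict example above. Chunks are printed as they arrive; once the generator is exhausted, the accumulated reply and usage data have been appended to the conversation.

for chunk in llm.stream(conversation, toolkit=toolkit):
    print(chunk, end="", flush=True)  # each chunk is a text fragment
print()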

apredict async

apredict(
    conversation,
    toolkit=None,
    temperature=0.3,
    max_tokens=1024,
)

Generate a response for a conversation asynchronously.

PARAMETER DESCRIPTION
conversation

The conversation to generate a response for

TYPE: Conversation

toolkit

Optional toolkit containing available tools

TYPE: Optional[Toolkit] DEFAULT: None

temperature

Sampling temperature

TYPE: float DEFAULT: 0.3

max_tokens

Maximum number of tokens to generate

TYPE: int DEFAULT: 1024

RETURNS DESCRIPTION
Conversation

The updated conversation with the model's response

TYPE: Conversation

Source code in swarmauri_standard/llms/CohereToolModel.py
@retry_on_status_codes((429, 529), max_retries=1)
async def apredict(
    self,
    conversation: Conversation,
    toolkit: Optional[Toolkit] = None,
    temperature: float = 0.3,
    max_tokens: int = 1024,
) -> Conversation:
    """
    Generate a response for a conversation asynchronously.

    Args:
        conversation (Conversation): The conversation to generate a response for
        toolkit (Optional[Toolkit]): Optional toolkit containing available tools
        temperature (float): Sampling temperature
        max_tokens (int): Maximum number of tokens to generate

    Returns:
        Conversation: The updated conversation with the model's response
    """
    conversation = self._ensure_conversation_has_message(conversation)
    formatted_messages = self._format_messages(conversation.history)
    tools = self._schema_convert_tools(toolkit.tools) if toolkit else None

    with DurationManager() as tool_timer:
        tool_payload = self._prepare_chat_payload(
            message=formatted_messages[-1]["message"],
            chat_history=(
                formatted_messages[:-1] if len(formatted_messages) > 1 else None
            ),
            tools=tools,
            force_single_step=True,
        )

        tool_response = await self._async_client.post("/chat", json=tool_payload)
        tool_response.raise_for_status()
        tool_data = tool_response.json()

    tool_results = self._process_tool_calls(tool_data, toolkit)

    with DurationManager() as response_timer:
        response_payload = self._prepare_chat_payload(
            message=formatted_messages[-1]["message"],
            chat_history=(
                formatted_messages[:-1] if len(formatted_messages) > 1 else None
            ),
            tools=tools,
            tool_results=tool_results,
            temperature=temperature,
            force_single_step=True,
        )

        response = await self._async_client.post("/chat", json=response_payload)
        response.raise_for_status()
        response_data = response.json()

        usage_data = response_data.get("usage", {})

    usage = self._prepare_usage_data(
        usage_data, tool_timer.duration, response_timer.duration
    )

    conversation.add_message(
        AgentMessage(content=response_data.get("text", ""), usage=usage)
    )
    return conversation
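
An async sketch, reusing the objects from the synchronous examples above; apredict is awaited inside an event loop.

import asyncio


async def main() -> None:
    result = await llm.apredict(conversation, toolkit=toolkit)
    print(result.get_last().content)  # get_last is assumed


asyncio.run(main())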

astream async

astream(
    conversation,
    toolkit=None,
    temperature=0.3,
    max_tokens=1024,
)

Stream a response for a conversation asynchronously.

PARAMETER DESCRIPTION
conversation

The conversation to generate a response for

TYPE: Conversation

toolkit

Optional toolkit containing available tools

TYPE: Optional[Toolkit] DEFAULT: None

temperature

Sampling temperature

TYPE: float DEFAULT: 0.3

max_tokens

Maximum number of tokens to generate

TYPE: int DEFAULT: 1024

RETURNS DESCRIPTION
AsyncIterator[str]

AsyncIterator[str]: An async iterator yielding response chunks

Source code in swarmauri_standard/llms/CohereToolModel.py
@retry_on_status_codes((429, 529), max_retries=1)
async def astream(
    self,
    conversation: Conversation,
    toolkit: Optional[Toolkit] = None,
    temperature: float = 0.3,
    max_tokens: int = 1024,
) -> AsyncIterator[str]:
    """
    Stream a response for a conversation asynchronously.

    Args:
        conversation (Conversation): The conversation to generate a response for
        toolkit (Optional[Toolkit]): Optional toolkit containing available tools
        temperature (float): Sampling temperature
        max_tokens (int): Maximum number of tokens to generate

    Returns:
        AsyncIterator[str]: An async iterator yielding response chunks
    """
    conversation = self._ensure_conversation_has_message(conversation)
    formatted_messages = self._format_messages(conversation.history)
    tools = self._schema_convert_tools(toolkit.tools) if toolkit else None

    # Handle tool call first
    tool_payload = self._prepare_chat_payload(
        message=formatted_messages[-1]["message"],
        chat_history=(
            formatted_messages[:-1] if len(formatted_messages) > 1 else None
        ),
        tools=tools,
        force_single_step=True,
    )
    with DurationManager() as prompt_timer:
        tool_response = await self._async_client.post("/chat", json=tool_payload)
        tool_response.raise_for_status()
        tool_data = tool_response.json()

    tool_results = self._process_tool_calls(tool_data, toolkit)

    # Prepare streaming payload
    stream_payload = self._prepare_chat_payload(
        message=formatted_messages[-1]["message"],
        chat_history=(
            formatted_messages[:-1] if len(formatted_messages) > 1 else None
        ),
        tools=tools,
        tool_results=tool_results,
        temperature=temperature,
        force_single_step=True,
    )
    stream_payload["stream"] = True

    collected_content = []
    usage_data = {}

    async with self._async_client.stream(
        "POST", "/chat", json=stream_payload
    ) as response:
        response.raise_for_status()
        with DurationManager() as completion_timer:
            async for line in response.aiter_lines():
                if line:
                    try:
                        chunk = json.loads(line)
                        if "text" in chunk:
                            content = chunk["text"]
                            collected_content.append(content)
                            yield content
                        elif "usage" in chunk:
                            usage_data = chunk["usage"]
                    except json.JSONDecodeError:
                        continue

    full_content = "".join(collected_content)
    usage = self._prepare_usage_data(
        usage_data, prompt_timer.duration, completion_timer.duration
    )
    conversation.add_message(AgentMessage(content=full_content, usage=usage))
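
An async streaming sketch: the async generator is consumed with async for, and the accumulated reply is added to the conversation when iteration finishes.

import asyncio


async def main() -> None:
    async for chunk in llm.astream(conversation, toolkit=toolkit):
        print(chunk, end="", flush=True)
    print()


asyncio.run(main())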

batch

batch(
    conversations,
    toolkit=None,
    temperature=0.3,
    max_tokens=1024,
)

Process multiple conversations in batch mode synchronously.

This method takes a list of conversations and processes them sequentially using the predict method. Each conversation is processed independently with the same parameters.

PARAMETER DESCRIPTION
conversations

A list of conversation objects to process

TYPE: List[Conversation]

toolkit

The toolkit containing available tools for the model

TYPE: Optional[Toolkit] DEFAULT: None

temperature

The sampling temperature for response generation. Defaults to 0.3

TYPE: float DEFAULT: 0.3

max_tokens

The maximum number of tokens to generate for each response. Defaults to 1024

TYPE: int DEFAULT: 1024

RETURNS DESCRIPTION
List[Conversation]

List[Conversation]: A list of processed conversations with their respective responses

Source code in swarmauri_standard/llms/CohereToolModel.py
def batch(
    self,
    conversations: List[Conversation],
    toolkit: Optional[Toolkit] = None,
    temperature: float = 0.3,
    max_tokens: int = 1024,
) -> List[Conversation]:
    """
    Process multiple conversations in batch mode synchronously.

    This method takes a list of conversations and processes them sequentially using
    the predict method. Each conversation is processed independently with the same
    parameters.

    Args:
        conversations (List[Conversation]): A list of conversation objects to process
        toolkit (Optional[Toolkit]): The toolkit containing available tools for the model
        temperature (float): The sampling temperature for response generation.
            Defaults to 0.3
        max_tokens (int): The maximum number of tokens to generate for each
            response. Defaults to 1024

    Returns:
        List[Conversation]: A list of processed conversations with their respective responses
    """
    return [
        self.predict(
            conv, toolkit=toolkit, temperature=temperature, max_tokens=max_tokens
        )
        for conv in conversations
    ]
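
A batch sketch: each question gets its own Conversation, and batch processes them one after another with the same toolkit and sampling settings. Conversation and HumanMessage are the assumed classes from the predict example.

questions = ["What is 2 + 2?", "What is 10 + 32?"]
conversations = []
for question in questions:
    conv = Conversation()
    conv.add_message(HumanMessage(content=question))
    conversations.append(conv)

results = llm.batch(conversations, toolkit=toolkit)
for conv in results:
    print(conv.get_last().content)  # get_last is assumed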

abatch async

abatch(
    conversations,
    toolkit=None,
    temperature=0.3,
    max_tokens=1024,
    max_concurrent=5,
)

Process multiple conversations in batch mode asynchronously.

This method processes multiple conversations concurrently while limiting the maximum number of simultaneous requests using a semaphore. This helps prevent overwhelming the API service while still maintaining efficient processing.

PARAMETER DESCRIPTION
conversations

A list of conversation objects to process

TYPE: List[Conversation]

toolkit

The toolkit containing available tools for the model

TYPE: Optional[Toolkit] DEFAULT: None

temperature

The sampling temperature for response generation. Defaults to 0.3

TYPE: float DEFAULT: 0.3

max_tokens

The maximum number of tokens to generate for each response. Defaults to 1024

TYPE: int DEFAULT: 1024

max_concurrent

The maximum number of conversations to process simultaneously. Defaults to 5

TYPE: int DEFAULT: 5

RETURNS DESCRIPTION
List[Conversation]

List[Conversation]: A list of processed conversations with their respective responses

Note

The max_concurrent parameter helps control API usage and prevent rate limiting while still allowing for parallel processing of multiple conversations.

Source code in swarmauri_standard/llms/CohereToolModel.py
async def abatch(
    self,
    conversations: List[Conversation],
    toolkit: Optional[Toolkit] = None,
    temperature: float = 0.3,
    max_tokens: int = 1024,
    max_concurrent: int = 5,
) -> List[Conversation]:
    """
    Process multiple conversations in batch mode asynchronously.

    This method processes multiple conversations concurrently while limiting the
    maximum number of simultaneous requests using a semaphore. This helps prevent
    overwhelming the API service while still maintaining efficient processing.

    Args:
        conversations (List[Conversation]): A list of conversation objects to process
        toolkit (Optional[Toolkit]): The toolkit containing available tools for the model
        temperature (float): The sampling temperature for response generation.
            Defaults to 0.3
        max_tokens (int): The maximum number of tokens to generate for each
            response. Defaults to 1024
        max_concurrent (int): The maximum number of conversations to process
            simultaneously. Defaults to 5

    Returns:
        List[Conversation]: A list of processed conversations with their respective responses

    Note:
        The max_concurrent parameter helps control API usage and prevent rate limiting
        while still allowing for parallel processing of multiple conversations.
    """
    semaphore = asyncio.Semaphore(max_concurrent)

    async def process_conversation(conv):
        async with semaphore:
            return await self.apredict(
                conv,
                toolkit=toolkit,
                temperature=temperature,
                max_tokens=max_tokens,
            )

    tasks = [process_conversation(conv) for conv in conversations]
    return await asyncio.gather(*tasks)
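
A concurrent batch sketch: with max_concurrent=3, at most three conversations are in flight at once while the rest wait on the internal semaphore; results come back in input order.

import asyncio

results = asyncio.run(
    llm.abatch(conversations, toolkit=toolkit, max_concurrent=3)
)
print(len(results))  # one updated Conversation per input conversation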

get_allowed_models

get_allowed_models()

Query the LLMProvider API endpoint to get the list of allowed models.

RETURNS DESCRIPTION
List[str]

List[str]: List of allowed model names from the API

Source code in swarmauri_standard/llms/CohereToolModel.py
def get_allowed_models(self) -> List[str]:
    """
    Query the LLMProvider API endpoint to get the list of allowed models.

    Returns:
        List[str]: List of allowed model names from the API
    """
    response = self._client.get("/models")
    response.raise_for_status()
    models_data = response.json()
    return models_data.get("models", [])
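
A quick sketch: this issues a GET request to the /models endpoint using the client configured at construction time.

models = llm.get_allowed_models()
print(models)  # whatever the API reports under the "models" key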

register_model classmethod

register_model()

Decorator to register a base model in the unified registry.

RETURNS DESCRIPTION
Callable

A decorator function that registers the model class.

TYPE: Callable[[Type[BaseModel]], Type[BaseModel]]

Source code in swarmauri_base/DynamicBase.py
@classmethod
def register_model(cls) -> Callable[[Type[BaseModel]], Type[BaseModel]]:
    """
    Decorator to register a base model in the unified registry.

    Returns:
        Callable: A decorator function that registers the model class.
    """

    def decorator(model_cls: Type[BaseModel]):
        """Register ``model_cls`` as a base model."""
        model_name = model_cls.__name__
        if model_name in cls._registry:
            glogger.warning(
                "Model '%s' is already registered; skipping duplicate.", model_name
            )
            return model_cls

        cls._registry[model_name] = {"model_cls": model_cls, "subtypes": {}}
        glogger.debug("Registered base model '%s'.", model_name)
        DynamicBase._recreate_models()
        return model_cls

    return decorator

register_type classmethod

register_type(resource_type=None, type_name=None)

Decorator to register a subtype under one or more base models in the unified registry.

PARAMETER DESCRIPTION
resource_type

The base model(s) under which to register the subtype. If None, all direct base classes (except DynamicBase) are used.

TYPE: Optional[Union[Type[T], List[Type[T]]]] DEFAULT: None

type_name

An optional custom type name for the subtype.

TYPE: Optional[str] DEFAULT: None

RETURNS DESCRIPTION
Callable

A decorator function that registers the subtype.

TYPE: Callable[[Type[DynamicBase]], Type[DynamicBase]]

Source code in swarmauri_base/DynamicBase.py
@classmethod
def register_type(
    cls,
    resource_type: Optional[Union[Type[T], List[Type[T]]]] = None,
    type_name: Optional[str] = None,
) -> Callable[[Type["DynamicBase"]], Type["DynamicBase"]]:
    """
    Decorator to register a subtype under one or more base models in the unified registry.

    Parameters:
        resource_type (Optional[Union[Type[T], List[Type[T]]]]):
            The base model(s) under which to register the subtype. If None, all direct base classes (except DynamicBase)
            are used.
        type_name (Optional[str]): An optional custom type name for the subtype.

    Returns:
        Callable: A decorator function that registers the subtype.
    """

    def decorator(subclass: Type["DynamicBase"]):
        """Register ``subclass`` as a subtype."""
        if resource_type is None:
            resource_types = [
                base for base in subclass.__bases__ if base is not cls
            ]
        elif not isinstance(resource_type, list):
            resource_types = [resource_type]
        else:
            resource_types = resource_type

        for rt in resource_types:
            if not issubclass(subclass, rt):
                raise TypeError(
                    f"'{subclass.__name__}' must be a subclass of '{rt.__name__}'."
                )
            final_type_name = type_name or getattr(
                subclass, "_type", subclass.__name__
            )
            base_model_name = rt.__name__

            if base_model_name not in cls._registry:
                cls._registry[base_model_name] = {"model_cls": rt, "subtypes": {}}
                glogger.debug(
                    "Created new registry entry for base model '%s'.",
                    base_model_name,
                )

            subtypes_dict = cls._registry[base_model_name]["subtypes"]
            if final_type_name in subtypes_dict:
                glogger.warning(
                    "Type '%s' already exists under '%s'; skipping duplicate.",
                    final_type_name,
                    base_model_name,
                )
                continue

            subtypes_dict[final_type_name] = subclass
            glogger.debug(
                "Registered '%s' as '%s' under '%s'.",
                subclass.__name__,
                final_type_name,
                base_model_name,
            )

        DynamicBase._recreate_models()
        return subclass

    return decorator
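
A hypothetical registration sketch showing how a subtype such as this one can be attached to its base model; the decorator call site, import paths, and the MyToolModel class are assumptions for illustration, not a copy of the library's source.

from swarmauri_base.DynamicBase import DynamicBase  # assumed path
from swarmauri_base.llms.LLMBase import LLMBase     # assumed path


@DynamicBase.register_type(LLMBase, "MyToolModel")
class MyToolModel(LLMBase):
    # Registered under the base model LLMBase with type name "MyToolModel".
    type: str = "MyToolModel"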

model_validate_toml classmethod

model_validate_toml(toml_data)

Validate a model from a TOML string.

Source code in swarmauri_base/TomlMixin.py
@classmethod
def model_validate_toml(cls, toml_data: str):
    """Validate a model from a TOML string."""
    try:
        # Parse TOML into a Python dictionary
        toml_content = tomllib.loads(toml_data)

        # Convert the dictionary to JSON and validate using Pydantic
        return cls.model_validate_json(json.dumps(toml_content))
    except tomllib.TOMLDecodeError as e:
        raise ValueError(f"Invalid TOML data: {e}")
    except ValidationError as e:
        raise ValueError(f"Validation failed: {e}")

model_dump_toml

model_dump_toml(
    fields_to_exclude=None, api_key_placeholder=None
)

Return a TOML representation of the model.

Source code in swarmauri_base/TomlMixin.py
def model_dump_toml(self, fields_to_exclude=None, api_key_placeholder=None):
    """Return a TOML representation of the model."""
    if fields_to_exclude is None:
        fields_to_exclude = []

    # Load the JSON string into a Python dictionary
    json_data = json.loads(self.model_dump_json())

    # Function to recursively remove specific keys and handle api_key placeholders
    def process_fields(data, fields_to_exclude):
        """Recursively filter fields and apply placeholders."""
        if isinstance(data, dict):
            return {
                key: (
                    api_key_placeholder
                    if key == "api_key" and api_key_placeholder is not None
                    else process_fields(value, fields_to_exclude)
                )
                for key, value in data.items()
                if key not in fields_to_exclude
            }
        elif isinstance(data, list):
            return [process_fields(item, fields_to_exclude) for item in data]
        else:
            return data

    # Filter the JSON data
    filtered_data = process_fields(json_data, fields_to_exclude)

    # Convert the filtered data into TOML
    return toml.dumps(filtered_data)
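
A serialization sketch, assuming the instance's fields serialize cleanly to TOML: dump the model with the secret replaced by a placeholder, then rebuild an equivalent instance. model_validate_toml raises ValueError on malformed TOML or failed validation.

toml_str = llm.model_dump_toml(api_key_placeholder="<COHERE_API_KEY>")
print(toml_str)  # api_key appears as the placeholder, not the real secret

# Restore a real key before validating; the placeholder value is only for display.
clone = CohereToolModel.model_validate_toml(
    toml_str.replace("<COHERE_API_KEY>", os.environ["COHERE_API_KEY"])
)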

model_validate_yaml classmethod

model_validate_yaml(yaml_data)

Validate a model from a YAML string.

Source code in swarmauri_base/YamlMixin.py
@classmethod
def model_validate_yaml(cls, yaml_data: str):
    """Validate a model from a YAML string."""
    try:
        # Parse YAML into a Python dictionary
        yaml_content = yaml.safe_load(yaml_data)

        # Convert the dictionary to JSON and validate using Pydantic
        return cls.model_validate_json(json.dumps(yaml_content))
    except yaml.YAMLError as e:
        raise ValueError(f"Invalid YAML data: {e}")
    except ValidationError as e:
        raise ValueError(f"Validation failed: {e}")

model_dump_yaml

model_dump_yaml(
    fields_to_exclude=None, api_key_placeholder=None
)

Return a YAML representation of the model.

Source code in swarmauri_base/YamlMixin.py
def model_dump_yaml(self, fields_to_exclude=None, api_key_placeholder=None):
    """Return a YAML representation of the model."""
    if fields_to_exclude is None:
        fields_to_exclude = []

    # Load the JSON string into a Python dictionary
    json_data = json.loads(self.model_dump_json())

    # Function to recursively remove specific keys and handle api_key placeholders
    def process_fields(data, fields_to_exclude):
        """Recursively filter fields and apply placeholders."""
        if isinstance(data, dict):
            return {
                key: (
                    api_key_placeholder
                    if key == "api_key" and api_key_placeholder is not None
                    else process_fields(value, fields_to_exclude)
                )
                for key, value in data.items()
                if key not in fields_to_exclude
            }
        elif isinstance(data, list):
            return [process_fields(item, fields_to_exclude) for item in data]
        else:
            return data

    # Filter the JSON data
    filtered_data = process_fields(json_data, fields_to_exclude)

    # Convert the filtered data into YAML using safe mode
    return yaml.safe_dump(filtered_data, default_flow_style=False)
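
A YAML dump sketch focused on redaction: the allowed_models field is excluded entirely and the API key is replaced with a placeholder before the document is printed.

yaml_str = llm.model_dump_yaml(
    fields_to_exclude=["allowed_models"],
    api_key_placeholder="REDACTED",
)
print(yaml_str)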

model_post_init

model_post_init(logger=None)

Assign a logger instance after model initialization.

Source code in swarmauri_base/LoggerMixin.py
def model_post_init(self, logger: Optional[FullUnion[LoggerBase]] = None) -> None:
    """Assign a logger instance after model initialization."""

    # Directly assign the provided FullUnion[LoggerBase] or fallback to the
    # class-level default.
    self.logger = self.logger or logger or self.default_logger

add_allowed_model

add_allowed_model(model)

Add a new model to the list of allowed models.

RAISES DESCRIPTION
ValueError

If the model is already in the allowed models list.

Source code in swarmauri_base/llms/LLMBase.py
def add_allowed_model(self, model: str) -> None:
    """
    Add a new model to the list of allowed models.

    Raises:
        ValueError: If the model is already in the allowed models list.
    """
    if model in self.allowed_models:
        raise ValueError(f"Model '{model}' is already allowed.")
    self.allowed_models.append(model)

remove_allowed_model

remove_allowed_model(model)

Remove a model from the list of allowed models.

RAISES DESCRIPTION
ValueError

If the model is not in the allowed models list.

Source code in swarmauri_base/llms/LLMBase.py
def remove_allowed_model(self, model: str) -> None:
    """
    Remove a model from the list of allowed models.

    Raises:
        ValueError: If the model is not in the allowed models list.
    """
    if model not in self.allowed_models:
        raise ValueError(f"Model '{model}' is not in the allowed models list.")
    self.allowed_models.remove(model)
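
A short sketch of the guard rails on the allowed-models list; the model name used here is purely illustrative.

llm.add_allowed_model("command-hypothetical")
llm.remove_allowed_model("command-hypothetical")

try:
    llm.remove_allowed_model("command-hypothetical")  # no longer present
except ValueError as exc:
    print(exc)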