Parsers
The parsers module provides utilities for parsing tool calls from model outputs and formatting chat templates.

ToolParser

Abstract base class for tool call parsers.
from rllm.parser import ToolParser

Methods

parse

Extract tool calls from model response.
tool_calls = parser.parse(model_response)
model_response
str
Raw model output containing tool calls.
tool_calls
list[ToolCall]
List of parsed tool calls.

get_tool_prompt

Get the tool prompt for the model.
prompt = parser.get_tool_prompt(tools_schema)
tools_schema
str
JSON schema for available tools.
prompt
str
Formatted tool prompt to include in system message.

get_parser

Factory method to get appropriate parser for a tokenizer.
parser = ToolParser.get_parser(tokenizer)
tokenizer
PreTrainedTokenizer
Tokenizer to detect parser type from.
parser
ToolParser
Appropriate parser instance (R1ToolParser or QwenToolParser).

R1ToolParser

Parser for DeepSeek R1-style tool call format.
from rllm.parser import R1ToolParser

parser = R1ToolParser()

Format

R1 format uses special tokens:
<|tool_calls_begin|>
<|tool_call_begin|>function<|tool_sep|>function_name
{"param1": "value1", "param2": "value2"}
<|tool_call_end|>
<|tool_calls_end|>

Methods

tool_calls = parser.parse(model_response)
prompt = parser.get_tool_prompt(tools_schema)

QwenToolParser

Parser for Qwen-style tool call format.
from rllm.parser import QwenToolParser

parser = QwenToolParser()

Format

Qwen format uses XML-like tags:
<tool_call>{"name": "function_name", "arguments": {...}}</tool_call>

Methods

tool_calls = parser.parse(model_response)
prompt = parser.get_tool_prompt(tools_schema)

ChatTemplateParser

Parser for converting between chat format and tokens.
from rllm.parser import ChatTemplateParser

parser = ChatTemplateParser.get_parser(tokenizer)

Methods

apply_chat_template

Convert messages to tokens.
tokens = parser.apply_chat_template(
    messages,
    add_generation_prompt=True
)

decode

Convert tokens back to text.
text = parser.decode(tokens)

Example: Parsing Tool Calls (R1)

from rllm.parser import R1ToolParser
from rllm.tools import ToolCall

parser = R1ToolParser()

model_response = """
<|tool_calls_begin|>
<|tool_call_begin|>function<|tool_sep|>search_web
{"query": "What is rLLM?", "num_results": 5}
<|tool_call_end|>
<|tool_calls_end|>
"""

tool_calls = parser.parse(model_response)

for call in tool_calls:
    print(f"Tool: {call.name}")
    print(f"Arguments: {call.arguments}")

# Output:
# Tool: search_web
# Arguments: {'query': 'What is rLLM?', 'num_results': 5}

Example: Parsing Tool Calls (Qwen)

from rllm.parser import QwenToolParser

parser = QwenToolParser()

model_response = '''
Let me search for that information.
<tool_call>{"name": "search", "arguments": {"query": "capital of France"}}</tool_call>
'''

tool_calls = parser.parse(model_response)

for call in tool_calls:
    print(f"Tool: {call.name}")
    print(f"Arguments: {call.arguments}")

# Output:
# Tool: search
# Arguments: {'query': 'capital of France'}

Example: Multiple Tool Calls

from rllm.parser import QwenToolParser

parser = QwenToolParser()

model_response = '''
<tool_call>{"name": "calculator", "arguments": {"a": 5, "b": 3, "op": "add"}}</tool_call>
<tool_call>{"name": "calculator", "arguments": {"a": 10, "b": 2, "op": "multiply"}}</tool_call>
'''

tool_calls = parser.parse(model_response)

print(f"Found {len(tool_calls)} tool calls")
for i, call in enumerate(tool_calls):
    print(f"\nCall {i+1}:")
    print(f"  Name: {call.name}")
    print(f"  Args: {call.arguments}")

Example: Getting Tool Prompt

from rllm.parser import R1ToolParser
import json

parser = R1ToolParser()

tools_schema = json.dumps([
    {
        "type": "function",
        "function": {
            "name": "search_web",
            "description": "Search the web for information",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {"type": "string", "description": "Search query"}
                },
                "required": ["query"]
            }
        }
    }
], indent=2)

tool_prompt = parser.get_tool_prompt(tools_schema)

print(tool_prompt)
# Outputs formatted tool prompt with instructions

Example: Auto-detecting Parser

from transformers import AutoTokenizer
from rllm.parser import ToolParser

# DeepSeek model -> R1ToolParser
tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B")
parser = ToolParser.get_parser(tokenizer)
print(type(parser).__name__)  # R1ToolParser

# Qwen model -> QwenToolParser
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B")
parser = ToolParser.get_parser(tokenizer)
print(type(parser).__name__)  # QwenToolParser

Example: Using Parser with Agent

from transformers import AutoTokenizer
from rllm.parser import ToolParser
from rllm.agents import ToolAgent

tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen3-4B")
parser = ToolParser.get_parser(tokenizer)

agent = ToolAgent(
    tool_map={"search": MySearchTool},
    parser_name="qwen",  # or pass parser directly
    system_prompt="You are a helpful assistant."
)

# Agent will use the parser to extract tool calls from model responses

Example: ChatTemplateParser

from transformers import AutoTokenizer
from rllm.parser import ChatTemplateParser

tokenizer = AutoTokenizer.from_pretrained("deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B")
parser = ChatTemplateParser.get_parser(tokenizer)

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "What is 2+2?"}
]

# Convert to tokens
tokens = parser.apply_chat_template(
    messages,
    add_generation_prompt=True,
    return_tensors="pt"
)

print(f"Tokens: {tokens}")

# Decode back to text
text = parser.decode(tokens[0])
print(f"Formatted: {text}")