References:
- LangGraph workflows tutorial: https://langchain-ai.github.io/langgraph/tutorials/workflows/
- r/LocalLLaMA discussion of small models for function calling: https://www.reddit.com/r/LocalLLaMA/comments/1iozmns/best_small_model_for_function_calling/
- Berkeley Function-Calling Leaderboard (BFCL): https://gorilla.cs.berkeley.edu/leaderboard.html
- qwen2.5 on Ollama: https://ollama.com/library/qwen2.5
ollama pull qwen2.5:14b
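Once the pull finishes, the model should show up in the local registry (standard Ollama CLI; exact output varies by version):

ollama list   # the qwen2.5:14b entry confirms the pull succeeded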
from langchain_ollama import ChatOllama
# qwen2.5:14b (#30 on the Berkeley Function-Calling Leaderboard, BFCL)
local_llm = "qwen2.5:14b"
model = ChatOllama(model=local_llm, temperature=0.0)
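Since the references above are about function calling, a quick smoke test helps confirm the local model actually emits tool calls. This is a minimal sketch, not from the original: the `add` tool is a hypothetical example, and it assumes the `model` instance defined above.

from langchain_core.tools import tool

# Hypothetical tool for illustration only
@tool
def add(a: int, b: int) -> int:
    """Add two integers."""
    return a + b

# Expose the tool schema to the model; qwen2.5 supports tool calling via Ollama
model_with_tools = model.bind_tools([add])

resp = model_with_tools.invoke("Use the add tool to compute 2 + 3.")
# A model that handles function calling well should populate tool_calls
print(resp.tool_calls)  # expect something like [{'name': 'add', 'args': {'a': 2, 'b': 3}, ...}]

If `tool_calls` comes back empty and the answer appears as plain text instead, the model is not following the tool-calling protocol reliably, which is exactly what the BFCL ranking is meant to capture.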