Open16

AITuber関連メモ

kodukikoduki

RAG周りの前提としてのベクトル検索。
ようはベクトルの近似を利用した検索。従来的な全文検索とは異なり「クエリをベクトル化 -> クエリベクトルでストア済みのベクトルに近似のものを探索」というプロセスを踏むので、特徴に応じた比較を行いやすい。また、テキストだけではなく画像や音声、ソーシャルグラフなど様々なターゲットへの検索が行える
https://www.youtube.com/watch?v=gaapYfcQO-8

SolrやElasticも最近は採用をしている。またRAGなどLLMとの組み合わせを前提にしたChroma DBなどがある。
https://www.trychroma.com/

Function Calling的な仕組みでキーワードを抽出するか、入力文自体を直接ベクトル化してベクトル検索のクエリに使い、出力結果をLLMで整える使い方に見える。たぶん、本質的にはベクトル検索である必要はないが、おそらく現代で最もコンパクトかつ精度が高い全文検索的な知識の格納/探索の方法なんだと思われる。

kodukikoduki

LCELとFunction CallingとAgentと。
https://secon.dev/entry/2024/01/11/100000/
https://zenn.dev/mah/scraps/bf133682194126
https://api.python.langchain.com/en/latest/agents/langchain.agents.output_parsers.openai_functions.OpenAIFunctionsAgentOutputParser.html#
https://python.langchain.com/docs/expression_language/cookbook/memory

import os
import sys

# Load API keys from local secret files into environment variables.
# Use context managers so the file handles are closed — the original
# open(...).read() pattern leaked the file objects (ResourceWarning).
# NOTE(review): HOMEPATH is Windows-only; os.path.expanduser("~") would be portable.
with open(f"{os.environ['HOMEPATH']}\\.secret\\openai.txt", "r") as f:
    os.environ["OPENAI_API_KEY"] = f.read()
with open(f"{os.environ['HOMEPATH']}\\.secret\\gemini.txt", "r") as f:
    os.environ["GOOGLE_API_KEY"] = f.read()

from backend import weather_tool
from backend import short_talk_tool


from langchain.schema.agent import AgentFinish
from langchain.tools.render import format_tool_to_openai_function
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain.chat_models import ChatOpenAI


# 出力フォーマットを定義
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.pydantic_v1 import BaseModel, Field

class Reply(BaseModel):
    """Structured JSON reply schema the LLM is instructed to emit."""
    # NOTE(review): description "maxe" looks like a typo/placeholder —
    # confirm the intended text (perhaps "current emotion of the character"?).
    current_emotion: str = Field(description="maxe")
    character_reply: str = Field(description="れん's reply to User")

# Parser whose get_format_instructions() is injected into the prompt below.
parser = JsonOutputParser(pydantic_object=Reply)

# System prompt loaded from disk (UTF-8). Use a context manager so the file
# handle is closed — the original open(...).read() leaked it.
with open("C:\\Users\\koduki\\git\\ai-tuber\\src\\backend\\prompt_system.txt", "r", encoding='utf-8') as f:
    prompt_system = f.read()

# Prompt: system persona + user input + agent scratchpad; the JSON format
# instructions from `parser` are pre-filled via .partial().
prompt = ChatPromptTemplate.from_messages([
    ("system", prompt_system),
    ("user", "{input}"),
    MessagesPlaceholder(variable_name="agent_scratchpad"),
]).partial(format_instructions=parser.get_format_instructions())
tools = [weather_tool.weather_api, short_talk_tool.talk]

llm = ChatOpenAI(temperature=0, model='gpt-4-0613')
# Expose the tools to the model as OpenAI function definitions.
llm_with_tools = llm.bind(functions=[format_tool_to_openai_function(t) for t in tools])
# Agent pipeline: map the input dict -> prompt -> tool-aware LLM ->
# parsed AgentAction / AgentFinish.
agent = {
    "input": lambda x: x["input"],
    "agent_scratchpad": lambda x: format_to_openai_functions(x['intermediate_steps'])
} | prompt | llm_with_tools | OpenAIFunctionsAgentOutputParser()


# Agent Loop
intermediate_steps = []
output = agent.invoke({
    "input": "こんにちは",
    "intermediate_steps": intermediate_steps
})
output

# Agent Loop 1
intermediate_steps = []
output = agent.invoke({
    "input": "今日の東京の天気は?",
    "intermediate_steps": intermediate_steps
})
output
print(output.tool, output.tool_input)

tool = next(x for x in tools if x.name == output.tool)
observation = tool.run(output.tool_input)
observation
intermediate_steps.append((output, observation))
intermediate_steps

# Agent Loop 2
output = agent.invoke({
    "input": "今日の東京の天気は?",
    "intermediate_steps": intermediate_steps
})
output
kodukikoduki

AgentExecutorを使わずにLCELだけでtoolの実行を明示的に制御。

import os
import sys

# Load API keys from local secret files into environment variables.
# Use context managers so the file handles are closed — the original
# open(...).read() pattern leaked the file objects (ResourceWarning).
# NOTE(review): HOMEPATH is Windows-only; os.path.expanduser("~") would be portable.
with open(f"{os.environ['HOMEPATH']}\\.secret\\openai.txt", "r") as f:
    os.environ["OPENAI_API_KEY"] = f.read()
with open(f"{os.environ['HOMEPATH']}\\.secret\\gemini.txt", "r") as f:
    os.environ["GOOGLE_API_KEY"] = f.read()

from backend import weather_tool
from backend import short_talk_tool

from langchain.memory import ConversationBufferMemory
from langchain.schema.agent import AgentFinish
from langchain.tools.render import format_tool_to_openai_function
from langchain.agents.format_scratchpad import format_to_openai_functions
from langchain.agents.output_parsers import OpenAIFunctionsAgentOutputParser
from langchain.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableLambda, RunnablePassthrough
from langchain_openai import ChatOpenAI
from operator import itemgetter

# 出力フォーマットを定義
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.pydantic_v1 import BaseModel, Field

class Reply(BaseModel):
    """Structured JSON reply schema the LLM is instructed to emit."""
    # NOTE(review): description "maxe" looks like a typo/placeholder —
    # confirm the intended text (perhaps "current emotion of the character"?).
    current_emotion: str = Field(description="maxe")
    character_reply: str = Field(description="れん's reply to User")

# Parser whose get_format_instructions() is injected into the chat prompt below.
parser = JsonOutputParser(pydantic_object=Reply)

# System prompt loaded from disk (UTF-8). Use a context manager so the file
# handle is closed — the original open(...).read() leaked it.
with open("C:\\Users\\koduki\\git\\ai-tuber\\src\\backend\\prompt_system.txt", "r", encoding='utf-8') as f:
    prompt_system = f.read()

# Prompt for the final in-character reply: persona + user input + running
# chat history + the tool scratchpad assembled by the router.
prompt_for_chat = ChatPromptTemplate.from_messages([
    ("system", prompt_system),
    ("user", "{input}"),
    MessagesPlaceholder(variable_name="chat_history"),
    MessagesPlaceholder(variable_name="scratchpad"),
]).partial(format_instructions=parser.get_format_instructions())

# Minimal prompt used only to decide which tool (if any) to call.
prompt_for_tools = ChatPromptTemplate.from_messages([
    ("system", "You are agentai"),
    ("user", "{input}"),
])

tools = [weather_tool.weather_api, short_talk_tool.talk]

# Stronger model for the user-facing reply; cheaper model for tool selection.
llm_for_chat   = ChatOpenAI(temperature=0, model='gpt-4-0613')
llm_with_tools = ChatOpenAI(temperature=0, model='gpt-3.5-turbo').bind(functions=[format_tool_to_openai_function(t) for t in tools])

def call_func(log):
    """Execute the tool requested by the parsed agent output.

    Returns a one-element list of (action, observation) pairs, the shape
    expected by format_to_openai_functions. An AgentFinish carries no tool
    request, so it is paired with an empty observation instead.
    """
    if not isinstance(log, AgentFinish):
        selected = next(t for t in tools if t.name == log.tool)
        result = selected.run(log.tool_input)
        return [(log, result)]
    return [(log, [])]

def store_memory(response):
    """Reduce the router's full response dict to {"output": <final text>}.

    NOTE(review): despite the name, nothing is saved to `memory` here
    (memory.save_context is never called) — confirm whether persisting the
    turn was intended.
    """
    print(response)  # debug trace of the full pipeline response
    final = response["return_values"].return_values['output']
    return {"output": final}

# Conversation memory exposed under the "chat_history" key as message objects.
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
# Router pipeline (tool use controlled explicitly, no AgentExecutor):
#  1) enrich the input dict with chat_history (loaded from memory) and
#     scratchpad (tool-selection LLM -> parse -> call_func executes the tool
#     -> re-serialized into OpenAI function messages);
#  2) run the in-character chat prompt/LLM over input + history + scratchpad;
#  3) reduce the response to {"output": ...} via store_memory.
router = (
    RunnablePassthrough().assign(
        chat_history=RunnableLambda(memory.load_memory_variables) | itemgetter("chat_history"),
        scratchpad=prompt_for_tools | llm_with_tools | OpenAIFunctionsAgentOutputParser() | call_func | format_to_openai_functions
    )| RunnablePassthrough().assign(
        return_values=prompt_for_chat | llm_for_chat | OpenAIFunctionsAgentOutputParser(),
    )| store_memory
)



# Ask about today's weather in Tokyo; tool execution happens inside the chain.
output = router.invoke({
    "input": "今日の東京の天気は?",
})
output  # bare expression: notebook-style cell output

LCELのデバッグはこれ。

from langchain.globals import set_debug
# Enable verbose tracing of every LCEL runnable invocation (global switch).
set_debug(True)

# Re-run the router with debug output to inspect each step of the chain.
router.invoke({
    "input": "今日の東京の天気は?",
})