[ChatStream] meta-llama/Meta-Llama-3-8B-Instruct 用の ChatPromptクラス
昨日（2024/4/19）に発表された Llama3 用の ChatPromptクラス※をご紹介します。
from chatstream import AbstractChatPrompt
# Default system message text (Llama-style "helpful, safe assistant" preamble).
# NOTE(review): not referenced directly in this snippet — create_prompt() reads
# self.system, so this constant is presumably assigned there by the caller or
# framework; confirm against the ChatStream base class.
SYSTEM_PROMPT = """\
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.\
"""
class ChatPromptMetaLlamaLlama3Instruct(AbstractChatPrompt):
    """
    ChatPrompt for meta-llama/Meta-Llama-3-8B-Instruct.

    Builds prompts in the Llama 3 instruct chat template:
    ``<|begin_of_text|><|start_header_id|>{role}<|end_header_id|>\\n\\n{message}<|eot_id|>``

    Prompt guide:
    https://huggingface.co/blog/llama3
    """

    def __init__(self):
        super().__init__()
        self.set_requester("user")
        self.set_responder("assistant")

    def is_skip_special_token(self):
        # Keep special tokens (e.g. <|eot_id|>) in the decoded text so that
        # the stop strings from get_stop_strs() can match them.
        return False

    def get_stop_strs(self):
        # Stop strings apply only in chat mode; <|eot_id|> terminates a turn.
        if not self.chat_mode:
            return None
        return ['<|eot_id|>']

    def get_replacement_when_input(self):
        return None

    def get_replacement_when_output(self):  # replace when response_text gotten
        return None

    def create_prompt(self, opts=None):
        """
        Build the full Llama 3 chat prompt from the conversation history.

        :param opts: optional dict forwarded to ``get_contents()``;
                     defaults to an empty dict.
        :return: the formatted prompt string, or the raw last requester
                 message when chat mode is disabled.
        """
        # Fix: the original used a mutable default argument (opts={}),
        # which is shared across calls; normalize a None default instead.
        if opts is None:
            opts = {}

        if not self.chat_mode:
            return self.get_requester_last_msg()

        # Chat mode: start with the system turn.
        ret = f"<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\n{self.system}<|eot_id|>"

        for chat_content in self.get_contents(opts):
            role = chat_content.get_role()
            message = chat_content.get_message()
            if not role:
                continue
            if message:
                # Completed turn: header + message + end-of-turn token.
                ret += f"<|start_header_id|>{role}<|end_header_id|>\n\n{message}<|eot_id|>"
            else:
                # Pending turn (typically the assistant's next reply):
                # emit the header only so the model generates from here.
                ret += f"<|start_header_id|>{role}<|end_header_id|>\n\n"
        return ret
※ChatStream ご利用のお客様は、最新の ChatStream にも取り込まれておりますが、手動で対応したい場合は上のクラスをご利用ください