"""Implementation of TextGrad: Automatic “Differentiation” via Text.This code is not used as we treat the non-optimizable version of LLM judge as a form of eval_fn.We use class EvalFnToTextLoss instead as of today 12/9/2024"""fromtypingimportUnion,TYPE_CHECKINGifTYPE_CHECKING:fromadalflow.coreimportModelClientfrom..parameterimportParameterfromadalflow.optim.loss_componentimportLossComponentfromtypingimportDictfromcopyimportdeepcopyimportlogginglog=logging.getLogger(__name__)TEXT_LOSS_TEMPLATE=r"""<START_OF_SYSTEM_PROMPT>{{eval_system_prompt}}<END_OF_SYSTEM_PROMPT><USER>{{eval_user_prompt}}</USER>"""
class LLMAsTextLoss(LossComponent):
    __doc__ = r"""Evaluate the final RAG response using an LLM judge.

    The LLM judge will have:

    - eval_system_prompt: The system prompt to evaluate the response.
    - y_hat: The response to evaluate.
    - y (optional): The correct response to compare against.

    The loss will be a Parameter with the evaluation result and can be used
    to compute gradients.

    This loss uses an LLM/Generator as the computation/transformation operator,
    so its gradient will be found from the Generator's backward method.
    """

    def __init__(
        self,
        prompt_kwargs: Dict[str, Union[str, "Parameter"]],
        model_client: "ModelClient",
        model_kwargs: Dict[str, object],
    ):
        from adalflow.core.generator import Generator
        from adalflow.optim.parameter import Parameter

        super().__init__()
        prompt_kwargs = deepcopy(prompt_kwargs)
        # TODO: do we really need to convert str to Parameter? What if not?
        # Wrap plain strings as non-optimizable Parameters so every prompt
        # argument flows through the Generator's parameter-aware handling.
        for key, value in prompt_kwargs.items():
            if isinstance(value, str):
                prompt_kwargs[key] = Parameter(
                    data=value, requires_opt=False, role_desc=key
                )
        self.prompt_kwargs = prompt_kwargs
        # The LLM-as-judge Generator that computes the loss.
        self.loss_llm = Generator(
            name="llm_judge",
            model_client=model_client,
            model_kwargs=model_kwargs,
            template=TEXT_LOSS_TEMPLATE,
            prompt_kwargs=prompt_kwargs,
        )

    # def __call__(self, *args, **kwargs):
    #     return self.forward(*args, **kwargs)
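# A minimal usage sketch, not part of the library API: OpenAIClient and the
# prompt/model values below are assumptions for illustration; any ModelClient
# implementation with matching model_kwargs should work the same way.
if __name__ == "__main__":
    from adalflow.components.model_client import OpenAIClient

    loss_fn = LLMAsTextLoss(
        prompt_kwargs={
            "eval_system_prompt": (
                "You are a judge. Score the response from 0 to 10 and justify the score."
            ),
            "eval_user_prompt": "Question: What is 2 + 2?\nResponse: 4",
        },
        model_client=OpenAIClient(),  # requires OPENAI_API_KEY in the environment
        model_kwargs={"model": "gpt-4o-mini"},
    )
    # The judge itself is the wrapped Generator; calling it runs the evaluation.
    # In training mode its output is a Parameter whose gradients come from the
    # Generator's backward method.
    print(loss_fn.loss_llm())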