from typing import Optional

import torch
import torch.nn as nn
from transformers import BloomConfig, BloomModel

from .lora import LoRAModule  # assumed import path; LoRA wrapper base class provided by this repo


class BLOOMRM(RewardModel):
    """
    BLOOM reward model.

    Args:
        pretrained (str): Pretrained model name or path.
        config (BloomConfig): Model config.
        lora_rank (int): LoRA rank.
        lora_train_bias (str): LoRA bias training mode.
    """

    def __init__(self,
                 pretrained: Optional[str] = None,
                 config: Optional[BloomConfig] = None,
                 lora_rank: int = 0,
                 lora_train_bias: str = 'none') -> None:
        if pretrained is not None:
            model = BloomModel.from_pretrained(pretrained)
        elif config is not None:
            model = BloomModel(config)
        else:
            model = BloomModel(BloomConfig())
        # BLOOM configs expose `hidden_size`, so build the scalar value head here
        # rather than relying on the base class's GPT-2-style `n_embd` fallback.
        value_head = nn.Linear(model.config.hidden_size, 1)
        super().__init__(model, value_head, lora_rank, lora_train_bias)
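A minimal construction sketch. The class name `BLOOMRM` follows the definition above; the checkpoint name is purely illustrative, and any BLOOM checkpoint or `BloomConfig` works the same way:

```python
from transformers import BloomConfig

# From a pretrained checkpoint, with rank-8 LoRA adapters (checkpoint name is an example):
reward_model = BLOOMRM(pretrained='bigscience/bloom-560m', lora_rank=8)

# Or from a freshly initialized (untrained) config:
reward_model = BLOOMRM(config=BloomConfig())
```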
class RewardModel(LoRAModule):
    """
    Reward model base class.

    Args:
        model (nn.Module): Reward model.
        value_head (nn.Module): Value head to get reward score.
        lora_rank (int): LoRA rank.
        lora_train_bias (str): LoRA bias training mode.
    """
    def __init__(self,
                 model: nn.Module,
                 value_head: Optional[nn.Module] = None,
                 lora_rank: int = 0,
                 lora_train_bias: str = 'none') -> None:
        super().__init__(lora_rank=lora_rank, lora_train_bias=lora_train_bias)
        self.model = model
        self.convert_to_lora()  # inject LoRA adapters as configured by the LoRAModule base

        if value_head is not None:
            if value_head.out_features != 1:
                raise ValueError("The output dim of the reward model's value head should be 1!")
            self.value_head = value_head
        else:
            # Fallback assumes a GPT-2-style config with `n_embd`; models such as
            # BLOOM (`hidden_size`) should pass an explicit value head instead.
            self.value_head = nn.Linear(model.config.n_embd, 1)
    def forward(self, sequences: torch.LongTensor, attention_mask: Optional[torch.Tensor] = None) -> torch.Tensor:
        outputs = self.model(sequences, attention_mask=attention_mask)
        last_hidden_states = outputs['last_hidden_state']    # (B, S, hidden)
        # Score every position except the last, then average over the sequence.
        values = self.value_head(last_hidden_states)[:, :-1]    # (B, S - 1, 1)
        value = values.mean(dim=1).squeeze(1)    # ensure shape is (B,)
        return value
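A short end-to-end sketch of scoring a prompt/response pair with the classes above. The tokenizer and checkpoint names are illustrative assumptions, not requirements of the code:

```python
import torch
from transformers import AutoTokenizer

# Illustrative checkpoint; any BLOOM tokenizer/model pair works the same way.
tokenizer = AutoTokenizer.from_pretrained('bigscience/bloom-560m')
reward_model = BLOOMRM(pretrained='bigscience/bloom-560m').eval()

texts = ['Question: What is 2 + 2? Answer: 4.']
batch = tokenizer(texts, return_tensors='pt', padding=True)

with torch.no_grad():
    rewards = reward_model(batch['input_ids'], attention_mask=batch['attention_mask'])

print(rewards.shape)    # torch.Size([1]) -- one scalar reward per sequence
```

Because `forward` mean-pools the value head's outputs over all but the last position, each input sequence reduces to a single scalar reward, which is what pairwise ranking losses in RLHF reward-model training consume.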