from transformers import PretrainedConfig


class GatorConfig(PretrainedConfig):
    """Configuration for the Gator transformer model."""

    # Identifier used by the transformers Auto* class registry.
    model_type = "gator-transformer"

    def __init__(self, hidden_size=448, num_attention_heads=8, num_hidden_layers=10,
                 vocab_size=50257, max_position_embeddings=1024, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size                            # model width; 448 / 8 = 56 dims per head
        self.num_attention_heads = num_attention_heads
        self.num_hidden_layers = num_hidden_layers
        self.vocab_size = vocab_size                              # 50257 is the GPT-2 BPE vocabulary size
        self.max_position_embeddings = max_position_embeddings    # maximum sequence length
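
# A minimal usage sketch, not part of the original source: it assumes GatorConfig
# (above) is in scope and relies only on standard PretrainedConfig / AutoConfig
# APIs (save_pretrained, from_pretrained, AutoConfig.register). The
# "gator-config" directory name is illustrative.
from transformers import AutoConfig

config = GatorConfig(num_hidden_layers=12)              # override a default
config.save_pretrained("gator-config")                  # writes gator-config/config.json
reloaded = GatorConfig.from_pretrained("gator-config")  # round-trips the same values

# Registering the custom model_type lets AutoConfig resolve it by name.
AutoConfig.register("gator-transformer", GatorConfig)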