From f0d3696602b4bda8585a66245cfee867f4dd2b22 Mon Sep 17 00:00:00 2001 From: Mansi Mehta Date: Fri, 10 Jan 2025 01:05:00 +0530 Subject: [PATCH 1/2] Added falcon model converter --- .../src/utils/transformers/convert_falcon.py | 54 +++++++++++++++++++ .../utils/transformers/convert_falcon_test.py | 24 +++++++++ .../src/utils/transformers/preset_loader.py | 3 ++ 3 files changed, 81 insertions(+) create mode 100644 keras_hub/src/utils/transformers/convert_falcon.py create mode 100644 keras_hub/src/utils/transformers/convert_falcon_test.py diff --git a/keras_hub/src/utils/transformers/convert_falcon.py b/keras_hub/src/utils/transformers/convert_falcon.py new file mode 100644 index 0000000000..fcdb906e10 --- /dev/null +++ b/keras_hub/src/utils/transformers/convert_falcon.py @@ -0,0 +1,54 @@ +import numpy as np + +from keras_hub.src.models.falcon import FalconBackbone +from keras_hub.src.utils.preset_utils import HF_TOKENIZER_CONFIG_FILE +from keras_hub.src.utils.preset_utils import get_file +from keras_hub.src.utils.preset_utils import load_json + +backbone_cls= FalconBackbone + +def convert_backbone_config(transformers_config): + return { + "vocabulary_size": transformers_config["vocab_size"], + "num_layers": transformers_config["num_hidden_layers"], + "num_attention_heads": transformers_config["num_attention_heads"], + "hidden_dim": transformers_config["hidden_size"], + "intermediate_dim": 32*4, + } + +def transpose_and_reshape(x, shape): + return np.reshape(np.transpose(x), shape) + +def convert_weights(backbone, loader, transformers_config): + # Embeddings + loader.port_weight(keras_variable= backbone.get_layer('token_embedding').embeddings, + hf_weight_key = "word_embeddings.weight") + + + for i in range(backbone.num_layers): + decoder_layer = backbone.get_layer(f"transformer_layer_{i}") + + # Norm layer + loader.port_weight(keras_variable=decoder_layer.input_layernorm.gamma, + hf_weight_key=f'h.{i}.input_layernorm.weight') + + # Attention layers + loader.port_weight(keras_variable=decoder_layer.attention_layer.output_dense.kernel, + hf_weight_key= f'h.{i}.self_attention.dense.weight') + + loader.port_weight(keras_variable= decoder_layer.post_attention_layernorm.gamma, + hf_weight_key=f'h.{i}.self_attention.query_key_value.weight', + hook_fn=lambda hf_tensor, keras_shape: np.mean(np.reshape(hf_tensor, (-1, keras_shape[0])), axis=0)) + + +def convert_tokenizer(cls, preset, **kwargs): + tokenizer_config = load_json(preset, 'tokenizer_config.json') + tokenizer_data = load_json(preset, 'tokenizer.json') + vocab = tokenizer_data["model"]["vocab"] + merges = tokenizer_data["model"].get("merges", None) + + tokenizer_kwargs = { + "vocabulary": vocab, + "merges": merges + } + return cls(**tokenizer_kwargs) \ No newline at end of file diff --git a/keras_hub/src/utils/transformers/convert_falcon_test.py b/keras_hub/src/utils/transformers/convert_falcon_test.py new file mode 100644 index 0000000000..40c2146993 --- /dev/null +++ b/keras_hub/src/utils/transformers/convert_falcon_test.py @@ -0,0 +1,24 @@ +import pytest + +from keras_hub.src.models.falcon.falcon_backbone import FalconBackbone +from keras_hub.src.models.falcon.falcon_causal_lm import FalconCausalLM + +from keras_hub.src.tests.test_case import TestCase + +class TestTask(TestCase): + @pytest.mark.large + def test_convert_tiny_preset(self): + model = FalconCausalLM.from_preset("hf://tiiuae/falcon-7b") + prompt = "What is your favorite condiment?" 
+ model.generate([prompt], max_length=15) + + @pytest.mark.large + def test_class_detection(self): + model = FalconCausalLM.from_preset("hf://tiiuae/falcon-7b") + self.assertIsInstance(model, FalconCausalLM) + model = FalconBackbone.from_preset( + "hf://tiiuae/falcon-7b", + load_weights=False, + ) + self.assertIsInstance(model, FalconBackbone) + diff --git a/keras_hub/src/utils/transformers/preset_loader.py b/keras_hub/src/utils/transformers/preset_loader.py index a3c46f4cf8..58dbbc9d16 100644 --- a/keras_hub/src/utils/transformers/preset_loader.py +++ b/keras_hub/src/utils/transformers/preset_loader.py @@ -13,6 +13,7 @@ from keras_hub.src.utils.transformers import convert_mistral from keras_hub.src.utils.transformers import convert_pali_gemma from keras_hub.src.utils.transformers import convert_vit +from keras_hub.src.utils.transformers import convert_falcon from keras_hub.src.utils.transformers.safetensor_utils import SafetensorLoader @@ -41,6 +42,8 @@ def __init__(self, preset, config): self.converter = convert_pali_gemma elif model_type == "vit": self.converter = convert_vit + elif model_type == 'falcon': + self.converter = convert_falcon else: raise ValueError( "KerasHub has no converter for huggingface/transformers models " From 21df61ece0474fa85c3f24705e183a030a9df952 Mon Sep 17 00:00:00 2001 From: Mansi Mehta Date: Fri, 10 Jan 2025 12:29:55 +0530 Subject: [PATCH 2/2] Added falcon model converter -1 --- .../src/utils/transformers/convert_falcon.py | 70 ++++++++++--------- .../utils/transformers/convert_falcon_test.py | 3 +- .../src/utils/transformers/preset_loader.py | 4 +- 3 files changed, 41 insertions(+), 36 deletions(-) diff --git a/keras_hub/src/utils/transformers/convert_falcon.py b/keras_hub/src/utils/transformers/convert_falcon.py index fcdb906e10..7168237e44 100644 --- a/keras_hub/src/utils/transformers/convert_falcon.py +++ b/keras_hub/src/utils/transformers/convert_falcon.py @@ -1,11 +1,10 @@ import numpy as np from keras_hub.src.models.falcon import FalconBackbone -from keras_hub.src.utils.preset_utils import HF_TOKENIZER_CONFIG_FILE -from keras_hub.src.utils.preset_utils import get_file from keras_hub.src.utils.preset_utils import load_json -backbone_cls= FalconBackbone +backbone_cls = FalconBackbone + def convert_backbone_config(transformers_config): return { @@ -13,42 +12,49 @@ def convert_backbone_config(transformers_config): "num_layers": transformers_config["num_hidden_layers"], "num_attention_heads": transformers_config["num_attention_heads"], "hidden_dim": transformers_config["hidden_size"], - "intermediate_dim": 32*4, + "intermediate_dim": 32 * 4, } + def transpose_and_reshape(x, shape): - return np.reshape(np.transpose(x), shape) + return np.reshape(np.transpose(x), shape) + def convert_weights(backbone, loader, transformers_config): - # Embeddings - loader.port_weight(keras_variable= backbone.get_layer('token_embedding').embeddings, - hf_weight_key = "word_embeddings.weight") - - - for i in range(backbone.num_layers): - decoder_layer = backbone.get_layer(f"transformer_layer_{i}") - - # Norm layer - loader.port_weight(keras_variable=decoder_layer.input_layernorm.gamma, - hf_weight_key=f'h.{i}.input_layernorm.weight') - - # Attention layers - loader.port_weight(keras_variable=decoder_layer.attention_layer.output_dense.kernel, - hf_weight_key= f'h.{i}.self_attention.dense.weight') - - loader.port_weight(keras_variable= decoder_layer.post_attention_layernorm.gamma, - hf_weight_key=f'h.{i}.self_attention.query_key_value.weight', - hook_fn=lambda hf_tensor, 
keras_shape: np.mean(np.reshape(hf_tensor, (-1, keras_shape[0])), axis=0)) + # Embeddings + loader.port_weight( + keras_variable=backbone.get_layer("token_embedding").embeddings, + hf_weight_key="word_embeddings.weight", + ) + + for i in range(backbone.num_layers): + decoder_layer = backbone.get_layer(f"transformer_layer_{i}") + + # Norm layer + loader.port_weight( + keras_variable=decoder_layer.input_layernorm.gamma, + hf_weight_key=f"h.{i}.input_layernorm.weight", + ) + + # Attention layers + loader.port_weight( + keras_variable=decoder_layer.attention_layer.output_dense.kernel, + hf_weight_key=f"h.{i}.self_attention.dense.weight", + ) + + loader.port_weight( + keras_variable=decoder_layer.post_attention_layernorm.gamma, + hf_weight_key=f"h.{i}.self_attention.query_key_value.weight", + hook_fn=lambda hf_tensor, keras_shape: np.mean( + np.reshape(hf_tensor, (-1, keras_shape[0])), axis=0 + ), + ) def convert_tokenizer(cls, preset, **kwargs): - tokenizer_config = load_json(preset, 'tokenizer_config.json') - tokenizer_data = load_json(preset, 'tokenizer.json') + tokenizer_data = load_json(preset, "tokenizer.json") vocab = tokenizer_data["model"]["vocab"] - merges = tokenizer_data["model"].get("merges", None) + merges = tokenizer_data["model"].get("merges", None) - tokenizer_kwargs = { - "vocabulary": vocab, - "merges": merges - } - return cls(**tokenizer_kwargs) \ No newline at end of file + tokenizer_kwargs = {"vocabulary": vocab, "merges": merges} + return cls(**tokenizer_kwargs) diff --git a/keras_hub/src/utils/transformers/convert_falcon_test.py b/keras_hub/src/utils/transformers/convert_falcon_test.py index 40c2146993..b6153edd5d 100644 --- a/keras_hub/src/utils/transformers/convert_falcon_test.py +++ b/keras_hub/src/utils/transformers/convert_falcon_test.py @@ -2,9 +2,9 @@ from keras_hub.src.models.falcon.falcon_backbone import FalconBackbone from keras_hub.src.models.falcon.falcon_causal_lm import FalconCausalLM - from keras_hub.src.tests.test_case import TestCase + class TestTask(TestCase): @pytest.mark.large def test_convert_tiny_preset(self): @@ -21,4 +21,3 @@ def test_class_detection(self): load_weights=False, ) self.assertIsInstance(model, FalconBackbone) - diff --git a/keras_hub/src/utils/transformers/preset_loader.py b/keras_hub/src/utils/transformers/preset_loader.py index 58dbbc9d16..3679c31f3d 100644 --- a/keras_hub/src/utils/transformers/preset_loader.py +++ b/keras_hub/src/utils/transformers/preset_loader.py @@ -7,13 +7,13 @@ from keras_hub.src.utils.transformers import convert_bart from keras_hub.src.utils.transformers import convert_bert from keras_hub.src.utils.transformers import convert_distilbert +from keras_hub.src.utils.transformers import convert_falcon from keras_hub.src.utils.transformers import convert_gemma from keras_hub.src.utils.transformers import convert_gpt2 from keras_hub.src.utils.transformers import convert_llama3 from keras_hub.src.utils.transformers import convert_mistral from keras_hub.src.utils.transformers import convert_pali_gemma from keras_hub.src.utils.transformers import convert_vit -from keras_hub.src.utils.transformers import convert_falcon from keras_hub.src.utils.transformers.safetensor_utils import SafetensorLoader @@ -42,7 +42,7 @@ def __init__(self, preset, config): self.converter = convert_pali_gemma elif model_type == "vit": self.converter = convert_vit - elif model_type == 'falcon': + elif model_type == "falcon": self.converter = convert_falcon else: raise ValueError(
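
A minimal usage sketch of the path this patch series wires up, assuming the public keras_hub.models namespace exposes FalconCausalLM and FalconBackbone the same way the tests above import them from keras_hub.src:

    import keras_hub

    # Routing happens in preset_loader.py: model_type "falcon" now selects
    # convert_falcon, which maps the Hugging Face config onto
    # FalconBackbone's constructor arguments and ports the safetensors
    # weights layer by layer.
    causal_lm = keras_hub.models.FalconCausalLM.from_preset(
        "hf://tiiuae/falcon-7b"
    )
    causal_lm.generate(["What is your favorite condiment?"], max_length=15)

    # Architecture-only load, as exercised in test_class_detection above.
    backbone = keras_hub.models.FalconBackbone.from_preset(
        "hf://tiiuae/falcon-7b",
        load_weights=False,
    )

The "hf://tiiuae/falcon-7b" preset is the same checkpoint the large tests use; any Falcon checkpoint published in the same Hugging Face format should take the same converter path.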