Use login_and_download_hf_lm in finetuning path
Signed-off-by: Mamta Singh <[email protected]>
quic-mamta committed Jan 21, 2025
1 parent 2904183 commit 0e0c814
Showing 1 changed file with 6 additions and 4 deletions.
10 changes: 6 additions & 4 deletions QEfficient/cloud/finetune.py
@@ -30,6 +30,7 @@
     get_preprocessed_dataset,
 )
 from QEfficient.finetune.utils.train_utils import get_longest_seq_length, print_model_size, train
+from QEfficient.utils._utils import login_and_download_hf_lm
 
 try:
     import torch_qaic  # noqa: F401
@@ -62,9 +63,9 @@ def main(**kwargs):
         # TODO: may have to init qccl backend, next try run with torchrun command
         torch_device = torch.device(device)
         assert torch_device.type != "cpu", "Host doesn't support single-node DDP"
-        assert torch_device.index is None, (
-            f"DDP requires specification of device type only, however provided device index as well: {torch_device}"
-        )
+        assert (
+            torch_device.index is None
+        ), f"DDP requires specification of device type only, however provided device index as well: {torch_device}"
         dist.init_process_group(backend=train_config.dist_backend)
         # from here onward "qaic/cuda" will automatically map to "qaic:i/cuda:i", where i = process rank
         getattr(torch, torch_device.type).set_device(dist.get_rank())
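
For reference, the assert in this hunk follows standard torch.device semantics: a bare device type string ("qaic" or "cuda") carries no index, while an explicit "cuda:0"-style string does, and the DDP setup here requires the bare form so each process rank can claim its own index via set_device(rank). A minimal illustration (not part of the commit):

import torch

# A device type alone has no index; this form passes the assert.
assert torch.device("cuda").index is None
# An explicit index is set, so this form would trip the assert.
assert torch.device("cuda:1").index == 1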
@@ -76,8 +77,9 @@
 
     # Load the pre-trained model and setup its configuration
     # config = AutoConfig.from_pretrained(train_config.model_name)
+    pretrained_model_path = login_and_download_hf_lm(train_config.model_name)
     model = AutoModelForCausalLM.from_pretrained(
-        train_config.model_name,
+        pretrained_model_path,
         use_cache=False,
         attn_implementation="sdpa",
         torch_dtype=torch.float16,
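
For context, a minimal sketch of what a helper like login_and_download_hf_lm could do, assuming it authenticates with the Hugging Face Hub and returns a local snapshot path. The real implementation lives in QEfficient/utils/_utils.py and may differ; the HF_TOKEN lookup and the function body below are illustrative assumptions:

import os

from huggingface_hub import login, snapshot_download


def login_and_download_hf_lm_sketch(model_name: str) -> str:
    # Log in only when a token is available (needed for gated models);
    # the HF_TOKEN environment variable is an assumed token source.
    token = os.getenv("HF_TOKEN")
    if token:
        login(token=token)
    # Download the model snapshot and return its local path, so that
    # AutoModelForCausalLM.from_pretrained() loads from disk.
    return snapshot_download(repo_id=model_name)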
