From cd4e4c689ab453cf30e184e2bf860e8522ba967d Mon Sep 17 00:00:00 2001
From: Changho Hwang
Date: Fri, 13 Oct 2023 08:48:22 +0000
Subject: [PATCH] Change the default setup

---
NOTE(review): the second hunk replaces `args.n_layers = 24` with
`args.max_seq_len = 1024`, but the preceding context comment
("PyTorch model cannot run all layers due to OOM") described the
removed layer cap and is now stale — with the cap gone, all layers
will run. Confirm this is intended; if so, a follow-up should drop
or reword that comment. (This note lives in the region `git am`
ignores, so it does not affect patch application.)

 examples/llama/model_test.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/llama/model_test.py b/examples/llama/model_test.py
index 9a9da2109..788df1a5c 100644
--- a/examples/llama/model_test.py
+++ b/examples/llama/model_test.py
@@ -429,7 +429,7 @@ def test(args, batch_size, seq_len, dtype, world_size):
 
     # Configurations
     args = ModelArgs7B()
     batch_size = 1
-    seq_len = 2048
+    seq_len = 1024
     dtype = np.float16
     world_size = 1
@@ -437,7 +437,7 @@ def test(args, batch_size, seq_len, dtype, world_size):
     args.vocab_size = 32000
 
     # PyTorch model cannot run all layers due to OOM
-    args.n_layers = 24
+    args.max_seq_len = 1024
 
     # Verify the configurations
     assert batch_size <= args.max_batch_size