kingabzpro committed (verified)
Commit 6b7bfc8 · Parent(s): 028c438

Upload fine-tuning-qwen-3.ipynb

Files changed (1)
  1. fine-tuning-qwen-3.ipynb +1 -0
fine-tuning-qwen-3.ipynb ADDED

Kaggle notebook (Python 3.11, NVIDIA Tesla T4 accelerator, GPU and internet enabled) for fine-tuning Qwen 3 8B.

## Setting Up

```python
%%capture
%pip install -U accelerate
%pip install -U peft
%pip install -U trl
%pip install -U bitsandbytes
%pip install -U transformers
```

```python
from huggingface_hub import login
from kaggle_secrets import UserSecretsClient

user_secrets = UserSecretsClient()
HF_TOKEN = user_secrets.get_secret("HUGGINGFACE_TOKEN")

login(HF_TOKEN)
```

## Loading the model and tokenizer

```python
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
import kagglehub
import torch

# Download the checkpoint from the Kaggle Models hub and get its local path
model_dir = kagglehub.model_download("qwen-lm/qwen-3/transformers/8b")
```

```python
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                # quantize weights to 4-bit
    bnb_4bit_quant_type="nf4",        # normal-float-4 codebook
    bnb_4bit_use_double_quant=False,  # no nested (double) quantization
    bnb_4bit_compute_dtype="float16"  # matmuls run in float16
)
```

```python
# Load tokenizer & model
tokenizer = AutoTokenizer.from_pretrained(model_dir, use_fast=True)

model = AutoModelForCausalLM.from_pretrained(
    model_dir,
    quantization_config=bnb_config,
    device_map="auto",
    torch_dtype="float16",
    trust_remote_code=True
)

model.config.use_cache = False
model.config.pretraining_tp = 1
```

Output: the checkpoint loads in 5 shards; XLA prints "Unable to register cuFFT/cuDNN/cuBLAS factory" warnings because those plugins were already registered.
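
The two cells above quantize the 8B checkpoint to 4-bit NF4 and let Accelerate place the layers with `device_map="auto"`. A minimal sanity check, not part of the uploaded notebook, assuming the `model` object defined above and the usual Llama-style module layout in Qwen 3:

```python
# Sketch: confirm the 4-bit load and see how layers were placed across GPUs.
print(f"Memory footprint: {model.get_memory_footprint() / 1e9:.2f} GB")
print(model.hf_device_map)  # module -> device mapping chosen by device_map="auto"
print(type(model.model.layers[0].self_attn.q_proj))  # expect a bitsandbytes Linear4bit
```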

## Loading and processing the dataset

```python
prompt_style = """### Instruction:
You are a medical expert with advanced knowledge in clinical reasoning, diagnostics, and treatment planning.
Please respond to the following medical question with a brief diagnosis.

### Question:
{}

### Response:
{}"""
```

```python
EOS_TOKEN = tokenizer.eos_token  # Must add EOS_TOKEN

def formatting_prompts_func(examples):
    inputs = examples["Open-ended Verifiable Question"]
    outputs = examples["Ground-True Answer"]
    texts = []
    for question, response in zip(inputs, outputs):
        # Append the EOS token to the response if it's not already there
        if not response.endswith(tokenizer.eos_token):
            response += tokenizer.eos_token
        text = prompt_style.format(question, response)
        texts.append(text)
    return {"text": texts}
```

```python
from datasets import load_dataset

dataset = load_dataset(
    "FreedomIntelligence/medical-o1-verifiable-problem",
    split="train[0:500]",
    trust_remote_code=True,
)
dataset = dataset.map(
    formatting_prompts_func,
    batched=True,
)
dataset["text"][0]
```

Output (first formatted example):

```
### Instruction:
You are a medical expert with advanced knowledge in clinical reasoning, diagnostics, and treatment planning.
Please respond to the following medical question with a brief diagnosis.

### Question:
An 88-year-old woman with osteoarthritis is experiencing mild epigastric discomfort and has vomited material resembling coffee grounds multiple times. Considering her use of naproxen, what is the most likely cause of her gastrointestinal blood loss?

### Response:
Gastric ulcer<|im_end|>
```

```python
from transformers import DataCollatorForLanguageModeling

data_collator = DataCollatorForLanguageModeling(
    tokenizer=tokenizer,
    mlm=False
)
```
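
A tiny illustration (made-up question and answer, not a dataset row) of the training string that `formatting_prompts_func` builds, assuming the `tokenizer` and function defined above:

```python
# Hypothetical mini-batch, for illustration only.
fake_batch = {
    "Open-ended Verifiable Question": ["A patient presents with symptom X. What is the most likely diagnosis?"],
    "Ground-True Answer": ["Condition Y"],
}
# Produces the prompt template filled with the question and the answer,
# with the tokenizer's EOS token appended to the response.
print(formatting_prompts_func(fake_batch)["text"][0])
```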

## Model inference before fine-tuning

```python
question = dataset[0]['Open-ended Verifiable Question']
inputs = tokenizer(
    [prompt_style.format(question, "") + tokenizer.eos_token],
    return_tensors="pt"
).to("cuda")

outputs = model.generate(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    max_new_tokens=20,
    eos_token_id=tokenizer.eos_token_id,
    use_cache=True,
)
response = tokenizer.batch_decode(outputs, skip_special_tokens=True)
print(response[0].split("### Response:")[1])
```

Output:

```
The most likely cause of the gastrointestinal blood loss in this 88-year-old woman is **
```

## Setting up the model

```python
from peft import LoraConfig, get_peft_model

# LoRA config
peft_config = LoraConfig(
    lora_alpha=16,          # Scaling factor for LoRA
    lora_dropout=0.05,      # Add slight dropout for regularization
    r=64,                   # Rank of the LoRA update matrices
    bias="none",            # No bias reparameterization
    task_type="CAUSAL_LM",  # Task type: Causal Language Modeling
    target_modules=[
        "q_proj",
        "k_proj",
        "v_proj",
        "o_proj",
        "gate_proj",
        "up_proj",
        "down_proj",
    ],                      # Target modules for LoRA
)
```

```python
from trl import SFTTrainer
from transformers import TrainingArguments

# Training Arguments
training_arguments = TrainingArguments(
    output_dir="output",
    per_device_train_batch_size=1,
    per_device_eval_batch_size=1,
    gradient_accumulation_steps=2,
    optim="paged_adamw_32bit",
    num_train_epochs=1,
    logging_steps=0.2,
    warmup_steps=10,
    logging_strategy="steps",
    learning_rate=2e-4,
    fp16=False,
    bf16=False,
    group_by_length=True,
    report_to="none"
)

# Initialize the Trainer
trainer = SFTTrainer(
    model=model,
    args=training_arguments,
    train_dataset=dataset,
    peft_config=peft_config,
    data_collator=data_collator,
)
```

Output: TRL preprocesses the 500 training examples (converting to ChatML, adding EOS, tokenizing, truncating) and warns that no `label_names` were provided for `PeftModelForCausalLM`, so an empty `label_names` list is used instead.
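
With rank-64 adapters on every attention and MLP projection, only the LoRA weights are trainable while the quantized base stays frozen. A quick way to see the trainable share, not in the uploaded notebook, assuming the `trainer` built above (which has already wrapped the model with `peft_config`):

```python
# trainer.model is a PeftModelForCausalLM after SFTTrainer applies peft_config.
# Prints something like: trainable params: ... || all params: ... || trainable%: ...
trainer.model.print_trainable_parameters()
```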

## Model Training

```python
import gc, torch

gc.collect()
torch.cuda.empty_cache()
model.config.use_cache = False
trainer.train()
```

The run fails: the traceback goes through `SFTTrainer.compute_loss` into `Qwen3ForCausalLM.forward` and `ForCausalLMLoss`, and ends with

```
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:1 and cuda:0!
```
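
The error comes from `device_map="auto"` sharding the 8B model across both GPUs, so the loss tensor and the `num_items_in_batch` count end up on different devices when the loss is normalized. One common workaround, sketched here and not part of the uploaded notebook, is to keep the whole quantized model on a single GPU when loading it:

```python
# Sketch of a possible fix, assuming bnb_config and model_dir from earlier cells:
# place every module on cuda:0 instead of sharding across cuda:0 and cuda:1.
model = AutoModelForCausalLM.from_pretrained(
    model_dir,
    quantization_config=bnb_config,
    device_map={"": 0},      # everything on GPU 0
    torch_dtype="float16",
    trust_remote_code=True,
)
```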

## Model inference after fine-tuning

```python
question = dataset[0]['Open-ended Verifiable Question']
inputs = tokenizer(
    [prompt_style.format(question, "") + tokenizer.eos_token],
    return_tensors="pt"
).to("cuda")

outputs = model.generate(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    max_new_tokens=20,
    eos_token_id=tokenizer.eos_token_id,
    use_cache=True,
)
response = tokenizer.batch_decode(outputs, skip_special_tokens=True)
print(response[0].split("### Response:")[1])
```

```python
question = dataset[150]['Open-ended Verifiable Question']
inputs = tokenizer(
    [prompt_style.format(question, "") + tokenizer.eos_token],
    return_tensors="pt"
).to("cuda")

outputs = model.generate(
    input_ids=inputs.input_ids,
    attention_mask=inputs.attention_mask,
    max_new_tokens=20,
    eos_token_id=tokenizer.eos_token_id,
    use_cache=True,
)
response = tokenizer.batch_decode(outputs, skip_special_tokens=True)
print(response[0].split("### Response:")[1])
```

Neither cell has recorded output in this run.

## Saving the model

```python
new_model_name = "Qwen-3-8B-Medical-Reasoning-R1"
model.push_to_hub(new_model_name)
tokenizer.push_to_hub(new_model_name)
```
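
A sketch, not part of the uploaded notebook, of how the pushed weights could be reloaded later, assuming the Hub repo contains a LoRA adapter and using a placeholder repo id:

```python
from peft import PeftModel

# Reload the 4-bit base model, then attach the fine-tuned adapter from the Hub.
base = AutoModelForCausalLM.from_pretrained(
    model_dir,
    quantization_config=bnb_config,
    device_map="auto",
    torch_dtype="float16",
)
ft_model = PeftModel.from_pretrained(base, "<username>/Qwen-3-8B-Medical-Reasoning-R1")
ft_model.eval()
```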