diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/README.md b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e5a140c69d5c2887bfe0600718466c0cbcc4f359 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: https://huggingface.co/robotics-diffusion-transformer/rdt-1b +- Docs: [More Information Needed] \ No newline at end of file diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/config.json b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/config.json new file mode 100644 index 0000000000000000000000000000000000000000..8fc22a260a06ec3d871d840f4308c0d9c8227c9a --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/config.json @@ -0,0 +1,49 @@ +{ + "action_dim": 2, + "ema": { + "inv_gamma": 1.0, + "max_value": 0.9999, + "min_value": 0.0, + "power": 0.75, + "update_after_step": 0 + }, + "img_adaptor": "mlp2x_gelu", + "img_cond_len": 2916, + "img_pos_embed_config": [ + [ + "image", + [ + 2, + 2, + -729 + ] + ] + ], + "img_token_dim": 1152, + "lang_adaptor": "mlp2x_gelu", + "lang_pos_embed_config": [ + [ + "lang", + -1024 + ] + ], + "lang_token_dim": 3584, + "max_lang_cond_len": 1024, + "noise_scheduler": { + "beta_schedule": "squaredcos_cap_v2", + "clip_sample": false, + "num_inference_timesteps": 5, + "num_train_timesteps": 1000, + "prediction_type": "sample", + "type": "ddpm" + }, + "pred_horizon": 4, + "rdt": { + "cond_pos_embed_type": "multimodal", + "depth": 28, + "hidden_size": 2048, + "num_heads": 32 + }, + "state_adaptor": "mlp3x_gelu", + "state_token_dim": 2 +} \ No newline at end of file diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/ema/model.safetensors b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/ema/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..53419415abdd748f55aa8416ca2aacd9be868e5a --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/ema/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7522fec1fefdaf5d519c646492849f79e6662b3b5e003d96b8bf98b484dd1e84 +size 2437379836 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/latest b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/latest new file mode 100644 index 0000000000000000000000000000000000000000..7b2c8602be034ae63f23b293f8d037fa7afa0c54 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/latest @@ -0,0 +1 @@ +pytorch_model \ No newline at end of file diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model.bin b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..a9dfa85e5d396319c9c464dfa0d0457b49903f74 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6efee3028c24664050d666927b14d5a7a34590c1af3c89869ab93fc2dc874321 +size 2437429626 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt 
b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..07078da14ed2d7f996dfd542f642938192299502 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a8c89b0e35d171ebfa58596b9c89935ae7546b3b58eac7be5470371e1751b1b +size 3655985904 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..f9f62b5fc47aebb307d6ec720112ad772761c129 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32afc2a5bd489709d2999cf6731000048547f5f695629881b3fcb655e7ecf7f5 +size 3655985968 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..93fdad2b1d70d2b375b95122f62e5bb6f67e3a1a --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bc240f986158546b97a5199c1c24f84d2acbbd81d4a8ac90923ace1e0f88e60c +size 3655986224 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..2b13899e21569edf55da9720149ccacb2e16dbe6 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c51c5d7e8ebeb6958e5ee9cd1a1902ba98bd92270aac7aa4b43f1cc9c1727218 +size 3655986416 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model/mp_rank_00_model_states.pt b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model/mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..7a721055ac56e4ec34a20af0c5d99ec390362138 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/pytorch_model/mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ab4ee2d16bfdfae8d6e3bd7f39bc76d70a669a4b108f62af7f020b3531d00cf8 +size 2437472876 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/random_states_0.pkl b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/random_states_0.pkl new file mode 100644 index 0000000000000000000000000000000000000000..b85c7000aab54535392b4c5ab9c452b6fa408d31 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/random_states_0.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3447d27e28661f989f75037c9261766da78a679c71e507ddb80a05a14f64793e +size 15124 diff --git 
a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/random_states_1.pkl b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/random_states_1.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e21a4b7a3987faec9d183c6c63a87956f5c52b51 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/random_states_1.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9731315115931d65b66d4eaf79a17a5b956732fa0fb5becdeb58860f1e5a36d +size 15060 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/random_states_2.pkl b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/random_states_2.pkl new file mode 100644 index 0000000000000000000000000000000000000000..fb3ae3c36c42c22a11884511352e225f8a6096fc --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/random_states_2.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:61d52f3e71ef5bdce09485f4f7aa914a28d4e6b40f98da013e76e3d6d3b8ab86 +size 15060 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/random_states_3.pkl b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/random_states_3.pkl new file mode 100644 index 0000000000000000000000000000000000000000..12e4692e235a73c7d5a1488f3e8794ee5fbcf372 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/random_states_3.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ab3b81e3c2a7a80ef466abb3a9fb9b81c577072f6047f61f18c56ebd30d4e30 +size 14996 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/scheduler.bin b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/scheduler.bin new file mode 100644 index 0000000000000000000000000000000000000000..6fe8d695bd13f9afd76c81a49a281e8a6bf6b288 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/scheduler.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f16f2c5df6f7f11b1b163edd00237d4e57b97e8d09d9544d52df274294b4163 +size 1000 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-42000/zero_to_fp32.py b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/zero_to_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-42000/zero_to_fp32.py @@ -0,0 +1,760 @@ +#!/usr/bin/env python + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets +# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in +# the future. Once extracted, the weights don't require DeepSpeed and can be used in any +# application. +# +# example: +# python zero_to_fp32.py . output_dir/ +# or +# python zero_to_fp32.py . output_dir/ --safe_serialization + +import argparse +import torch +import glob +import math +import os +import re +import gc +import json +import numpy as np +from tqdm import tqdm +from collections import OrderedDict +from dataclasses import dataclass + +# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with +# DeepSpeed data structures it has to be available in the current python environment. 
+from deepspeed.utils import logger +from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, + FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, + FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) + + +@dataclass +class zero_model_state: + buffers: dict() + param_shapes: dict() + shared_params: list + ds_version: int + frozen_param_shapes: dict() + frozen_param_fragments: dict() + + +debug = 0 + +# load to cpu +device = torch.device('cpu') + + +def atoi(text): + return int(text) if text.isdigit() else text + + +def natural_keys(text): + ''' + alist.sort(key=natural_keys) sorts in human order + http://nedbatchelder.com/blog/200712/human_sorting.html + (See Toothy's implementation in the comments) + ''' + return [atoi(c) for c in re.split(r'(\d+)', text)] + + +def get_model_state_file(checkpoint_dir, zero_stage): + if not os.path.isdir(checkpoint_dir): + raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") + + # there should be only one file + if zero_stage <= 2: + file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") + elif zero_stage == 3: + file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") + + if not os.path.exists(file): + raise FileNotFoundError(f"can't find model states file at '{file}'") + + return file + + +def get_checkpoint_files(checkpoint_dir, glob_pattern): + # XXX: need to test that this simple glob rule works for multi-node setup too + ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) + + if len(ckpt_files) == 0: + raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") + + return ckpt_files + + +def get_optim_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") + + +def get_model_state_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") + + +def parse_model_states(files): + zero_model_states = [] + for file in files: + state_dict = torch.load(file, map_location=device, weights_only=False) + + if BUFFER_NAMES not in state_dict: + raise ValueError(f"{file} is not a model state checkpoint") + buffer_names = state_dict[BUFFER_NAMES] + if debug: + print("Found buffers:", buffer_names) + + # recover just the buffers while restoring them to fp32 if they were saved in fp16 + buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} + param_shapes = state_dict[PARAM_SHAPES] + + # collect parameters that are included in param_shapes + param_names = [] + for s in param_shapes: + for name in s.keys(): + param_names.append(name) + + # update with frozen parameters + frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) + if frozen_param_shapes is not None: + if debug: + print(f"Found frozen_param_shapes: {frozen_param_shapes}") + param_names += list(frozen_param_shapes.keys()) + + # handle shared params + shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] + + ds_version = state_dict.get(DS_VERSION, None) + + frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) + + z_model_state = zero_model_state(buffers=buffers, + param_shapes=param_shapes, + shared_params=shared_params, + ds_version=ds_version, + frozen_param_shapes=frozen_param_shapes, + frozen_param_fragments=frozen_param_fragments) + zero_model_states.append(z_model_state) + + return zero_model_states + + +def parse_optim_states(files, ds_checkpoint_dir): + total_files = 
len(files) + state_dicts = [] + for f in tqdm(files, desc='Loading checkpoint shards'): + state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False) + # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights + # and also handle the case where it was already removed by another helper script + state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) + state_dicts.append(state_dict) + + if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: + raise ValueError(f"{files[0]} is not a zero checkpoint") + zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] + world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] + + # For ZeRO-2 each param group can have different partition_count as data parallelism for expert + # parameters can be different from data parallelism for non-expert parameters. So we can just + # use the max of the partition_count to get the dp world_size. + + if type(world_size) is list: + world_size = max(world_size) + + if world_size != total_files: + raise ValueError( + f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " + "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." + ) + + # the groups are named differently in each stage + if zero_stage <= 2: + fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS + elif zero_stage == 3: + fp32_groups_key = FP32_FLAT_GROUPS + else: + raise ValueError(f"unknown zero stage {zero_stage}") + + fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] + return zero_stage, world_size, fp32_flat_groups + + +def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters): + """ + Returns fp32 state_dict reconstructed from ds checkpoint + + Args: + - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) + + """ + print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") + + optim_files = get_optim_files(ds_checkpoint_dir) + zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) + print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") + + model_files = get_model_state_files(ds_checkpoint_dir) + + zero_model_states = parse_model_states(model_files) + print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') + + if zero_stage <= 2: + return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + elif zero_stage == 3: + return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + + +def _zero2_merge_frozen_params(state_dict, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + frozen_param_fragments = zero_model_states[0].frozen_param_fragments + + if debug: + num_elem = sum(s.numel() for s in frozen_param_shapes.values()) + print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) + print(f'Frozen params: Have {avail_numel} numels to process.') + 
print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + state_dict[name] = frozen_param_fragments[name] + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _has_callable(obj, fn): + attr = getattr(obj, fn, None) + return callable(attr) + + +def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + + # Reconstruction protocol: + # + # XXX: document this + + if debug: + for i in range(world_size): + for j in range(len(fp32_flat_groups[0])): + print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") + + # XXX: memory usage doubles here (zero2) + num_param_groups = len(fp32_flat_groups[0]) + merged_single_partition_of_fp32_groups = [] + for i in range(num_param_groups): + merged_partitions = [sd[i] for sd in fp32_flat_groups] + full_single_fp32_vector = torch.cat(merged_partitions, 0) + merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) + avail_numel = sum( + [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) + + if debug: + wanted_params = sum([len(shapes) for shapes in param_shapes]) + wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) + # not asserting if there is a mismatch due to possible padding + print(f"Have {avail_numel} numels to process.") + print(f"Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + total_numel = 0 + total_params = 0 + for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): + offset = 0 + avail_numel = full_single_fp32_vector.numel() + for name, shape in shapes.items(): + + unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) + total_numel += unpartitioned_numel + total_params += 1 + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) + offset += unpartitioned_numel + + # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and + # avail_numel can differ by anywhere between 0..2*world_size. 
Due to two unrelated complex + # paddings performed in the code it's almost impossible to predict the exact numbers w/o the + # live optimizer object, so we are checking that the numbers are within the right range + align_to = 2 * world_size + + def zero2_align(x): + return align_to * math.ceil(x / align_to) + + if debug: + print(f"original offset={offset}, avail_numel={avail_numel}") + + offset = zero2_align(offset) + avail_numel = zero2_align(avail_numel) + + if debug: + print(f"aligned offset={offset}, avail_numel={avail_numel}") + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero2_merge_frozen_params(state_dict, zero_model_states) + + _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def zero3_partitioned_param_info(unpartitioned_numel, world_size): + remainder = unpartitioned_numel % world_size + padding_numel = (world_size - remainder) if remainder else 0 + partitioned_numel = math.ceil(unpartitioned_numel / world_size) + return partitioned_numel, padding_numel + + +def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + if debug: + for i in range(world_size): + num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) + print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in zero_model_states[0].frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) + state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +class GatheredTensor: + """ + A pseudo tensor that collects partitioned weights. + It is more memory efficient when there are multiple groups. 
+ """ + + def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape): + self.flat_groups = flat_groups + self.flat_groups_offset = flat_groups_offset + self.offset = offset + self.partitioned_numel = partitioned_numel + self.shape = shape + self.dtype = self.flat_groups[0][0].dtype + + def contiguous(self): + """ + Merge partitioned weights from flat_groups into a single tensor. + """ + end_idx = self.offset + self.partitioned_numel + world_size = len(self.flat_groups) + pad_flat_param_chunks = [] + + for rank_i in range(world_size): + # for each rank, we need to collect weights from related group/groups + flat_groups_at_rank_i = self.flat_groups[rank_i] + start_group_id = None + end_group_id = None + for group_id in range(len(self.flat_groups_offset)): + if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]: + start_group_id = group_id + if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]: + end_group_id = group_id + break + # collect weights from related group/groups + for group_id in range(start_group_id, end_group_id + 1): + flat_tensor = flat_groups_at_rank_i[group_id] + start_offset = self.offset - self.flat_groups_offset[group_id] + end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id] + pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset]) + + # collect weights from all ranks + pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0) + param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous() + return param + + +def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size + + # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each + # param, re-consolidating each param, while dealing with padding if any + + # merge list of dicts, preserving order + param_shapes = {k: v for d in param_shapes for k, v in d.items()} + + if debug: + for i in range(world_size): + print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") + + wanted_params = len(param_shapes) + wanted_numel = sum(shape.numel() for shape in param_shapes.values()) + # not asserting if there is a mismatch due to possible padding + avail_numel = fp32_flat_groups[0].numel() * world_size + print(f"Trainable params: Have {avail_numel} numels to process.") + print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + offset = 0 + total_numel = 0 + total_params = 0 + flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]])) + for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'): + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + total_params += 1 + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + # memory efficient tensor + tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape) + 
state_dict[name] = tensor + offset += partitioned_numel + + offset *= world_size + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) + + _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def to_torch_tensor(state_dict, return_empty_tensor=False): + """ + Convert state_dict of GatheredTensor to torch tensor + """ + torch_state_dict = {} + converted_tensors = {} + for name, tensor in state_dict.items(): + tensor_id = id(tensor) + if tensor_id in converted_tensors: # shared tensors + shared_tensor = torch_state_dict[converted_tensors[tensor_id]] + torch_state_dict[name] = shared_tensor + else: + converted_tensors[tensor_id] = name + if return_empty_tensor: + torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype) + else: + torch_state_dict[name] = tensor.contiguous() + return torch_state_dict + + +def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag=None, + exclude_frozen_parameters=False, + lazy_mode=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with + ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example + via a model hub. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pesduo tensor instead of torch tensor, which is more memory efficient. + Convert the pesduo tensor to torch tensor by ``.contiguous()`` + + Returns: + - pytorch ``state_dict`` + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + # do the training and checkpoint saving + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu + model = model.cpu() # move to cpu + model.load_state_dict(state_dict) + # submit to model hub or save the model to share with others + + In this example the ``model`` will no longer be usable in the deepspeed context of the same + application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. + + If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. + + Note: the above usage may not work if your application doesn't have sufficient free CPU memory. + You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with + the checkpoint. 
Or you can load state_dict in lazy mode :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu + for name, lazy_tensor in state_dict.item(): + tensor = lazy_tensor.contiguous() # to cpu + print(name, tensor) + # del tensor to release memory if it no longer in use + """ + if tag is None: + latest_path = os.path.join(checkpoint_dir, 'latest') + if os.path.isfile(latest_path): + with open(latest_path, 'r') as fd: + tag = fd.read().strip() + else: + raise ValueError(f"Unable to find 'latest' file at {latest_path}") + + ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) + + if not os.path.isdir(ds_checkpoint_dir): + raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") + + state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters) + if lazy_mode: + return state_dict + else: + return to_torch_tensor(state_dict) + + +def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, + output_dir, + max_shard_size="5GB", + safe_serialization=False, + tag=None, + exclude_frozen_parameters=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be + loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``output_dir``: directory to the pytorch fp32 state_dict output files + - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB + - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + """ + + # Dependency pre-check + if safe_serialization: + try: + from safetensors.torch import save_file + except ImportError: + print('If you want to use `safe_serialization`, please `pip install safetensors`') + raise + if max_shard_size is not None: + try: + from huggingface_hub import split_torch_state_dict_into_shards + except ImportError: + print('If you want to use `max_shard_size`, please `pip install huggingface_hub`') + raise + + # Convert zero checkpoint to state_dict + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag, + exclude_frozen_parameters, + lazy_mode=True) + + # Shard the model if it is too big. 
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin" + if max_shard_size is not None: + filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors") + # an memory-efficient approach for sharding + empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True) + state_dict_split = split_torch_state_dict_into_shards(empty_state_dict, + filename_pattern=filename_pattern, + max_shard_size=max_shard_size) + else: + from collections import namedtuple + StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"]) + state_dict_split = StateDictSplit(is_sharded=False, + filename_to_tensors={weights_name: list(state_dict.keys())}) + + # Save the model by shard + os.makedirs(output_dir, exist_ok=True) + filename_to_tensors = state_dict_split.filename_to_tensors.items() + for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"): + shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors} + shard_state_dict = to_torch_tensor(shard_state_dict) + output_path = os.path.join(output_dir, shard_file) + if safe_serialization: + save_file(shard_state_dict, output_path, metadata={"format": "pt"}) + else: + torch.save(shard_state_dict, output_path) + # release the memory of current shard + for tensor_name in list(shard_state_dict.keys()): + del state_dict[tensor_name] + del shard_state_dict[tensor_name] + del shard_state_dict + gc.collect() + + # Save index if sharded + if state_dict_split.is_sharded: + index = { + "metadata": state_dict_split.metadata, + "weight_map": state_dict_split.tensor_to_filename, + } + save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json" + save_index_file = os.path.join(output_dir, save_index_file) + with open(save_index_file, "w", encoding="utf-8") as f: + content = json.dumps(index, indent=2, sort_keys=True) + "\n" + f.write(content) + + +def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): + """ + 1. Put the provided model to cpu + 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` + 3. Load it into the provided model + + Args: + - ``model``: the model object to update + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + + Returns: + - ``model`: modified model + + Make sure you have plenty of CPU memory available before you call this function. If you don't + have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it + conveniently placed for you in the checkpoint folder. + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) + # submit to model hub or save the model to share with others + + Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context + of the same application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. 
+ + """ + logger.info(f"Extracting fp32 weights") + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + + logger.info(f"Overwriting model with fp32 weights") + model = model.cpu() + model.load_state_dict(state_dict, strict=False) + + return model + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("checkpoint_dir", + type=str, + help="path to the desired checkpoint folder, e.g., path/checkpoint-12") + parser.add_argument("output_dir", + type=str, + help="directory to the pytorch fp32 state_dict output files" + "(e.g. path/checkpoint-12-output/)") + parser.add_argument( + "--max_shard_size", + type=str, + default="5GB", + help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size" + "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`" + "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances" + "without CPU OOM issues.") + parser.add_argument( + "--safe_serialization", + default=False, + action='store_true', + help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).") + parser.add_argument("-t", + "--tag", + type=str, + default=None, + help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") + parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters") + parser.add_argument("-d", "--debug", action='store_true', help="enable debug") + args = parser.parse_args() + + debug = args.debug + + convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, + args.output_dir, + max_shard_size=args.max_shard_size, + safe_serialization=args.safe_serialization, + tag=args.tag, + exclude_frozen_parameters=args.exclude_frozen_parameters) diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/README.md b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e5a140c69d5c2887bfe0600718466c0cbcc4f359 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: https://huggingface.co/robotics-diffusion-transformer/rdt-1b +- Docs: [More Information Needed] \ No newline at end of file diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/config.json b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/config.json new file mode 100644 index 0000000000000000000000000000000000000000..8fc22a260a06ec3d871d840f4308c0d9c8227c9a --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/config.json @@ -0,0 +1,49 @@ +{ + "action_dim": 2, + "ema": { + "inv_gamma": 1.0, + "max_value": 0.9999, + "min_value": 0.0, + "power": 0.75, + "update_after_step": 0 + }, + "img_adaptor": "mlp2x_gelu", + "img_cond_len": 2916, + "img_pos_embed_config": [ + [ + "image", + [ + 2, + 2, + -729 + ] + ] + ], + "img_token_dim": 1152, + "lang_adaptor": "mlp2x_gelu", + "lang_pos_embed_config": [ + [ + "lang", + -1024 + ] + ], + "lang_token_dim": 3584, + "max_lang_cond_len": 1024, + "noise_scheduler": { + "beta_schedule": "squaredcos_cap_v2", + "clip_sample": false, + "num_inference_timesteps": 5, + "num_train_timesteps": 
1000, + "prediction_type": "sample", + "type": "ddpm" + }, + "pred_horizon": 4, + "rdt": { + "cond_pos_embed_type": "multimodal", + "depth": 28, + "hidden_size": 2048, + "num_heads": 32 + }, + "state_adaptor": "mlp3x_gelu", + "state_token_dim": 2 +} \ No newline at end of file diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/ema/model.safetensors b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/ema/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..7de27d0000b798a975cad06c923d29cce4f8ee77 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/ema/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08d498dfe7a9e4c2bd908104feb97ad6eecdee5fad16b8ef86e93930bd37ff25 +size 2437379836 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/latest b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/latest new file mode 100644 index 0000000000000000000000000000000000000000..7b2c8602be034ae63f23b293f8d037fa7afa0c54 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/latest @@ -0,0 +1 @@ +pytorch_model \ No newline at end of file diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model.bin b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..712db8f9742f7bc7585e635286ceafd8d7350ef4 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c24469d0ff6386783ab7587e3a71cbc2899c22e0fe3c037acd63262e091e0092 +size 2437429626 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..d5b15f25ad9ec4d15602b828118df27d14d7a62f --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:338a0581bc08e297cd517e8387d152388781dbd0c4bda3d365025b8886bd7d3b +size 3655985904 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..cd0ac2c921c86981a7135fc38e07c7b391fea83c --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:277ae3096300358234f9d1aa6dc63520793b44699df275c367735e45cb83ee1d +size 3655985968 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..685a4398dbc73b9a2ca94579f4a9207d150650fe --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:78556c75ce1b3bb440b2f0304e4ca230edec42de2172bb2e66708a72a49ff7de +size 
3655986224 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..f772e4b89883ac31cb58456f5b00d8be081916f0 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9304fc11377e759cf026e87f5c68d8244841302e3fb830e146f7180ea389c2a +size 3655986416 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model/mp_rank_00_model_states.pt b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model/mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..9a61154306771b63b2cd3c1682cefbd47c8e8781 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/pytorch_model/mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a8c03ad29c4505c6f21119999f56540447529d6a63214ac0d61c513f793dbf4 +size 2437472876 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/random_states_0.pkl b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/random_states_0.pkl new file mode 100644 index 0000000000000000000000000000000000000000..6c4a999af1526b99f2c5dd096a875fd72a1e42b5 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/random_states_0.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef2f29dd1793b6576b459bf7878fd19505ed34de286f592bc58f42112f4b6430 +size 15124 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/random_states_1.pkl b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/random_states_1.pkl new file mode 100644 index 0000000000000000000000000000000000000000..298b14575c2bf44721f157cde48b9dcbc0d0dcf2 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/random_states_1.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9f65e5af79b17f5b779ac6455004ffdf90a186a96c2d14ff4914694294bbf540 +size 15060 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/random_states_2.pkl b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/random_states_2.pkl new file mode 100644 index 0000000000000000000000000000000000000000..ca99ec2d226ce0393994226bd647365c533862da --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/random_states_2.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f6fc7a8e8b7c389bc4191ab06a9da5b420242e2005b33283f11d7af73372464e +size 15060 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/random_states_3.pkl b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/random_states_3.pkl new file mode 100644 index 0000000000000000000000000000000000000000..814c017ff1dd33c66895a791e5aab16b88a0e3ed --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/random_states_3.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9061ce264c2321b23cb7e51b3213d7ff7c1b2de011ba7907d69ddce8c7024e8a +size 14996 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/scheduler.bin b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/scheduler.bin new file mode 100644 index 0000000000000000000000000000000000000000..780672b8273c61e1533af50bfbc052c5ba7e1ee2 --- /dev/null +++ 
b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/scheduler.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b57331137e811790fcada18a004f8a1270e8f141d3aa67bb29dfb65e4194304f +size 1000 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-52000/zero_to_fp32.py b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/zero_to_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-52000/zero_to_fp32.py @@ -0,0 +1,760 @@ +#!/usr/bin/env python + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets +# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in +# the future. Once extracted, the weights don't require DeepSpeed and can be used in any +# application. +# +# example: +# python zero_to_fp32.py . output_dir/ +# or +# python zero_to_fp32.py . output_dir/ --safe_serialization + +import argparse +import torch +import glob +import math +import os +import re +import gc +import json +import numpy as np +from tqdm import tqdm +from collections import OrderedDict +from dataclasses import dataclass + +# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with +# DeepSpeed data structures it has to be available in the current python environment. +from deepspeed.utils import logger +from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, + FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, + FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) + + +@dataclass +class zero_model_state: + buffers: dict() + param_shapes: dict() + shared_params: list + ds_version: int + frozen_param_shapes: dict() + frozen_param_fragments: dict() + + +debug = 0 + +# load to cpu +device = torch.device('cpu') + + +def atoi(text): + return int(text) if text.isdigit() else text + + +def natural_keys(text): + ''' + alist.sort(key=natural_keys) sorts in human order + http://nedbatchelder.com/blog/200712/human_sorting.html + (See Toothy's implementation in the comments) + ''' + return [atoi(c) for c in re.split(r'(\d+)', text)] + + +def get_model_state_file(checkpoint_dir, zero_stage): + if not os.path.isdir(checkpoint_dir): + raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") + + # there should be only one file + if zero_stage <= 2: + file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") + elif zero_stage == 3: + file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") + + if not os.path.exists(file): + raise FileNotFoundError(f"can't find model states file at '{file}'") + + return file + + +def get_checkpoint_files(checkpoint_dir, glob_pattern): + # XXX: need to test that this simple glob rule works for multi-node setup too + ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) + + if len(ckpt_files) == 0: + raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") + + return ckpt_files + + +def get_optim_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") + + +def get_model_state_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") + + +def parse_model_states(files): + 
zero_model_states = [] + for file in files: + state_dict = torch.load(file, map_location=device, weights_only=False) + + if BUFFER_NAMES not in state_dict: + raise ValueError(f"{file} is not a model state checkpoint") + buffer_names = state_dict[BUFFER_NAMES] + if debug: + print("Found buffers:", buffer_names) + + # recover just the buffers while restoring them to fp32 if they were saved in fp16 + buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} + param_shapes = state_dict[PARAM_SHAPES] + + # collect parameters that are included in param_shapes + param_names = [] + for s in param_shapes: + for name in s.keys(): + param_names.append(name) + + # update with frozen parameters + frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) + if frozen_param_shapes is not None: + if debug: + print(f"Found frozen_param_shapes: {frozen_param_shapes}") + param_names += list(frozen_param_shapes.keys()) + + # handle shared params + shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] + + ds_version = state_dict.get(DS_VERSION, None) + + frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) + + z_model_state = zero_model_state(buffers=buffers, + param_shapes=param_shapes, + shared_params=shared_params, + ds_version=ds_version, + frozen_param_shapes=frozen_param_shapes, + frozen_param_fragments=frozen_param_fragments) + zero_model_states.append(z_model_state) + + return zero_model_states + + +def parse_optim_states(files, ds_checkpoint_dir): + total_files = len(files) + state_dicts = [] + for f in tqdm(files, desc='Loading checkpoint shards'): + state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False) + # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights + # and also handle the case where it was already removed by another helper script + state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) + state_dicts.append(state_dict) + + if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: + raise ValueError(f"{files[0]} is not a zero checkpoint") + zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] + world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] + + # For ZeRO-2 each param group can have different partition_count as data parallelism for expert + # parameters can be different from data parallelism for non-expert parameters. So we can just + # use the max of the partition_count to get the dp world_size. + + if type(world_size) is list: + world_size = max(world_size) + + if world_size != total_files: + raise ValueError( + f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " + "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." 
+ ) + + # the groups are named differently in each stage + if zero_stage <= 2: + fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS + elif zero_stage == 3: + fp32_groups_key = FP32_FLAT_GROUPS + else: + raise ValueError(f"unknown zero stage {zero_stage}") + + fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] + return zero_stage, world_size, fp32_flat_groups + + +def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters): + """ + Returns fp32 state_dict reconstructed from ds checkpoint + + Args: + - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) + + """ + print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") + + optim_files = get_optim_files(ds_checkpoint_dir) + zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) + print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") + + model_files = get_model_state_files(ds_checkpoint_dir) + + zero_model_states = parse_model_states(model_files) + print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') + + if zero_stage <= 2: + return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + elif zero_stage == 3: + return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + + +def _zero2_merge_frozen_params(state_dict, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + frozen_param_fragments = zero_model_states[0].frozen_param_fragments + + if debug: + num_elem = sum(s.numel() for s in frozen_param_shapes.values()) + print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + state_dict[name] = frozen_param_fragments[name] + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _has_callable(obj, fn): + attr = getattr(obj, fn, None) + return callable(attr) + + +def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + + # Reconstruction protocol: + # + # XXX: document this + + if debug: + for i in range(world_size): + for j in range(len(fp32_flat_groups[0])): + print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") + + # XXX: memory usage doubles here (zero2) + num_param_groups = len(fp32_flat_groups[0]) + merged_single_partition_of_fp32_groups = [] + for i in range(num_param_groups): + merged_partitions = [sd[i] for sd in fp32_flat_groups] + full_single_fp32_vector = torch.cat(merged_partitions, 0) + 
merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) + avail_numel = sum( + [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) + + if debug: + wanted_params = sum([len(shapes) for shapes in param_shapes]) + wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) + # not asserting if there is a mismatch due to possible padding + print(f"Have {avail_numel} numels to process.") + print(f"Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + total_numel = 0 + total_params = 0 + for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): + offset = 0 + avail_numel = full_single_fp32_vector.numel() + for name, shape in shapes.items(): + + unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) + total_numel += unpartitioned_numel + total_params += 1 + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) + offset += unpartitioned_numel + + # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and + # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex + # paddings performed in the code it's almost impossible to predict the exact numbers w/o the + # live optimizer object, so we are checking that the numbers are within the right range + align_to = 2 * world_size + + def zero2_align(x): + return align_to * math.ceil(x / align_to) + + if debug: + print(f"original offset={offset}, avail_numel={avail_numel}") + + offset = zero2_align(offset) + avail_numel = zero2_align(avail_numel) + + if debug: + print(f"aligned offset={offset}, avail_numel={avail_numel}") + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero2_merge_frozen_params(state_dict, zero_model_states) + + _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def zero3_partitioned_param_info(unpartitioned_numel, world_size): + remainder = unpartitioned_numel % world_size + padding_numel = (world_size - remainder) if remainder else 0 + partitioned_numel = math.ceil(unpartitioned_numel / world_size) + return partitioned_numel, padding_numel + + +def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + if debug: + for i in range(world_size): + num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) + print(f'rank {i}: 
{FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in zero_model_states[0].frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) + state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +class GatheredTensor: + """ + A pseudo tensor that collects partitioned weights. + It is more memory efficient when there are multiple groups. + """ + + def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape): + self.flat_groups = flat_groups + self.flat_groups_offset = flat_groups_offset + self.offset = offset + self.partitioned_numel = partitioned_numel + self.shape = shape + self.dtype = self.flat_groups[0][0].dtype + + def contiguous(self): + """ + Merge partitioned weights from flat_groups into a single tensor. 
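+
+ Each rank's flat groups hold a contiguous 1/world_size slice of every parameter
+ (ZeRO-3 partitioning); the relevant slice is located per rank, the slices are
+ concatenated in rank order, truncated to ``shape.numel()`` and reshaped.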
+ """ + end_idx = self.offset + self.partitioned_numel + world_size = len(self.flat_groups) + pad_flat_param_chunks = [] + + for rank_i in range(world_size): + # for each rank, we need to collect weights from related group/groups + flat_groups_at_rank_i = self.flat_groups[rank_i] + start_group_id = None + end_group_id = None + for group_id in range(len(self.flat_groups_offset)): + if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]: + start_group_id = group_id + if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]: + end_group_id = group_id + break + # collect weights from related group/groups + for group_id in range(start_group_id, end_group_id + 1): + flat_tensor = flat_groups_at_rank_i[group_id] + start_offset = self.offset - self.flat_groups_offset[group_id] + end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id] + pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset]) + + # collect weights from all ranks + pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0) + param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous() + return param + + +def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size + + # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each + # param, re-consolidating each param, while dealing with padding if any + + # merge list of dicts, preserving order + param_shapes = {k: v for d in param_shapes for k, v in d.items()} + + if debug: + for i in range(world_size): + print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") + + wanted_params = len(param_shapes) + wanted_numel = sum(shape.numel() for shape in param_shapes.values()) + # not asserting if there is a mismatch due to possible padding + avail_numel = fp32_flat_groups[0].numel() * world_size + print(f"Trainable params: Have {avail_numel} numels to process.") + print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + offset = 0 + total_numel = 0 + total_params = 0 + flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]])) + for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'): + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + total_params += 1 + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + # memory efficient tensor + tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape) + state_dict[name] = tensor + offset += partitioned_numel + + offset *= world_size + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, 
zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) + + _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def to_torch_tensor(state_dict, return_empty_tensor=False): + """ + Convert state_dict of GatheredTensor to torch tensor + """ + torch_state_dict = {} + converted_tensors = {} + for name, tensor in state_dict.items(): + tensor_id = id(tensor) + if tensor_id in converted_tensors: # shared tensors + shared_tensor = torch_state_dict[converted_tensors[tensor_id]] + torch_state_dict[name] = shared_tensor + else: + converted_tensors[tensor_id] = name + if return_empty_tensor: + torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype) + else: + torch_state_dict[name] = tensor.contiguous() + return torch_state_dict + + +def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag=None, + exclude_frozen_parameters=False, + lazy_mode=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with + ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example + via a model hub. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pesduo tensor instead of torch tensor, which is more memory efficient. + Convert the pesduo tensor to torch tensor by ``.contiguous()`` + + Returns: + - pytorch ``state_dict`` + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + # do the training and checkpoint saving + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu + model = model.cpu() # move to cpu + model.load_state_dict(state_dict) + # submit to model hub or save the model to share with others + + In this example the ``model`` will no longer be usable in the deepspeed context of the same + application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. + + If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. + + Note: the above usage may not work if your application doesn't have sufficient free CPU memory. + You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with + the checkpoint. 
Or you can load state_dict in lazy mode :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu + for name, lazy_tensor in state_dict.item(): + tensor = lazy_tensor.contiguous() # to cpu + print(name, tensor) + # del tensor to release memory if it no longer in use + """ + if tag is None: + latest_path = os.path.join(checkpoint_dir, 'latest') + if os.path.isfile(latest_path): + with open(latest_path, 'r') as fd: + tag = fd.read().strip() + else: + raise ValueError(f"Unable to find 'latest' file at {latest_path}") + + ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) + + if not os.path.isdir(ds_checkpoint_dir): + raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") + + state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters) + if lazy_mode: + return state_dict + else: + return to_torch_tensor(state_dict) + + +def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, + output_dir, + max_shard_size="5GB", + safe_serialization=False, + tag=None, + exclude_frozen_parameters=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be + loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``output_dir``: directory to the pytorch fp32 state_dict output files + - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB + - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + """ + + # Dependency pre-check + if safe_serialization: + try: + from safetensors.torch import save_file + except ImportError: + print('If you want to use `safe_serialization`, please `pip install safetensors`') + raise + if max_shard_size is not None: + try: + from huggingface_hub import split_torch_state_dict_into_shards + except ImportError: + print('If you want to use `max_shard_size`, please `pip install huggingface_hub`') + raise + + # Convert zero checkpoint to state_dict + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag, + exclude_frozen_parameters, + lazy_mode=True) + + # Shard the model if it is too big. 
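+ # The shard plan is computed on an empty-tensor copy of the (still lazy) state_dict, so
+ # shard boundaries are derived from shapes/dtypes without materializing the fp32 weights;
+ # each shard is then gathered, written and released one at a time below.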
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin" + if max_shard_size is not None: + filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors") + # an memory-efficient approach for sharding + empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True) + state_dict_split = split_torch_state_dict_into_shards(empty_state_dict, + filename_pattern=filename_pattern, + max_shard_size=max_shard_size) + else: + from collections import namedtuple + StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"]) + state_dict_split = StateDictSplit(is_sharded=False, + filename_to_tensors={weights_name: list(state_dict.keys())}) + + # Save the model by shard + os.makedirs(output_dir, exist_ok=True) + filename_to_tensors = state_dict_split.filename_to_tensors.items() + for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"): + shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors} + shard_state_dict = to_torch_tensor(shard_state_dict) + output_path = os.path.join(output_dir, shard_file) + if safe_serialization: + save_file(shard_state_dict, output_path, metadata={"format": "pt"}) + else: + torch.save(shard_state_dict, output_path) + # release the memory of current shard + for tensor_name in list(shard_state_dict.keys()): + del state_dict[tensor_name] + del shard_state_dict[tensor_name] + del shard_state_dict + gc.collect() + + # Save index if sharded + if state_dict_split.is_sharded: + index = { + "metadata": state_dict_split.metadata, + "weight_map": state_dict_split.tensor_to_filename, + } + save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json" + save_index_file = os.path.join(output_dir, save_index_file) + with open(save_index_file, "w", encoding="utf-8") as f: + content = json.dumps(index, indent=2, sort_keys=True) + "\n" + f.write(content) + + +def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): + """ + 1. Put the provided model to cpu + 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` + 3. Load it into the provided model + + Args: + - ``model``: the model object to update + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + + Returns: + - ``model`: modified model + + Make sure you have plenty of CPU memory available before you call this function. If you don't + have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it + conveniently placed for you in the checkpoint folder. + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) + # submit to model hub or save the model to share with others + + Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context + of the same application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. 
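+
+ Note that the weights are loaded with ``strict=False``, so keys missing from the
+ checkpoint (or unexpected by the model) are skipped rather than raising an error.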
+ + """ + logger.info(f"Extracting fp32 weights") + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + + logger.info(f"Overwriting model with fp32 weights") + model = model.cpu() + model.load_state_dict(state_dict, strict=False) + + return model + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("checkpoint_dir", + type=str, + help="path to the desired checkpoint folder, e.g., path/checkpoint-12") + parser.add_argument("output_dir", + type=str, + help="directory to the pytorch fp32 state_dict output files" + "(e.g. path/checkpoint-12-output/)") + parser.add_argument( + "--max_shard_size", + type=str, + default="5GB", + help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size" + "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`" + "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances" + "without CPU OOM issues.") + parser.add_argument( + "--safe_serialization", + default=False, + action='store_true', + help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).") + parser.add_argument("-t", + "--tag", + type=str, + default=None, + help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") + parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters") + parser.add_argument("-d", "--debug", action='store_true', help="enable debug") + args = parser.parse_args() + + debug = args.debug + + convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, + args.output_dir, + max_shard_size=args.max_shard_size, + safe_serialization=args.safe_serialization, + tag=args.tag, + exclude_frozen_parameters=args.exclude_frozen_parameters) diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/README.md b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e5a140c69d5c2887bfe0600718466c0cbcc4f359 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/README.md @@ -0,0 +1,9 @@ +--- +tags: +- model_hub_mixin +- pytorch_model_hub_mixin +--- + +This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration: +- Library: https://huggingface.co/robotics-diffusion-transformer/rdt-1b +- Docs: [More Information Needed] \ No newline at end of file diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/config.json b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/config.json new file mode 100644 index 0000000000000000000000000000000000000000..8fc22a260a06ec3d871d840f4308c0d9c8227c9a --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/config.json @@ -0,0 +1,49 @@ +{ + "action_dim": 2, + "ema": { + "inv_gamma": 1.0, + "max_value": 0.9999, + "min_value": 0.0, + "power": 0.75, + "update_after_step": 0 + }, + "img_adaptor": "mlp2x_gelu", + "img_cond_len": 2916, + "img_pos_embed_config": [ + [ + "image", + [ + 2, + 2, + -729 + ] + ] + ], + "img_token_dim": 1152, + "lang_adaptor": "mlp2x_gelu", + "lang_pos_embed_config": [ + [ + "lang", + -1024 + ] + ], + "lang_token_dim": 3584, + "max_lang_cond_len": 1024, + "noise_scheduler": { + "beta_schedule": "squaredcos_cap_v2", + "clip_sample": false, + "num_inference_timesteps": 5, + "num_train_timesteps": 
1000, + "prediction_type": "sample", + "type": "ddpm" + }, + "pred_horizon": 4, + "rdt": { + "cond_pos_embed_type": "multimodal", + "depth": 28, + "hidden_size": 2048, + "num_heads": 32 + }, + "state_adaptor": "mlp3x_gelu", + "state_token_dim": 2 +} \ No newline at end of file diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/ema/model.safetensors b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/ema/model.safetensors new file mode 100644 index 0000000000000000000000000000000000000000..9eec2292638ff2954f7bcd39300f36c0864de661 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/ema/model.safetensors @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a978e3980816a29500bb05a1c0aec9a0a2ec5989127f69ca4ded97dd88923d59 +size 2437379836 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/latest b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/latest new file mode 100644 index 0000000000000000000000000000000000000000..7b2c8602be034ae63f23b293f8d037fa7afa0c54 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/latest @@ -0,0 +1 @@ +pytorch_model \ No newline at end of file diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model.bin b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model.bin new file mode 100644 index 0000000000000000000000000000000000000000..06a10258ea52db561c6c96521b9880cbc09bcb35 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b5931885ac8c0a199541805a6b84ce008c033e8118c476602967db3c0e88c1b +size 2437429626 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..2487e6c60bb0a6541d9d4124276c67d1fd79187f --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc85eafd69930942941839f489d22c67abf3492197606a694fdb4240bb0d2718 +size 3655985904 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..6826184b27c93d37cbc37d63e932c8c52f15c8a8 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29620f7766668e1a73418bc6decf419251f7713cd7a3f171866fe1c2de46f605 +size 3655985968 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..0b96725fd96cd837289062686ca14a2bc7d7505e --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afd7f7a2d09e5a1976979f1ca42636ac5546779c66e91cdd71768a9383df01ce +size 
3655986224 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..bd899e14a40fd7530a75349614ceb16df7201e17 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:53f7980e02e218458530d6c1148b428c283bd3211a880e6760cf1a511ec1e34b +size 3655986416 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model/mp_rank_00_model_states.pt b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model/mp_rank_00_model_states.pt new file mode 100644 index 0000000000000000000000000000000000000000..ce714a0ff35bb34d82e575aec57beb7e63c86045 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/pytorch_model/mp_rank_00_model_states.pt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7ac51f57191386b8d711d90426fc1dbe1cc16f30fcab9dbcf6584572edf7b172 +size 2437472876 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/random_states_0.pkl b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/random_states_0.pkl new file mode 100644 index 0000000000000000000000000000000000000000..84f3ab83d22ccd20e4243bd99669d4f0426834e0 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/random_states_0.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:500fa620859b5201ef56598720aec3bbe9da940382549361d4b5170df3620413 +size 15124 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/random_states_1.pkl b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/random_states_1.pkl new file mode 100644 index 0000000000000000000000000000000000000000..8c508e1876be141e5873447b864e0e4d508700ec --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/random_states_1.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b8f5bdacfef43f1b5236db656871d367ecd4fe809c84d32f308ef36ba24ff52 +size 15060 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/random_states_2.pkl b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/random_states_2.pkl new file mode 100644 index 0000000000000000000000000000000000000000..ec8954bcba83697922d1368ad06048fbf1ecfe8d --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/random_states_2.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0d2902db02c991b96f78373a2423eba64c274f4adcce6a120e6dc492fcb6920 +size 15060 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/random_states_3.pkl b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/random_states_3.pkl new file mode 100644 index 0000000000000000000000000000000000000000..515bc4b602beb86f12709812b817cf2c74e9d5e8 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/random_states_3.pkl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45f08700e6490ca6b89fca3f97ccec27f8c68c958d3b175013f6e9d1c307cfc4 +size 14996 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/scheduler.bin b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/scheduler.bin new file mode 100644 index 0000000000000000000000000000000000000000..747bbe3e21227a3093c7ef18a1de21ceee82da55 --- /dev/null +++ 
b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/scheduler.bin @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f61c832a82b62bbf955b403d9dc5dd199f8e544717776463dc870931eb4312d +size 1000 diff --git a/afford_1b_three_qwen_warmup_0224/checkpoint-62000/zero_to_fp32.py b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/zero_to_fp32.py new file mode 100644 index 0000000000000000000000000000000000000000..0e759146cadd92ddfefab3680146c2bd6a2b5c04 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/checkpoint-62000/zero_to_fp32.py @@ -0,0 +1,760 @@ +#!/usr/bin/env python + +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets +# copied into the top level checkpoint dir, so the user can easily do the conversion at any point in +# the future. Once extracted, the weights don't require DeepSpeed and can be used in any +# application. +# +# example: +# python zero_to_fp32.py . output_dir/ +# or +# python zero_to_fp32.py . output_dir/ --safe_serialization + +import argparse +import torch +import glob +import math +import os +import re +import gc +import json +import numpy as np +from tqdm import tqdm +from collections import OrderedDict +from dataclasses import dataclass + +# while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with +# DeepSpeed data structures it has to be available in the current python environment. +from deepspeed.utils import logger +from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS, + FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES, + FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS) + + +@dataclass +class zero_model_state: + buffers: dict() + param_shapes: dict() + shared_params: list + ds_version: int + frozen_param_shapes: dict() + frozen_param_fragments: dict() + + +debug = 0 + +# load to cpu +device = torch.device('cpu') + + +def atoi(text): + return int(text) if text.isdigit() else text + + +def natural_keys(text): + ''' + alist.sort(key=natural_keys) sorts in human order + http://nedbatchelder.com/blog/200712/human_sorting.html + (See Toothy's implementation in the comments) + ''' + return [atoi(c) for c in re.split(r'(\d+)', text)] + + +def get_model_state_file(checkpoint_dir, zero_stage): + if not os.path.isdir(checkpoint_dir): + raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist") + + # there should be only one file + if zero_stage <= 2: + file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt") + elif zero_stage == 3: + file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt") + + if not os.path.exists(file): + raise FileNotFoundError(f"can't find model states file at '{file}'") + + return file + + +def get_checkpoint_files(checkpoint_dir, glob_pattern): + # XXX: need to test that this simple glob rule works for multi-node setup too + ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys) + + if len(ckpt_files) == 0: + raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'") + + return ckpt_files + + +def get_optim_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt") + + +def get_model_state_files(checkpoint_dir): + return get_checkpoint_files(checkpoint_dir, "*_model_states.pt") + + +def parse_model_states(files): + 
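+ # each *_model_states.pt holds that rank's buffers, param shapes, shared-param map,
+ # frozen-param info and DeepSpeed version; collect them into zero_model_state records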
zero_model_states = [] + for file in files: + state_dict = torch.load(file, map_location=device, weights_only=False) + + if BUFFER_NAMES not in state_dict: + raise ValueError(f"{file} is not a model state checkpoint") + buffer_names = state_dict[BUFFER_NAMES] + if debug: + print("Found buffers:", buffer_names) + + # recover just the buffers while restoring them to fp32 if they were saved in fp16 + buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names} + param_shapes = state_dict[PARAM_SHAPES] + + # collect parameters that are included in param_shapes + param_names = [] + for s in param_shapes: + for name in s.keys(): + param_names.append(name) + + # update with frozen parameters + frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None) + if frozen_param_shapes is not None: + if debug: + print(f"Found frozen_param_shapes: {frozen_param_shapes}") + param_names += list(frozen_param_shapes.keys()) + + # handle shared params + shared_params = [[k, v] for k, v in state_dict["shared_params"].items()] + + ds_version = state_dict.get(DS_VERSION, None) + + frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None) + + z_model_state = zero_model_state(buffers=buffers, + param_shapes=param_shapes, + shared_params=shared_params, + ds_version=ds_version, + frozen_param_shapes=frozen_param_shapes, + frozen_param_fragments=frozen_param_fragments) + zero_model_states.append(z_model_state) + + return zero_model_states + + +def parse_optim_states(files, ds_checkpoint_dir): + total_files = len(files) + state_dicts = [] + for f in tqdm(files, desc='Loading checkpoint shards'): + state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False) + # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights + # and also handle the case where it was already removed by another helper script + state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None) + state_dicts.append(state_dict) + + if not ZERO_STAGE in state_dicts[0][OPTIMIZER_STATE_DICT]: + raise ValueError(f"{files[0]} is not a zero checkpoint") + zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE] + world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT] + + # For ZeRO-2 each param group can have different partition_count as data parallelism for expert + # parameters can be different from data parallelism for non-expert parameters. So we can just + # use the max of the partition_count to get the dp world_size. + + if type(world_size) is list: + world_size = max(world_size) + + if world_size != total_files: + raise ValueError( + f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. " + "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes." 
+ ) + + # the groups are named differently in each stage + if zero_stage <= 2: + fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS + elif zero_stage == 3: + fp32_groups_key = FP32_FLAT_GROUPS + else: + raise ValueError(f"unknown zero stage {zero_stage}") + + fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))] + return zero_stage, world_size, fp32_flat_groups + + +def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters): + """ + Returns fp32 state_dict reconstructed from ds checkpoint + + Args: + - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are) + + """ + print(f"Processing zero checkpoint '{ds_checkpoint_dir}'") + + optim_files = get_optim_files(ds_checkpoint_dir) + zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir) + print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}") + + model_files = get_model_state_files(ds_checkpoint_dir) + + zero_model_states = parse_model_states(model_files) + print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}') + + if zero_stage <= 2: + return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + elif zero_stage == 3: + return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters) + + +def _zero2_merge_frozen_params(state_dict, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + frozen_param_fragments = zero_model_states[0].frozen_param_fragments + + if debug: + num_elem = sum(s.numel() for s in frozen_param_shapes.values()) + print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in frozen_param_fragments.values()]) + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + state_dict[name] = frozen_param_fragments[name] + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +def _has_callable(obj, fn): + attr = getattr(obj, fn, None) + return callable(attr) + + +def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + + # Reconstruction protocol: + # + # XXX: document this + + if debug: + for i in range(world_size): + for j in range(len(fp32_flat_groups[0])): + print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}") + + # XXX: memory usage doubles here (zero2) + num_param_groups = len(fp32_flat_groups[0]) + merged_single_partition_of_fp32_groups = [] + for i in range(num_param_groups): + merged_partitions = [sd[i] for sd in fp32_flat_groups] + full_single_fp32_vector = torch.cat(merged_partitions, 0) + 
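+ # each rank holds a contiguous 1/world_size slice of this param group's flat fp32 buffer,
+ # so concatenating the per-rank partitions in rank order restores the full flat vector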
merged_single_partition_of_fp32_groups.append(full_single_fp32_vector) + avail_numel = sum( + [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups]) + + if debug: + wanted_params = sum([len(shapes) for shapes in param_shapes]) + wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes]) + # not asserting if there is a mismatch due to possible padding + print(f"Have {avail_numel} numels to process.") + print(f"Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + total_numel = 0 + total_params = 0 + for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups): + offset = 0 + avail_numel = full_single_fp32_vector.numel() + for name, shape in shapes.items(): + + unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape) + total_numel += unpartitioned_numel + total_params += 1 + + if debug: + print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ") + state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape) + offset += unpartitioned_numel + + # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and + # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex + # paddings performed in the code it's almost impossible to predict the exact numbers w/o the + # live optimizer object, so we are checking that the numbers are within the right range + align_to = 2 * world_size + + def zero2_align(x): + return align_to * math.ceil(x / align_to) + + if debug: + print(f"original offset={offset}, avail_numel={avail_numel}") + + offset = zero2_align(offset) + avail_numel = zero2_align(avail_numel) + + if debug: + print(f"aligned offset={offset}, avail_numel={avail_numel}") + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero2_merge_frozen_params(state_dict, zero_model_states) + + _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def zero3_partitioned_param_info(unpartitioned_numel, world_size): + remainder = unpartitioned_numel % world_size + padding_numel = (world_size - remainder) if remainder else 0 + partitioned_numel = math.ceil(unpartitioned_numel / world_size) + return partitioned_numel, padding_numel + + +def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states): + if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0: + return + + if debug: + for i in range(world_size): + num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values()) + print(f'rank {i}: 
{FROZEN_PARAM_SHAPES}.numel = {num_elem}') + + frozen_param_shapes = zero_model_states[0].frozen_param_shapes + wanted_params = len(frozen_param_shapes) + wanted_numel = sum(s.numel() for s in frozen_param_shapes.values()) + avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size + print(f'Frozen params: Have {avail_numel} numels to process.') + print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params') + + total_params = 0 + total_numel = 0 + for name, shape in zero_model_states[0].frozen_param_shapes.items(): + total_params += 1 + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + + param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states) + state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape) + + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements") + + +class GatheredTensor: + """ + A pseudo tensor that collects partitioned weights. + It is more memory efficient when there are multiple groups. + """ + + def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape): + self.flat_groups = flat_groups + self.flat_groups_offset = flat_groups_offset + self.offset = offset + self.partitioned_numel = partitioned_numel + self.shape = shape + self.dtype = self.flat_groups[0][0].dtype + + def contiguous(self): + """ + Merge partitioned weights from flat_groups into a single tensor. 
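+
+ Each rank's flat groups hold a contiguous 1/world_size slice of every parameter
+ (ZeRO-3 partitioning); the relevant slice is located per rank, the slices are
+ concatenated in rank order, truncated to ``shape.numel()`` and reshaped.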
+ """ + end_idx = self.offset + self.partitioned_numel + world_size = len(self.flat_groups) + pad_flat_param_chunks = [] + + for rank_i in range(world_size): + # for each rank, we need to collect weights from related group/groups + flat_groups_at_rank_i = self.flat_groups[rank_i] + start_group_id = None + end_group_id = None + for group_id in range(len(self.flat_groups_offset)): + if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]: + start_group_id = group_id + if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]: + end_group_id = group_id + break + # collect weights from related group/groups + for group_id in range(start_group_id, end_group_id + 1): + flat_tensor = flat_groups_at_rank_i[group_id] + start_offset = self.offset - self.flat_groups_offset[group_id] + end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id] + pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset]) + + # collect weights from all ranks + pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0) + param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous() + return param + + +def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states): + param_shapes = zero_model_states[0].param_shapes + avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size + + # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each + # param, re-consolidating each param, while dealing with padding if any + + # merge list of dicts, preserving order + param_shapes = {k: v for d in param_shapes for k, v in d.items()} + + if debug: + for i in range(world_size): + print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}") + + wanted_params = len(param_shapes) + wanted_numel = sum(shape.numel() for shape in param_shapes.values()) + # not asserting if there is a mismatch due to possible padding + avail_numel = fp32_flat_groups[0].numel() * world_size + print(f"Trainable params: Have {avail_numel} numels to process.") + print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.") + + # params + # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support + # out-of-core computing solution + offset = 0 + total_numel = 0 + total_params = 0 + flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]])) + for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'): + unpartitioned_numel = shape.numel() + total_numel += unpartitioned_numel + total_params += 1 + partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size) + + if debug: + print( + f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}" + ) + + # memory efficient tensor + tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape) + state_dict[name] = tensor + offset += partitioned_numel + + offset *= world_size + + # Sanity check + if offset != avail_numel: + raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong") + + print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements") + + +def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, 
zero_model_states, + exclude_frozen_parameters): + state_dict = OrderedDict() + + # buffers + buffers = zero_model_states[0].buffers + state_dict.update(buffers) + if debug: + print(f"added {len(buffers)} buffers") + + if not exclude_frozen_parameters: + _zero3_merge_frozen_params(state_dict, world_size, zero_model_states) + + _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states) + + # recover shared parameters + for pair in zero_model_states[0].shared_params: + if pair[1] in state_dict: + state_dict[pair[0]] = state_dict[pair[1]] + + return state_dict + + +def to_torch_tensor(state_dict, return_empty_tensor=False): + """ + Convert state_dict of GatheredTensor to torch tensor + """ + torch_state_dict = {} + converted_tensors = {} + for name, tensor in state_dict.items(): + tensor_id = id(tensor) + if tensor_id in converted_tensors: # shared tensors + shared_tensor = torch_state_dict[converted_tensors[tensor_id]] + torch_state_dict[name] = shared_tensor + else: + converted_tensors[tensor_id] = name + if return_empty_tensor: + torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype) + else: + torch_state_dict[name] = tensor.contiguous() + return torch_state_dict + + +def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag=None, + exclude_frozen_parameters=False, + lazy_mode=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with + ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example + via a model hub. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in 'latest' file. e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pesduo tensor instead of torch tensor, which is more memory efficient. + Convert the pesduo tensor to torch tensor by ``.contiguous()`` + + Returns: + - pytorch ``state_dict`` + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + # do the training and checkpoint saving + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir) # already on cpu + model = model.cpu() # move to cpu + model.load_state_dict(state_dict) + # submit to model hub or save the model to share with others + + In this example the ``model`` will no longer be usable in the deepspeed context of the same + application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. + + If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead. + + Note: the above usage may not work if your application doesn't have sufficient free CPU memory. + You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with + the checkpoint. 
Or you can load state_dict in lazy mode :: + + from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True) # not on cpu + for name, lazy_tensor in state_dict.item(): + tensor = lazy_tensor.contiguous() # to cpu + print(name, tensor) + # del tensor to release memory if it no longer in use + """ + if tag is None: + latest_path = os.path.join(checkpoint_dir, 'latest') + if os.path.isfile(latest_path): + with open(latest_path, 'r') as fd: + tag = fd.read().strip() + else: + raise ValueError(f"Unable to find 'latest' file at {latest_path}") + + ds_checkpoint_dir = os.path.join(checkpoint_dir, tag) + + if not os.path.isdir(ds_checkpoint_dir): + raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist") + + state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters) + if lazy_mode: + return state_dict + else: + return to_torch_tensor(state_dict) + + +def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir, + output_dir, + max_shard_size="5GB", + safe_serialization=False, + tag=None, + exclude_frozen_parameters=False): + """ + Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be + loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed. + + Args: + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``output_dir``: directory to the pytorch fp32 state_dict output files + - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB + - ``safe_serialization``: whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`). + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + - ``exclude_frozen_parameters``: exclude frozen parameters + """ + + # Dependency pre-check + if safe_serialization: + try: + from safetensors.torch import save_file + except ImportError: + print('If you want to use `safe_serialization`, please `pip install safetensors`') + raise + if max_shard_size is not None: + try: + from huggingface_hub import split_torch_state_dict_into_shards + except ImportError: + print('If you want to use `max_shard_size`, please `pip install huggingface_hub`') + raise + + # Convert zero checkpoint to state_dict + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, + tag, + exclude_frozen_parameters, + lazy_mode=True) + + # Shard the model if it is too big. 
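+ # The shard plan is computed on an empty-tensor copy of the (still lazy) state_dict, so
+ # shard boundaries are derived from shapes/dtypes without materializing the fp32 weights;
+ # each shard is then gathered, written and released one at a time below.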
+ weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin" + if max_shard_size is not None: + filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors") + # an memory-efficient approach for sharding + empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True) + state_dict_split = split_torch_state_dict_into_shards(empty_state_dict, + filename_pattern=filename_pattern, + max_shard_size=max_shard_size) + else: + from collections import namedtuple + StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"]) + state_dict_split = StateDictSplit(is_sharded=False, + filename_to_tensors={weights_name: list(state_dict.keys())}) + + # Save the model by shard + os.makedirs(output_dir, exist_ok=True) + filename_to_tensors = state_dict_split.filename_to_tensors.items() + for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"): + shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors} + shard_state_dict = to_torch_tensor(shard_state_dict) + output_path = os.path.join(output_dir, shard_file) + if safe_serialization: + save_file(shard_state_dict, output_path, metadata={"format": "pt"}) + else: + torch.save(shard_state_dict, output_path) + # release the memory of current shard + for tensor_name in list(shard_state_dict.keys()): + del state_dict[tensor_name] + del shard_state_dict[tensor_name] + del shard_state_dict + gc.collect() + + # Save index if sharded + if state_dict_split.is_sharded: + index = { + "metadata": state_dict_split.metadata, + "weight_map": state_dict_split.tensor_to_filename, + } + save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json" + save_index_file = os.path.join(output_dir, save_index_file) + with open(save_index_file, "w", encoding="utf-8") as f: + content = json.dumps(index, indent=2, sort_keys=True) + "\n" + f.write(content) + + +def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None): + """ + 1. Put the provided model to cpu + 2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` + 3. Load it into the provided model + + Args: + - ``model``: the model object to update + - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``) + - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided will attempt to load tag in the file named ``latest`` in the checkpoint folder, e.g., ``global_step14`` + + Returns: + - ``model`: modified model + + Make sure you have plenty of CPU memory available before you call this function. If you don't + have enough use the ``zero_to_fp32.py`` utility to do the conversion. You will find it + conveniently placed for you in the checkpoint folder. + + A typical usage might be :: + + from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint + model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir) + # submit to model hub or save the model to share with others + + Note, that once this was run, the ``model`` will no longer be usable in the deepspeed context + of the same application. i.e. you will need to re-initialize the deepspeed engine, since + ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it. 
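+
+ Note that the weights are loaded with ``strict=False``, so keys missing from the
+ checkpoint (or unexpected by the model) are skipped rather than raising an error.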
+ + """ + logger.info(f"Extracting fp32 weights") + state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag) + + logger.info(f"Overwriting model with fp32 weights") + model = model.cpu() + model.load_state_dict(state_dict, strict=False) + + return model + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("checkpoint_dir", + type=str, + help="path to the desired checkpoint folder, e.g., path/checkpoint-12") + parser.add_argument("output_dir", + type=str, + help="directory to the pytorch fp32 state_dict output files" + "(e.g. path/checkpoint-12-output/)") + parser.add_argument( + "--max_shard_size", + type=str, + default="5GB", + help="The maximum size for a checkpoint before being sharded. Checkpoints shard will then be each of size" + "lower than this size. If expressed as a string, needs to be digits followed by a unit (like `5MB`" + "We default it to 5GB in order for models to be able to run easily on free-tier google colab instances" + "without CPU OOM issues.") + parser.add_argument( + "--safe_serialization", + default=False, + action='store_true', + help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).") + parser.add_argument("-t", + "--tag", + type=str, + default=None, + help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1") + parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters") + parser.add_argument("-d", "--debug", action='store_true', help="enable debug") + args = parser.parse_args() + + debug = args.debug + + convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir, + args.output_dir, + max_shard_size=args.max_shard_size, + safe_serialization=args.safe_serialization, + tag=args.tag, + exclude_frozen_parameters=args.exclude_frozen_parameters) diff --git a/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740318351.2888598/events.out.tfevents.1740318351.computer4.3891237.1 b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740318351.2888598/events.out.tfevents.1740318351.computer4.3891237.1 new file mode 100644 index 0000000000000000000000000000000000000000..82d817369e0be6c6851806f239c88476db29df85 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740318351.2888598/events.out.tfevents.1740318351.computer4.3891237.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e62dbd54cebc97a668e521f72153ee350e3e7fa0cd60993d52cfdb708fda489 +size 2639 diff --git a/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740318351.2916248/hparams.yml b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740318351.2916248/hparams.yml new file mode 100644 index 0000000000000000000000000000000000000000..bdb968d0915a35144f1078be336d48e819d52971 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740318351.2916248/hparams.yml @@ -0,0 +1,48 @@ +adam_beta1: 0.9 +adam_beta2: 0.999 +adam_epsilon: 1.0e-08 +adam_weight_decay: 0.01 +allow_tf32: false +alpha: 0.9 +cam_ext_mask_prob: -1.0 +checkpointing_period: 2000 +checkpoints_total_limit: 15 +cond_mask_prob: 0.1 +config_path: configs/base.yaml +dataloader_num_workers: 8 +dataset_type: finetune +deepspeed: ./configs/zero2.json +gradient_accumulation_steps: 1 +gradient_checkpointing: false +hub_model_id: null +hub_token: null +image_aug: true +learning_rate: 0.0001 +load_from_hdf5: true +local_rank: 0 
+logging_dir: logs +lr_num_cycles: 1 +lr_power: 1.0 +lr_scheduler: constant_with_warmup +lr_warmup_steps: 500 +max_grad_norm: 1.0 +max_train_steps: 160000 +mixed_precision: bf16 +num_sample_batches: 2 +num_train_epochs: 2667 +output_dir: /mnt/data/xurongtao/checkpoints/afford_1b_three_qwen_warmup_0224 +precomp_lang_embed: false +pretrained_model_name_or_path: null +pretrained_text_encoder_name_or_path: google/t5-v1_1-xxl +pretrained_vision_encoder_name_or_path: google/siglip-so400m-patch14-384 +push_to_hub: false +report_to: tensorboard +resume_from_checkpoint: null +sample_batch_size: 35 +sample_period: 150 +scale_lr: false +seed: null +set_grads_to_none: false +state_noise_snr: 40.0 +train_batch_size: 40 +use_8bit_adam: false diff --git a/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740325720.2930408/events.out.tfevents.1740325720.computer4.4166006.1 b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740325720.2930408/events.out.tfevents.1740325720.computer4.4166006.1 new file mode 100644 index 0000000000000000000000000000000000000000..33f182f7b2ba44a7eb5812e38099a40684e27280 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740325720.2930408/events.out.tfevents.1740325720.computer4.4166006.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ae3200dc2b6dcc5077635014d20145864202625dd4546d6800fd730df4eb8a78 +size 2712 diff --git a/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740325720.2956624/hparams.yml b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740325720.2956624/hparams.yml new file mode 100644 index 0000000000000000000000000000000000000000..07e5537b9f6c7c431e6b1f9d528af3b1f920af2a --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740325720.2956624/hparams.yml @@ -0,0 +1,48 @@ +adam_beta1: 0.9 +adam_beta2: 0.999 +adam_epsilon: 1.0e-08 +adam_weight_decay: 0.01 +allow_tf32: false +alpha: 0.9 +cam_ext_mask_prob: -1.0 +checkpointing_period: 2000 +checkpoints_total_limit: 15 +cond_mask_prob: 0.1 +config_path: configs/base.yaml +dataloader_num_workers: 8 +dataset_type: finetune +deepspeed: ./configs/zero2.json +gradient_accumulation_steps: 1 +gradient_checkpointing: false +hub_model_id: null +hub_token: null +image_aug: true +learning_rate: 0.0001 +load_from_hdf5: true +local_rank: 0 +logging_dir: logs +lr_num_cycles: 1 +lr_power: 1.0 +lr_scheduler: constant_with_warmup +lr_warmup_steps: 500 +max_grad_norm: 1.0 +max_train_steps: 160000 +mixed_precision: bf16 +num_sample_batches: 2 +num_train_epochs: 2963 +output_dir: /mnt/data/xurongtao/checkpoints/afford_1b_three_qwen_warmup_0224 +precomp_lang_embed: false +pretrained_model_name_or_path: null +pretrained_text_encoder_name_or_path: google/t5-v1_1-xxl +pretrained_vision_encoder_name_or_path: google/siglip-so400m-patch14-384 +push_to_hub: false +report_to: tensorboard +resume_from_checkpoint: checkpoint-4000 +sample_batch_size: 35 +sample_period: 150 +scale_lr: false +seed: null +set_grads_to_none: false +state_noise_snr: 40.0 +train_batch_size: 45 +use_8bit_adam: false diff --git a/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740325876.402376/events.out.tfevents.1740325876.computer4.4187731.1 b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740325876.402376/events.out.tfevents.1740325876.computer4.4187731.1 new file mode 100644 index 0000000000000000000000000000000000000000..d11afd0769a9cd04948e886d53784b23c3074e14 --- 
/dev/null +++ b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740325876.402376/events.out.tfevents.1740325876.computer4.4187731.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77879855c7908d06c1061263934696b55cb913972dae6f762fa44b171464307f +size 2712 diff --git a/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740325876.404853/hparams.yml b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740325876.404853/hparams.yml new file mode 100644 index 0000000000000000000000000000000000000000..d408746c3f4d3dc1d5f7c052bd4594fdb438ead0 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740325876.404853/hparams.yml @@ -0,0 +1,48 @@ +adam_beta1: 0.9 +adam_beta2: 0.999 +adam_epsilon: 1.0e-08 +adam_weight_decay: 0.01 +allow_tf32: false +alpha: 0.9 +cam_ext_mask_prob: -1.0 +checkpointing_period: 2000 +checkpoints_total_limit: 15 +cond_mask_prob: 0.1 +config_path: configs/base.yaml +dataloader_num_workers: 8 +dataset_type: finetune +deepspeed: ./configs/zero2.json +gradient_accumulation_steps: 1 +gradient_checkpointing: false +hub_model_id: null +hub_token: null +image_aug: true +learning_rate: 0.0001 +load_from_hdf5: true +local_rank: 0 +logging_dir: logs +lr_num_cycles: 1 +lr_power: 1.0 +lr_scheduler: constant_with_warmup +lr_warmup_steps: 500 +max_grad_norm: 1.0 +max_train_steps: 160000 +mixed_precision: bf16 +num_sample_batches: 2 +num_train_epochs: 3334 +output_dir: /mnt/data/xurongtao/checkpoints/afford_1b_three_qwen_warmup_0224 +precomp_lang_embed: false +pretrained_model_name_or_path: null +pretrained_text_encoder_name_or_path: google/t5-v1_1-xxl +pretrained_vision_encoder_name_or_path: google/siglip-so400m-patch14-384 +push_to_hub: false +report_to: tensorboard +resume_from_checkpoint: checkpoint-4000 +sample_batch_size: 35 +sample_period: 150 +scale_lr: false +seed: null +set_grads_to_none: false +state_noise_snr: 40.0 +train_batch_size: 50 +use_8bit_adam: false diff --git a/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740415627.6681213/events.out.tfevents.1740415627.computer4.3743808.1 b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740415627.6681213/events.out.tfevents.1740415627.computer4.3743808.1 new file mode 100644 index 0000000000000000000000000000000000000000..485e001a5c1a758efee10c6936e3c1ce9792df3e --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740415627.6681213/events.out.tfevents.1740415627.computer4.3743808.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c59409cd63126cedc4f023d249215254828e3abd09061c1215164a8d5d93f883 +size 2713 diff --git a/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740415627.6703348/hparams.yml b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740415627.6703348/hparams.yml new file mode 100644 index 0000000000000000000000000000000000000000..06d3601b1a3a7c6382ed3b64cbbfd40c6c008639 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740415627.6703348/hparams.yml @@ -0,0 +1,48 @@ +adam_beta1: 0.9 +adam_beta2: 0.999 +adam_epsilon: 1.0e-08 +adam_weight_decay: 0.01 +allow_tf32: false +alpha: 0.9 +cam_ext_mask_prob: -1.0 +checkpointing_period: 2000 +checkpoints_total_limit: 15 +cond_mask_prob: 0.1 +config_path: configs/base.yaml +dataloader_num_workers: 8 +dataset_type: finetune +deepspeed: ./configs/zero2.json +gradient_accumulation_steps: 1 +gradient_checkpointing: 
false +hub_model_id: null +hub_token: null +image_aug: true +learning_rate: 0.0001 +load_from_hdf5: true +local_rank: 0 +logging_dir: logs +lr_num_cycles: 1 +lr_power: 1.0 +lr_scheduler: constant_with_warmup +lr_warmup_steps: 500 +max_grad_norm: 1.0 +max_train_steps: 160000 +mixed_precision: bf16 +num_sample_batches: 2 +num_train_epochs: 3334 +output_dir: /mnt/data/xurongtao/checkpoints/afford_1b_three_qwen_warmup_0224 +precomp_lang_embed: false +pretrained_model_name_or_path: null +pretrained_text_encoder_name_or_path: google/t5-v1_1-xxl +pretrained_vision_encoder_name_or_path: google/siglip-so400m-patch14-384 +push_to_hub: false +report_to: tensorboard +resume_from_checkpoint: checkpoint-40000 +sample_batch_size: 35 +sample_period: 150 +scale_lr: false +seed: null +set_grads_to_none: false +state_noise_snr: 40.0 +train_batch_size: 50 +use_8bit_adam: false diff --git a/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740479243.5471084/events.out.tfevents.1740479243.computer4.3883696.1 b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740479243.5471084/events.out.tfevents.1740479243.computer4.3883696.1 new file mode 100644 index 0000000000000000000000000000000000000000..33f80c588c4d1137f2b7a9df5eae79abfaa9e5ae --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740479243.5471084/events.out.tfevents.1740479243.computer4.3883696.1 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df8d2cf46ec79ad610a5bfd50fd2943b51947ad56657cda8ccd6e66da62a4cf1 +size 2713 diff --git a/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740479243.5496354/hparams.yml b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740479243.5496354/hparams.yml new file mode 100644 index 0000000000000000000000000000000000000000..6a8bfd12fa1df2e9f116d76bf395b65118fe8c46 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/1740479243.5496354/hparams.yml @@ -0,0 +1,48 @@ +adam_beta1: 0.9 +adam_beta2: 0.999 +adam_epsilon: 1.0e-08 +adam_weight_decay: 0.01 +allow_tf32: false +alpha: 0.9 +cam_ext_mask_prob: -1.0 +checkpointing_period: 2000 +checkpoints_total_limit: 15 +cond_mask_prob: 0.1 +config_path: configs/base.yaml +dataloader_num_workers: 8 +dataset_type: finetune +deepspeed: ./configs/zero2.json +gradient_accumulation_steps: 1 +gradient_checkpointing: false +hub_model_id: null +hub_token: null +image_aug: true +learning_rate: 0.0001 +load_from_hdf5: true +local_rank: 0 +logging_dir: logs +lr_num_cycles: 1 +lr_power: 1.0 +lr_scheduler: constant_with_warmup +lr_warmup_steps: 500 +max_grad_norm: 1.0 +max_train_steps: 160000 +mixed_precision: bf16 +num_sample_batches: 2 +num_train_epochs: 3334 +output_dir: /mnt/data/xurongtao/checkpoints/afford_1b_three_qwen_warmup_0224 +precomp_lang_embed: false +pretrained_model_name_or_path: null +pretrained_text_encoder_name_or_path: google/t5-v1_1-xxl +pretrained_vision_encoder_name_or_path: google/siglip-so400m-patch14-384 +push_to_hub: false +report_to: tensorboard +resume_from_checkpoint: checkpoint-70000 +sample_batch_size: 35 +sample_period: 150 +scale_lr: false +seed: null +set_grads_to_none: false +state_noise_snr: 40.0 +train_batch_size: 50 +use_8bit_adam: false diff --git a/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/events.out.tfevents.1740318351.computer4.3891237.0 b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/events.out.tfevents.1740318351.computer4.3891237.0 new file mode 100644 
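
Each training restart above re-logged its hyperparameters under logs/roboticDiffusionTransformer/<timestamp>/hparams.yml, and the snapshots differ mainly in num_train_epochs, train_batch_size, and resume_from_checkpoint. A small, hypothetical sketch for diffing two of them with PyYAML (the relative paths mirror the log layout shown above but are otherwise placeholders):

    # Sketch only: load two of the logged hparams.yml snapshots and print the keys
    # whose values changed between runs. Paths are illustrative placeholders.
    import yaml

    def load_hparams(path):
        with open(path, "r") as f:
            return yaml.safe_load(f)

    old = load_hparams("logs/roboticDiffusionTransformer/1740318351.2916248/hparams.yml")
    new = load_hparams("logs/roboticDiffusionTransformer/1740479243.5496354/hparams.yml")

    for key in sorted(old.keys() | new.keys()):
        if old.get(key) != new.get(key):
            print(f"{key}: {old.get(key)} -> {new.get(key)}")

For those two snapshots the changed keys would be num_train_epochs, resume_from_checkpoint, and train_batch_size, consistent with the restart history recorded in the timestamped log directories.
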
index 0000000000000000000000000000000000000000..d424807e953a33345e24f29da6731277e223e1b4 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/events.out.tfevents.1740318351.computer4.3891237.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d1eb5ba077d4fd9163092836347d9f0ed43b9910f7a7a92940fe265124866df +size 980223 diff --git a/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/events.out.tfevents.1740325720.computer4.4166006.0 b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/events.out.tfevents.1740325720.computer4.4166006.0 new file mode 100644 index 0000000000000000000000000000000000000000..96dd83e5c014e7c3d4e1274e8e5b2d534b439719 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/events.out.tfevents.1740325720.computer4.4166006.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:12870892e95080a377c5bd9d7b1569ac65f8eb00a28c908249d2894adc7cb26a +size 8044 diff --git a/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/events.out.tfevents.1740325876.computer4.4187731.0 b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/events.out.tfevents.1740325876.computer4.4187731.0 new file mode 100644 index 0000000000000000000000000000000000000000..669b212b452e1a3e855ba65c9f1c32e605c7a9b5 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/events.out.tfevents.1740325876.computer4.4187731.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6a961b8cfcf29caab6aa2399bf9fc738861fcf0f246d756ebc2e8b2bd5a076e +size 8388608 diff --git a/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/events.out.tfevents.1740415627.computer4.3743808.0 b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/events.out.tfevents.1740415627.computer4.3743808.0 new file mode 100644 index 0000000000000000000000000000000000000000..7dae327713e09524bea26d0ecdf8805333677a80 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/events.out.tfevents.1740415627.computer4.3743808.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:499087c13943a147cc51c1d59f16fa7e4b1644419e250f860c265504775938f2 +size 7053478 diff --git a/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/events.out.tfevents.1740479243.computer4.3883696.0 b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/events.out.tfevents.1740479243.computer4.3883696.0 new file mode 100644 index 0000000000000000000000000000000000000000..495be5c28dcff01ac2da1997f851b89274e201b2 --- /dev/null +++ b/afford_1b_three_qwen_warmup_0224/logs/roboticDiffusionTransformer/events.out.tfevents.1740479243.computer4.3883696.0 @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ee2c15df5439aa2ee19f705bfe07185fd33145997bef94c1273612af40a7f02 +size 88