whisperkittools-86c1a11b9398e0201bcfca4f9cdf2cb8adc41f73 generated files: openai_whisper-tiny.en

Changed files:
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/analytics/coremldata.bin +1 -1
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/coremldata.bin +2 -2
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/metadata.json +2 -2
- openai_whisper-tiny.en/AudioEncoder.mlmodelc/model.mil +129 -105
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/analytics/coremldata.bin +1 -1
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/coremldata.bin +2 -2
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/metadata.json +2 -2
- openai_whisper-tiny.en/MelSpectrogram.mlmodelc/model.mil +1 -1
- openai_whisper-tiny.en/TextDecoder.mlmodelc/analytics/coremldata.bin +1 -1
- openai_whisper-tiny.en/TextDecoder.mlmodelc/coremldata.bin +2 -2
- openai_whisper-tiny.en/TextDecoder.mlmodelc/metadata.json +1 -1
- openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mil +0 -0
openai_whisper-tiny.en/AudioEncoder.mlmodelc/analytics/coremldata.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:eaaaa6671a96a359a0bbd5e97885246dcc17f7435b6ffad8d871bb940964500b
 size 243
openai_whisper-tiny.en/AudioEncoder.mlmodelc/coremldata.bin CHANGED

@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:325b182d0a4266730a81795ae6b7a787b5111dd091500fc0c04dedf610015d46
+size 347
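Both coremldata.bin entries above are Git LFS pointer files, so the diff only changes the recorded SHA-256 and byte size of the regenerated blob. A minimal sketch (not part of this commit; the local path is assumed, and the expected values are copied from the new pointer above) for checking a fetched blob against its pointer:

import hashlib
from pathlib import Path

# Assumed local path to the LFS-tracked blob after `git lfs pull`.
blob_path = Path("openai_whisper-tiny.en/AudioEncoder.mlmodelc/coremldata.bin")

# Expected values copied from the updated Git LFS pointer in the diff above.
expected_oid = "325b182d0a4266730a81795ae6b7a787b5111dd091500fc0c04dedf610015d46"
expected_size = 347

data = blob_path.read_bytes()
if len(data) != expected_size:
    raise SystemExit(f"size mismatch: {len(data)} != {expected_size}")
if hashlib.sha256(data).hexdigest() != expected_oid:
    raise SystemExit("sha256 does not match the LFS pointer oid")
print("coremldata.bin matches its Git LFS pointer")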
openai_whisper-tiny.en/AudioEncoder.mlmodelc/metadata.json CHANGED

@@ -46,8 +46,8 @@
 },
 "userDefinedMetadata" : {
 "com.github.apple.coremltools.source_dialect" : "TorchScript",
-"com.github.apple.coremltools.
-"com.github.apple.coremltools.
+"com.github.apple.coremltools.version" : "8.0",
+"com.github.apple.coremltools.source" : "torch==2.4.1"
 },
 "inputSchema" : [
 {
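The metadata.json hunk above only updates the userDefinedMetadata block, recording that the model was regenerated with coremltools 8.0 from torch==2.4.1. A minimal sketch for reading those keys back from a local copy of the compiled bundle (the path is assumed, and the top-level JSON shape is guessed conservatively since only this fragment of the file is visible in the diff):

import json
from pathlib import Path

# Assumed local path to the compiled Core ML bundle's metadata file.
meta_path = Path("openai_whisper-tiny.en/AudioEncoder.mlmodelc/metadata.json")

with meta_path.open() as f:
    data = json.load(f)

# Only the userDefinedMetadata fragment is visible in the diff, so accept either
# a bare object or a list wrapping a single entry.
entry = data[0] if isinstance(data, list) else data
user_meta = entry.get("userDefinedMetadata", {})

for key in (
    "com.github.apple.coremltools.source_dialect",
    "com.github.apple.coremltools.version",
    "com.github.apple.coremltools.source",
):
    print(f"{key} = {user_meta.get(key)}")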
openai_whisper-tiny.en/AudioEncoder.mlmodelc/model.mil CHANGED

@@ -1,25 +1,25 @@
 program(1.0)
-[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}, {"coremltools-component-torch", "2.4.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.
 {
 func main<ios16>(tensor<fp16, [1, 80, 1, 3000]> melspectrogram_features) {
-tensor<int32, [2]> var_34 = const()[name = tensor<string, []>("op_34"), val = tensor<int32, [2]>([1, 1])];
-tensor<int32, [2]> var_40 = const()[name = tensor<string, []>("op_40"), val = tensor<int32, [2]>([1, 1])];
-tensor<int32, []> var_45 = const()[name = tensor<string, []>("op_45"), val = tensor<int32, []>(1)];
 tensor<string, []> var_50_pad_type_0 = const()[name = tensor<string, []>("op_50_pad_type_0"), val = tensor<string, []>("custom")];
 tensor<int32, [4]> var_50_pad_0 = const()[name = tensor<string, []>("op_50_pad_0"), val = tensor<int32, [4]>([0, 0, 1, 1])];
 tensor<fp16, [384, 80, 1, 3]> var_25_to_fp16 = const()[name = tensor<string, []>("op_25_to_fp16"), val = tensor<fp16, [384, 80, 1, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
 tensor<fp16, [384]> var_31_to_fp16 = const()[name = tensor<string, []>("op_31_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(184448)))];
-tensor<fp16, [1, 384, 1, 3000]> var_50_cast_fp16 = conv(bias = var_31_to_fp16, dilations =
 tensor<string, []> hidden_states_1_mode_0 = const()[name = tensor<string, []>("hidden_states_1_mode_0"), val = tensor<string, []>("EXACT")];
 tensor<fp16, [1, 384, 1, 3000]> hidden_states_1_cast_fp16 = gelu(mode = hidden_states_1_mode_0, x = var_50_cast_fp16)[name = tensor<string, []>("hidden_states_1_cast_fp16")];
-tensor<int32, [2]> var_74 = const()[name = tensor<string, []>("op_74"), val = tensor<int32, [2]>([2, 2])];
-tensor<int32, [2]> var_80 = const()[name = tensor<string, []>("op_80"), val = tensor<int32, [2]>([1, 1])];
-tensor<int32, []> var_85 = const()[name = tensor<string, []>("op_85"), val = tensor<int32, []>(1)];
 tensor<string, []> var_90_pad_type_0 = const()[name = tensor<string, []>("op_90_pad_type_0"), val = tensor<string, []>("custom")];
 tensor<int32, [4]> var_90_pad_0 = const()[name = tensor<string, []>("op_90_pad_0"), val = tensor<int32, [4]>([0, 0, 1, 1])];
 tensor<fp16, [384, 384, 1, 3]> var_65_to_fp16 = const()[name = tensor<string, []>("op_65_to_fp16"), val = tensor<fp16, [384, 384, 1, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(185280)))];
 tensor<fp16, [384]> var_71_to_fp16 = const()[name = tensor<string, []>("op_71_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1070080)))];
-tensor<fp16, [1, 384, 1, 1500]> var_90_cast_fp16 = conv(bias = var_71_to_fp16, dilations =
 tensor<string, []> hidden_states_3_mode_0 = const()[name = tensor<string, []>("hidden_states_3_mode_0"), val = tensor<string, []>("EXACT")];
 tensor<fp16, [1, 384, 1, 1500]> hidden_states_3_cast_fp16 = gelu(mode = hidden_states_3_mode_0, x = var_90_cast_fp16)[name = tensor<string, []>("hidden_states_3_cast_fp16")];
 tensor<fp16, [1, 384, 1, 1500]> var_108_to_fp16 = const()[name = tensor<string, []>("op_108_to_fp16"), val = tensor<fp16, [1, 384, 1, 1500]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1070912)))];
@@ -35,26 +35,29 @@ program(1.0)
|
|
| 35 |
tensor<fp16, [384]> obj_1_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_1_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2225472)))];
|
| 36 |
tensor<fp16, []> obj_1_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_1_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 37 |
tensor<fp16, [1, 384, 1, 1500]> obj_1_cast_fp16 = batch_norm(beta = obj_1_beta_0_to_fp16, epsilon = obj_1_epsilon_0_to_fp16, gamma = obj_1_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_1_cast_fp16)[name = tensor<string, []>("obj_1_cast_fp16")];
|
| 38 |
-
tensor<
|
| 39 |
-
tensor<int32, [2]>
|
| 40 |
-
tensor<string, []> query_1_pad_type_0 = const()[name = tensor<string, []>("query_1_pad_type_0"), val = tensor<string, []>("custom")];
|
| 41 |
tensor<int32, [4]> query_1_pad_0 = const()[name = tensor<string, []>("query_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 42 |
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2226304)))];
|
| 43 |
tensor<fp16, [384]> layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2521280)))];
|
| 44 |
-
tensor<fp16, [1, 384, 1, 1500]> query_1_cast_fp16 = conv(bias = layers_0_self_attn_q_proj_bias_to_fp16, dilations =
|
| 45 |
-
tensor<
|
| 46 |
-
tensor<int32, [2]>
|
| 47 |
-
tensor<string, []> key_1_pad_type_0 = const()[name = tensor<string, []>("key_1_pad_type_0"), val = tensor<string, []>("custom")];
|
| 48 |
tensor<int32, [4]> key_1_pad_0 = const()[name = tensor<string, []>("key_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 49 |
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2522112)))];
|
| 50 |
-
tensor<fp16, [1, 384, 1, 1500]> key_1_cast_fp16 = conv(dilations =
|
| 51 |
-
tensor<
|
| 52 |
-
tensor<int32, [2]>
|
| 53 |
-
tensor<string, []> value_1_pad_type_0 = const()[name = tensor<string, []>("value_1_pad_type_0"), val = tensor<string, []>("custom")];
|
| 54 |
tensor<int32, [4]> value_1_pad_0 = const()[name = tensor<string, []>("value_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 55 |
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2817088)))];
|
| 56 |
tensor<fp16, [384]> layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3112064)))];
|
| 57 |
-
tensor<fp16, [1, 384, 1, 1500]> value_1_cast_fp16 = conv(bias = layers_0_self_attn_v_proj_bias_to_fp16, dilations =
|
| 58 |
tensor<int32, [4]> var_184_begin_0 = const()[name = tensor<string, []>("op_184_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 59 |
tensor<int32, [4]> var_184_end_0 = const()[name = tensor<string, []>("op_184_end_0"), val = tensor<int32, [4]>([1, 64, 1, 1500])];
|
| 60 |
tensor<bool, [4]> var_184_end_mask_0 = const()[name = tensor<string, []>("op_184_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
|
|
@@ -407,13 +410,14 @@ program(1.0)
|
|
| 407 |
tensor<fp16, [1, 64, 1, 1500]> var_603_cast_fp16 = concat(axis = var_118, interleave = var_603_interleave_0, values = (var_585_cast_fp16, var_587_cast_fp16, var_589_cast_fp16, var_591_cast_fp16))[name = tensor<string, []>("op_603_cast_fp16")];
|
| 408 |
tensor<bool, []> input_1_interleave_0 = const()[name = tensor<string, []>("input_1_interleave_0"), val = tensor<bool, []>(false)];
|
| 409 |
tensor<fp16, [1, 384, 1, 1500]> input_1_cast_fp16 = concat(axis = var_129, interleave = input_1_interleave_0, values = (var_593_cast_fp16, var_595_cast_fp16, var_597_cast_fp16, var_599_cast_fp16, var_601_cast_fp16, var_603_cast_fp16))[name = tensor<string, []>("input_1_cast_fp16")];
|
| 410 |
-
tensor<
|
| 411 |
-
tensor<int32, [2]>
|
| 412 |
-
tensor<string, []> obj_3_pad_type_0 = const()[name = tensor<string, []>("obj_3_pad_type_0"), val = tensor<string, []>("custom")];
|
| 413 |
tensor<int32, [4]> obj_3_pad_0 = const()[name = tensor<string, []>("obj_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 414 |
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3112896)))];
|
| 415 |
tensor<fp16, [384]> layers_0_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3407872)))];
|
| 416 |
-
tensor<fp16, [1, 384, 1, 1500]> obj_3_cast_fp16 = conv(bias = layers_0_self_attn_o_proj_bias_to_fp16, dilations =
|
| 417 |
tensor<fp16, [1, 384, 1, 1500]> inputs_3_cast_fp16 = add(x = inputs_1_cast_fp16, y = obj_3_cast_fp16)[name = tensor<string, []>("inputs_3_cast_fp16")];
|
| 418 |
tensor<int32, [1]> out_3_axes_0 = const()[name = tensor<string, []>("out_3_axes_0"), val = tensor<int32, [1]>([1])];
|
| 419 |
tensor<fp16, []> var_622_to_fp16 = const()[name = tensor<string, []>("op_622_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
|
@@ -422,22 +426,24 @@ program(1.0)
|
|
| 422 |
tensor<fp16, [384]> input_3_beta_0_to_fp16 = const()[name = tensor<string, []>("input_3_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3409536)))];
|
| 423 |
tensor<fp16, []> input_3_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_3_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 424 |
tensor<fp16, [1, 384, 1, 1500]> input_3_cast_fp16 = batch_norm(beta = input_3_beta_0_to_fp16, epsilon = input_3_epsilon_0_to_fp16, gamma = input_3_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_3_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
|
| 425 |
-
tensor<
|
| 426 |
-
tensor<int32, [2]>
|
| 427 |
-
tensor<string, []> input_5_pad_type_0 = const()[name = tensor<string, []>("input_5_pad_type_0"), val = tensor<string, []>("custom")];
|
| 428 |
tensor<int32, [4]> input_5_pad_0 = const()[name = tensor<string, []>("input_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 429 |
tensor<fp16, [1536, 384, 1, 1]> layers_0_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3410368)))];
|
| 430 |
tensor<fp16, [1536]> layers_0_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4590080)))];
|
| 431 |
-
tensor<fp16, [1, 1536, 1, 1500]> input_5_cast_fp16 = conv(bias = layers_0_fc1_bias_to_fp16, dilations =
|
| 432 |
tensor<string, []> input_7_mode_0 = const()[name = tensor<string, []>("input_7_mode_0"), val = tensor<string, []>("EXACT")];
|
| 433 |
tensor<fp16, [1, 1536, 1, 1500]> input_7_cast_fp16 = gelu(mode = input_7_mode_0, x = input_5_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
|
| 434 |
-
tensor<
|
| 435 |
-
tensor<int32, [2]>
|
| 436 |
-
tensor<string, []> hidden_states_5_pad_type_0 = const()[name = tensor<string, []>("hidden_states_5_pad_type_0"), val = tensor<string, []>("custom")];
|
| 437 |
tensor<int32, [4]> hidden_states_5_pad_0 = const()[name = tensor<string, []>("hidden_states_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 438 |
tensor<fp16, [384, 1536, 1, 1]> layers_0_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4593216)))];
|
| 439 |
tensor<fp16, [384]> layers_0_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5772928)))];
|
| 440 |
-
tensor<fp16, [1, 384, 1, 1500]> hidden_states_5_cast_fp16 = conv(bias = layers_0_fc2_bias_to_fp16, dilations =
|
| 441 |
tensor<fp16, [1, 384, 1, 1500]> inputs_5_cast_fp16 = add(x = inputs_3_cast_fp16, y = hidden_states_5_cast_fp16)[name = tensor<string, []>("inputs_5_cast_fp16")];
|
| 442 |
tensor<int32, []> var_651 = const()[name = tensor<string, []>("op_651"), val = tensor<int32, []>(3)];
|
| 443 |
tensor<int32, []> var_662 = const()[name = tensor<string, []>("op_662"), val = tensor<int32, []>(1)];
|
|
@@ -448,26 +454,29 @@ program(1.0)
|
|
| 448 |
tensor<fp16, [384]> obj_5_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_5_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5774592)))];
|
| 449 |
tensor<fp16, []> obj_5_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_5_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 450 |
tensor<fp16, [1, 384, 1, 1500]> obj_5_cast_fp16 = batch_norm(beta = obj_5_beta_0_to_fp16, epsilon = obj_5_epsilon_0_to_fp16, gamma = obj_5_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_5_cast_fp16)[name = tensor<string, []>("obj_5_cast_fp16")];
|
| 451 |
-
tensor<
|
| 452 |
-
tensor<int32, [2]>
|
| 453 |
-
tensor<string, []> query_3_pad_type_0 = const()[name = tensor<string, []>("query_3_pad_type_0"), val = tensor<string, []>("custom")];
|
| 454 |
tensor<int32, [4]> query_3_pad_0 = const()[name = tensor<string, []>("query_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 455 |
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5775424)))];
|
| 456 |
tensor<fp16, [384]> layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6070400)))];
|
| 457 |
-
tensor<fp16, [1, 384, 1, 1500]> query_3_cast_fp16 = conv(bias = layers_1_self_attn_q_proj_bias_to_fp16, dilations =
|
| 458 |
-
tensor<
|
| 459 |
-
tensor<int32, [2]>
|
| 460 |
-
tensor<string, []> key_3_pad_type_0 = const()[name = tensor<string, []>("key_3_pad_type_0"), val = tensor<string, []>("custom")];
|
| 461 |
tensor<int32, [4]> key_3_pad_0 = const()[name = tensor<string, []>("key_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 462 |
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6071232)))];
|
| 463 |
-
tensor<fp16, [1, 384, 1, 1500]> key_3_cast_fp16 = conv(dilations =
|
| 464 |
-
tensor<
|
| 465 |
-
tensor<int32, [2]>
|
| 466 |
-
tensor<string, []> value_3_pad_type_0 = const()[name = tensor<string, []>("value_3_pad_type_0"), val = tensor<string, []>("custom")];
|
| 467 |
tensor<int32, [4]> value_3_pad_0 = const()[name = tensor<string, []>("value_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 468 |
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6366208)))];
|
| 469 |
tensor<fp16, [384]> layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6661184)))];
|
| 470 |
-
tensor<fp16, [1, 384, 1, 1500]> value_3_cast_fp16 = conv(bias = layers_1_self_attn_v_proj_bias_to_fp16, dilations =
|
| 471 |
tensor<int32, [4]> var_717_begin_0 = const()[name = tensor<string, []>("op_717_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 472 |
tensor<int32, [4]> var_717_end_0 = const()[name = tensor<string, []>("op_717_end_0"), val = tensor<int32, [4]>([1, 64, 1, 1500])];
|
| 473 |
tensor<bool, [4]> var_717_end_mask_0 = const()[name = tensor<string, []>("op_717_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
|
|
@@ -820,13 +829,14 @@ program(1.0)
|
|
| 820 |
tensor<fp16, [1, 64, 1, 1500]> var_1136_cast_fp16 = concat(axis = var_651, interleave = var_1136_interleave_0, values = (var_1118_cast_fp16, var_1120_cast_fp16, var_1122_cast_fp16, var_1124_cast_fp16))[name = tensor<string, []>("op_1136_cast_fp16")];
|
| 821 |
tensor<bool, []> input_9_interleave_0 = const()[name = tensor<string, []>("input_9_interleave_0"), val = tensor<bool, []>(false)];
|
| 822 |
tensor<fp16, [1, 384, 1, 1500]> input_9_cast_fp16 = concat(axis = var_662, interleave = input_9_interleave_0, values = (var_1126_cast_fp16, var_1128_cast_fp16, var_1130_cast_fp16, var_1132_cast_fp16, var_1134_cast_fp16, var_1136_cast_fp16))[name = tensor<string, []>("input_9_cast_fp16")];
|
| 823 |
-
tensor<
|
| 824 |
-
tensor<int32, [2]>
|
| 825 |
-
tensor<string, []> obj_7_pad_type_0 = const()[name = tensor<string, []>("obj_7_pad_type_0"), val = tensor<string, []>("custom")];
|
| 826 |
tensor<int32, [4]> obj_7_pad_0 = const()[name = tensor<string, []>("obj_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 827 |
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6662016)))];
|
| 828 |
tensor<fp16, [384]> layers_1_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6956992)))];
|
| 829 |
-
tensor<fp16, [1, 384, 1, 1500]> obj_7_cast_fp16 = conv(bias = layers_1_self_attn_o_proj_bias_to_fp16, dilations =
|
| 830 |
tensor<fp16, [1, 384, 1, 1500]> inputs_7_cast_fp16 = add(x = inputs_5_cast_fp16, y = obj_7_cast_fp16)[name = tensor<string, []>("inputs_7_cast_fp16")];
|
| 831 |
tensor<int32, [1]> out_7_axes_0 = const()[name = tensor<string, []>("out_7_axes_0"), val = tensor<int32, [1]>([1])];
|
| 832 |
tensor<fp16, []> var_1155_to_fp16 = const()[name = tensor<string, []>("op_1155_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
|
@@ -835,22 +845,24 @@ program(1.0)
|
|
| 835 |
tensor<fp16, [384]> input_11_beta_0_to_fp16 = const()[name = tensor<string, []>("input_11_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6958656)))];
|
| 836 |
tensor<fp16, []> input_11_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_11_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 837 |
tensor<fp16, [1, 384, 1, 1500]> input_11_cast_fp16 = batch_norm(beta = input_11_beta_0_to_fp16, epsilon = input_11_epsilon_0_to_fp16, gamma = input_11_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_7_cast_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
|
| 838 |
-
tensor<
|
| 839 |
-
tensor<int32, [2]>
|
| 840 |
-
tensor<string, []> input_13_pad_type_0 = const()[name = tensor<string, []>("input_13_pad_type_0"), val = tensor<string, []>("custom")];
|
| 841 |
tensor<int32, [4]> input_13_pad_0 = const()[name = tensor<string, []>("input_13_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 842 |
tensor<fp16, [1536, 384, 1, 1]> layers_1_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6959488)))];
|
| 843 |
tensor<fp16, [1536]> layers_1_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8139200)))];
|
| 844 |
-
tensor<fp16, [1, 1536, 1, 1500]> input_13_cast_fp16 = conv(bias = layers_1_fc1_bias_to_fp16, dilations =
|
| 845 |
tensor<string, []> input_15_mode_0 = const()[name = tensor<string, []>("input_15_mode_0"), val = tensor<string, []>("EXACT")];
|
| 846 |
tensor<fp16, [1, 1536, 1, 1500]> input_15_cast_fp16 = gelu(mode = input_15_mode_0, x = input_13_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")];
|
| 847 |
-
tensor<
|
| 848 |
-
tensor<int32, [2]>
|
| 849 |
-
tensor<string, []> hidden_states_7_pad_type_0 = const()[name = tensor<string, []>("hidden_states_7_pad_type_0"), val = tensor<string, []>("custom")];
|
| 850 |
tensor<int32, [4]> hidden_states_7_pad_0 = const()[name = tensor<string, []>("hidden_states_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 851 |
tensor<fp16, [384, 1536, 1, 1]> layers_1_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8142336)))];
|
| 852 |
tensor<fp16, [384]> layers_1_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9322048)))];
|
| 853 |
-
tensor<fp16, [1, 384, 1, 1500]> hidden_states_7_cast_fp16 = conv(bias = layers_1_fc2_bias_to_fp16, dilations =
|
| 854 |
tensor<fp16, [1, 384, 1, 1500]> inputs_9_cast_fp16 = add(x = inputs_7_cast_fp16, y = hidden_states_7_cast_fp16)[name = tensor<string, []>("inputs_9_cast_fp16")];
|
| 855 |
tensor<int32, []> var_1184 = const()[name = tensor<string, []>("op_1184"), val = tensor<int32, []>(3)];
|
| 856 |
tensor<int32, []> var_1195 = const()[name = tensor<string, []>("op_1195"), val = tensor<int32, []>(1)];
|
|
@@ -861,26 +873,29 @@ program(1.0)
|
|
| 861 |
tensor<fp16, [384]> obj_9_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_9_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9323712)))];
|
| 862 |
tensor<fp16, []> obj_9_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_9_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 863 |
tensor<fp16, [1, 384, 1, 1500]> obj_9_cast_fp16 = batch_norm(beta = obj_9_beta_0_to_fp16, epsilon = obj_9_epsilon_0_to_fp16, gamma = obj_9_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_9_cast_fp16)[name = tensor<string, []>("obj_9_cast_fp16")];
|
| 864 |
-
tensor<
|
| 865 |
-
tensor<int32, [2]>
|
| 866 |
-
tensor<string, []> query_5_pad_type_0 = const()[name = tensor<string, []>("query_5_pad_type_0"), val = tensor<string, []>("custom")];
|
| 867 |
tensor<int32, [4]> query_5_pad_0 = const()[name = tensor<string, []>("query_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 868 |
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9324544)))];
|
| 869 |
tensor<fp16, [384]> layers_2_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9619520)))];
|
| 870 |
-
tensor<fp16, [1, 384, 1, 1500]> query_5_cast_fp16 = conv(bias = layers_2_self_attn_q_proj_bias_to_fp16, dilations =
|
| 871 |
-
tensor<
|
| 872 |
-
tensor<int32, [2]>
|
| 873 |
-
tensor<string, []> key_5_pad_type_0 = const()[name = tensor<string, []>("key_5_pad_type_0"), val = tensor<string, []>("custom")];
|
| 874 |
tensor<int32, [4]> key_5_pad_0 = const()[name = tensor<string, []>("key_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 875 |
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9620352)))];
|
| 876 |
-
tensor<fp16, [1, 384, 1, 1500]> key_5_cast_fp16 = conv(dilations =
|
| 877 |
-
tensor<
|
| 878 |
-
tensor<int32, [2]>
|
| 879 |
-
tensor<string, []> value_5_pad_type_0 = const()[name = tensor<string, []>("value_5_pad_type_0"), val = tensor<string, []>("custom")];
|
| 880 |
tensor<int32, [4]> value_5_pad_0 = const()[name = tensor<string, []>("value_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 881 |
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9915328)))];
|
| 882 |
tensor<fp16, [384]> layers_2_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10210304)))];
|
| 883 |
-
tensor<fp16, [1, 384, 1, 1500]> value_5_cast_fp16 = conv(bias = layers_2_self_attn_v_proj_bias_to_fp16, dilations =
|
| 884 |
tensor<int32, [4]> var_1250_begin_0 = const()[name = tensor<string, []>("op_1250_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 885 |
tensor<int32, [4]> var_1250_end_0 = const()[name = tensor<string, []>("op_1250_end_0"), val = tensor<int32, [4]>([1, 64, 1, 1500])];
|
| 886 |
tensor<bool, [4]> var_1250_end_mask_0 = const()[name = tensor<string, []>("op_1250_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
|
|
@@ -1233,13 +1248,14 @@ program(1.0)
|
|
| 1233 |
tensor<fp16, [1, 64, 1, 1500]> var_1669_cast_fp16 = concat(axis = var_1184, interleave = var_1669_interleave_0, values = (var_1651_cast_fp16, var_1653_cast_fp16, var_1655_cast_fp16, var_1657_cast_fp16))[name = tensor<string, []>("op_1669_cast_fp16")];
|
| 1234 |
tensor<bool, []> input_17_interleave_0 = const()[name = tensor<string, []>("input_17_interleave_0"), val = tensor<bool, []>(false)];
|
| 1235 |
tensor<fp16, [1, 384, 1, 1500]> input_17_cast_fp16 = concat(axis = var_1195, interleave = input_17_interleave_0, values = (var_1659_cast_fp16, var_1661_cast_fp16, var_1663_cast_fp16, var_1665_cast_fp16, var_1667_cast_fp16, var_1669_cast_fp16))[name = tensor<string, []>("input_17_cast_fp16")];
|
| 1236 |
-
tensor<
|
| 1237 |
-
tensor<int32, [2]>
|
| 1238 |
-
tensor<string, []> obj_11_pad_type_0 = const()[name = tensor<string, []>("obj_11_pad_type_0"), val = tensor<string, []>("custom")];
|
| 1239 |
tensor<int32, [4]> obj_11_pad_0 = const()[name = tensor<string, []>("obj_11_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 1240 |
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10211136)))];
|
| 1241 |
tensor<fp16, [384]> layers_2_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10506112)))];
|
| 1242 |
-
tensor<fp16, [1, 384, 1, 1500]> obj_11_cast_fp16 = conv(bias = layers_2_self_attn_o_proj_bias_to_fp16, dilations =
|
| 1243 |
tensor<fp16, [1, 384, 1, 1500]> inputs_11_cast_fp16 = add(x = inputs_9_cast_fp16, y = obj_11_cast_fp16)[name = tensor<string, []>("inputs_11_cast_fp16")];
|
| 1244 |
tensor<int32, [1]> out_11_axes_0 = const()[name = tensor<string, []>("out_11_axes_0"), val = tensor<int32, [1]>([1])];
|
| 1245 |
tensor<fp16, []> var_1688_to_fp16 = const()[name = tensor<string, []>("op_1688_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
|
@@ -1248,22 +1264,24 @@ program(1.0)
|
|
| 1248 |
tensor<fp16, [384]> input_19_beta_0_to_fp16 = const()[name = tensor<string, []>("input_19_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10507776)))];
|
| 1249 |
tensor<fp16, []> input_19_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_19_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 1250 |
tensor<fp16, [1, 384, 1, 1500]> input_19_cast_fp16 = batch_norm(beta = input_19_beta_0_to_fp16, epsilon = input_19_epsilon_0_to_fp16, gamma = input_19_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_11_cast_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
|
| 1251 |
-
tensor<
|
| 1252 |
-
tensor<int32, [2]>
|
| 1253 |
-
tensor<string, []> input_21_pad_type_0 = const()[name = tensor<string, []>("input_21_pad_type_0"), val = tensor<string, []>("custom")];
|
| 1254 |
tensor<int32, [4]> input_21_pad_0 = const()[name = tensor<string, []>("input_21_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 1255 |
tensor<fp16, [1536, 384, 1, 1]> layers_2_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10508608)))];
|
| 1256 |
tensor<fp16, [1536]> layers_2_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11688320)))];
|
| 1257 |
-
tensor<fp16, [1, 1536, 1, 1500]> input_21_cast_fp16 = conv(bias = layers_2_fc1_bias_to_fp16, dilations =
|
| 1258 |
tensor<string, []> input_23_mode_0 = const()[name = tensor<string, []>("input_23_mode_0"), val = tensor<string, []>("EXACT")];
|
| 1259 |
tensor<fp16, [1, 1536, 1, 1500]> input_23_cast_fp16 = gelu(mode = input_23_mode_0, x = input_21_cast_fp16)[name = tensor<string, []>("input_23_cast_fp16")];
|
| 1260 |
-
tensor<
|
| 1261 |
-
tensor<int32, [2]>
|
| 1262 |
-
tensor<string, []> hidden_states_9_pad_type_0 = const()[name = tensor<string, []>("hidden_states_9_pad_type_0"), val = tensor<string, []>("custom")];
|
| 1263 |
tensor<int32, [4]> hidden_states_9_pad_0 = const()[name = tensor<string, []>("hidden_states_9_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 1264 |
tensor<fp16, [384, 1536, 1, 1]> layers_2_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11691456)))];
|
| 1265 |
tensor<fp16, [384]> layers_2_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(12871168)))];
|
| 1266 |
-
tensor<fp16, [1, 384, 1, 1500]> hidden_states_9_cast_fp16 = conv(bias = layers_2_fc2_bias_to_fp16, dilations =
|
| 1267 |
tensor<fp16, [1, 384, 1, 1500]> inputs_13_cast_fp16 = add(x = inputs_11_cast_fp16, y = hidden_states_9_cast_fp16)[name = tensor<string, []>("inputs_13_cast_fp16")];
|
| 1268 |
tensor<int32, []> var_1717 = const()[name = tensor<string, []>("op_1717"), val = tensor<int32, []>(3)];
|
| 1269 |
tensor<int32, []> var_1728 = const()[name = tensor<string, []>("op_1728"), val = tensor<int32, []>(1)];
|
|
@@ -1274,26 +1292,29 @@ program(1.0)
|
|
| 1274 |
tensor<fp16, [384]> obj_13_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_13_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(12872832)))];
|
| 1275 |
tensor<fp16, []> obj_13_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_13_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 1276 |
tensor<fp16, [1, 384, 1, 1500]> obj_13_cast_fp16 = batch_norm(beta = obj_13_beta_0_to_fp16, epsilon = obj_13_epsilon_0_to_fp16, gamma = obj_13_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_13_cast_fp16)[name = tensor<string, []>("obj_13_cast_fp16")];
|
| 1277 |
-
tensor<
|
| 1278 |
-
tensor<int32, [2]>
|
| 1279 |
-
tensor<string, []> query_pad_type_0 = const()[name = tensor<string, []>("query_pad_type_0"), val = tensor<string, []>("custom")];
|
| 1280 |
tensor<int32, [4]> query_pad_0 = const()[name = tensor<string, []>("query_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 1281 |
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(12873664)))];
|
| 1282 |
tensor<fp16, [384]> layers_3_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13168640)))];
|
| 1283 |
-
tensor<fp16, [1, 384, 1, 1500]> query_cast_fp16 = conv(bias = layers_3_self_attn_q_proj_bias_to_fp16, dilations =
|
| 1284 |
-
tensor<
|
| 1285 |
-
tensor<int32, [2]>
|
| 1286 |
-
tensor<string, []> key_pad_type_0 = const()[name = tensor<string, []>("key_pad_type_0"), val = tensor<string, []>("custom")];
|
| 1287 |
tensor<int32, [4]> key_pad_0 = const()[name = tensor<string, []>("key_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 1288 |
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13169472)))];
|
| 1289 |
-
tensor<fp16, [1, 384, 1, 1500]> key_cast_fp16 = conv(dilations =
|
| 1290 |
-
tensor<
|
| 1291 |
-
tensor<int32, [2]>
|
| 1292 |
-
tensor<string, []> value_pad_type_0 = const()[name = tensor<string, []>("value_pad_type_0"), val = tensor<string, []>("custom")];
|
| 1293 |
tensor<int32, [4]> value_pad_0 = const()[name = tensor<string, []>("value_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 1294 |
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13464448)))];
|
| 1295 |
tensor<fp16, [384]> layers_3_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13759424)))];
|
| 1296 |
-
tensor<fp16, [1, 384, 1, 1500]> value_cast_fp16 = conv(bias = layers_3_self_attn_v_proj_bias_to_fp16, dilations =
|
| 1297 |
tensor<int32, [4]> var_1783_begin_0 = const()[name = tensor<string, []>("op_1783_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 1298 |
tensor<int32, [4]> var_1783_end_0 = const()[name = tensor<string, []>("op_1783_end_0"), val = tensor<int32, [4]>([1, 64, 1, 1500])];
|
| 1299 |
tensor<bool, [4]> var_1783_end_mask_0 = const()[name = tensor<string, []>("op_1783_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
|
|
@@ -1646,13 +1667,14 @@ program(1.0)
|
|
| 1646 |
tensor<fp16, [1, 64, 1, 1500]> var_2202_cast_fp16 = concat(axis = var_1717, interleave = var_2202_interleave_0, values = (var_2184_cast_fp16, var_2186_cast_fp16, var_2188_cast_fp16, var_2190_cast_fp16))[name = tensor<string, []>("op_2202_cast_fp16")];
|
| 1647 |
tensor<bool, []> input_25_interleave_0 = const()[name = tensor<string, []>("input_25_interleave_0"), val = tensor<bool, []>(false)];
|
| 1648 |
tensor<fp16, [1, 384, 1, 1500]> input_25_cast_fp16 = concat(axis = var_1728, interleave = input_25_interleave_0, values = (var_2192_cast_fp16, var_2194_cast_fp16, var_2196_cast_fp16, var_2198_cast_fp16, var_2200_cast_fp16, var_2202_cast_fp16))[name = tensor<string, []>("input_25_cast_fp16")];
|
| 1649 |
-
tensor<
|
| 1650 |
-
tensor<int32, [2]>
|
| 1651 |
-
tensor<string, []> obj_pad_type_0 = const()[name = tensor<string, []>("obj_pad_type_0"), val = tensor<string, []>("custom")];
|
| 1652 |
tensor<int32, [4]> obj_pad_0 = const()[name = tensor<string, []>("obj_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 1653 |
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13760256)))];
|
| 1654 |
tensor<fp16, [384]> layers_3_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(14055232)))];
|
| 1655 |
-
tensor<fp16, [1, 384, 1, 1500]> obj_cast_fp16 = conv(bias = layers_3_self_attn_o_proj_bias_to_fp16, dilations =
|
| 1656 |
tensor<fp16, [1, 384, 1, 1500]> inputs_15_cast_fp16 = add(x = inputs_13_cast_fp16, y = obj_cast_fp16)[name = tensor<string, []>("inputs_15_cast_fp16")];
|
| 1657 |
tensor<int32, [1]> out_15_axes_0 = const()[name = tensor<string, []>("out_15_axes_0"), val = tensor<int32, [1]>([1])];
|
| 1658 |
tensor<fp16, []> var_2221_to_fp16 = const()[name = tensor<string, []>("op_2221_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
|
@@ -1661,22 +1683,24 @@ program(1.0)
|
|
| 1661 |
tensor<fp16, [384]> input_27_beta_0_to_fp16 = const()[name = tensor<string, []>("input_27_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(14056896)))];
|
| 1662 |
tensor<fp16, []> input_27_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_27_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 1663 |
tensor<fp16, [1, 384, 1, 1500]> input_27_cast_fp16 = batch_norm(beta = input_27_beta_0_to_fp16, epsilon = input_27_epsilon_0_to_fp16, gamma = input_27_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_15_cast_fp16)[name = tensor<string, []>("input_27_cast_fp16")];
|
| 1664 |
-
tensor<
|
| 1665 |
-
tensor<int32, [2]>
|
| 1666 |
-
tensor<string, []> input_29_pad_type_0 = const()[name = tensor<string, []>("input_29_pad_type_0"), val = tensor<string, []>("custom")];
|
| 1667 |
tensor<int32, [4]> input_29_pad_0 = const()[name = tensor<string, []>("input_29_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 1668 |
tensor<fp16, [1536, 384, 1, 1]> layers_3_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(14057728)))];
|
| 1669 |
tensor<fp16, [1536]> layers_3_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15237440)))];
|
| 1670 |
-
tensor<fp16, [1, 1536, 1, 1500]> input_29_cast_fp16 = conv(bias = layers_3_fc1_bias_to_fp16, dilations =
|
| 1671 |
tensor<string, []> input_mode_0 = const()[name = tensor<string, []>("input_mode_0"), val = tensor<string, []>("EXACT")];
|
| 1672 |
tensor<fp16, [1, 1536, 1, 1500]> input_cast_fp16 = gelu(mode = input_mode_0, x = input_29_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
|
| 1673 |
-
tensor<
|
| 1674 |
-
tensor<int32, [2]>
|
| 1675 |
-
tensor<string, []> hidden_states_pad_type_0 = const()[name = tensor<string, []>("hidden_states_pad_type_0"), val = tensor<string, []>("custom")];
|
| 1676 |
tensor<int32, [4]> hidden_states_pad_0 = const()[name = tensor<string, []>("hidden_states_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
|
|
|
|
|
|
| 1677 |
tensor<fp16, [384, 1536, 1, 1]> layers_3_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15240576)))];
|
| 1678 |
tensor<fp16, [384]> layers_3_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16420288)))];
|
| 1679 |
-
tensor<fp16, [1, 384, 1, 1500]> hidden_states_cast_fp16 = conv(bias = layers_3_fc2_bias_to_fp16, dilations =
|
| 1680 |
tensor<fp16, [1, 384, 1, 1500]> inputs_cast_fp16 = add(x = inputs_15_cast_fp16, y = hidden_states_cast_fp16)[name = tensor<string, []>("inputs_cast_fp16")];
|
| 1681 |
tensor<int32, [1]> out_axes_0 = const()[name = tensor<string, []>("out_axes_0"), val = tensor<int32, [1]>([1])];
|
| 1682 |
tensor<fp16, []> var_2259_to_fp16 = const()[name = tensor<string, []>("op_2259_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
|
|
|
| 1 |
program(1.0)
|
| 2 |
+
[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}, {"coremltools-component-torch", "2.4.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})]
|
| 3 |
{
|
| 4 |
func main<ios16>(tensor<fp16, [1, 80, 1, 3000]> melspectrogram_features) {
|
|
|
|
|
|
|
|
|
|
| 5 |
tensor<string, []> var_50_pad_type_0 = const()[name = tensor<string, []>("op_50_pad_type_0"), val = tensor<string, []>("custom")];
|
| 6 |
tensor<int32, [4]> var_50_pad_0 = const()[name = tensor<string, []>("op_50_pad_0"), val = tensor<int32, [4]>([0, 0, 1, 1])];
|
| 7 |
+
tensor<int32, [2]> var_50_strides_0 = const()[name = tensor<string, []>("op_50_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
| 8 |
+
tensor<int32, [2]> var_50_dilations_0 = const()[name = tensor<string, []>("op_50_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 9 |
+
tensor<int32, []> var_50_groups_0 = const()[name = tensor<string, []>("op_50_groups_0"), val = tensor<int32, []>(1)];
|
| 10 |
tensor<fp16, [384, 80, 1, 3]> var_25_to_fp16 = const()[name = tensor<string, []>("op_25_to_fp16"), val = tensor<fp16, [384, 80, 1, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(64)))];
|
| 11 |
tensor<fp16, [384]> var_31_to_fp16 = const()[name = tensor<string, []>("op_31_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(184448)))];
|
| 12 |
+
tensor<fp16, [1, 384, 1, 3000]> var_50_cast_fp16 = conv(bias = var_31_to_fp16, dilations = var_50_dilations_0, groups = var_50_groups_0, pad = var_50_pad_0, pad_type = var_50_pad_type_0, strides = var_50_strides_0, weight = var_25_to_fp16, x = melspectrogram_features)[name = tensor<string, []>("op_50_cast_fp16")];
|
| 13 |
tensor<string, []> hidden_states_1_mode_0 = const()[name = tensor<string, []>("hidden_states_1_mode_0"), val = tensor<string, []>("EXACT")];
|
| 14 |
tensor<fp16, [1, 384, 1, 3000]> hidden_states_1_cast_fp16 = gelu(mode = hidden_states_1_mode_0, x = var_50_cast_fp16)[name = tensor<string, []>("hidden_states_1_cast_fp16")];
|
|
|
|
|
|
|
|
|
|
| 15 |
tensor<string, []> var_90_pad_type_0 = const()[name = tensor<string, []>("op_90_pad_type_0"), val = tensor<string, []>("custom")];
|
| 16 |
tensor<int32, [4]> var_90_pad_0 = const()[name = tensor<string, []>("op_90_pad_0"), val = tensor<int32, [4]>([0, 0, 1, 1])];
|
| 17 |
+
tensor<int32, [2]> var_90_strides_0 = const()[name = tensor<string, []>("op_90_strides_0"), val = tensor<int32, [2]>([2, 2])];
|
| 18 |
+
tensor<int32, [2]> var_90_dilations_0 = const()[name = tensor<string, []>("op_90_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 19 |
+
tensor<int32, []> var_90_groups_0 = const()[name = tensor<string, []>("op_90_groups_0"), val = tensor<int32, []>(1)];
|
| 20 |
tensor<fp16, [384, 384, 1, 3]> var_65_to_fp16 = const()[name = tensor<string, []>("op_65_to_fp16"), val = tensor<fp16, [384, 384, 1, 3]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(185280)))];
|
| 21 |
tensor<fp16, [384]> var_71_to_fp16 = const()[name = tensor<string, []>("op_71_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1070080)))];
|
| 22 |
+
tensor<fp16, [1, 384, 1, 1500]> var_90_cast_fp16 = conv(bias = var_71_to_fp16, dilations = var_90_dilations_0, groups = var_90_groups_0, pad = var_90_pad_0, pad_type = var_90_pad_type_0, strides = var_90_strides_0, weight = var_65_to_fp16, x = hidden_states_1_cast_fp16)[name = tensor<string, []>("op_90_cast_fp16")];
|
| 23 |
tensor<string, []> hidden_states_3_mode_0 = const()[name = tensor<string, []>("hidden_states_3_mode_0"), val = tensor<string, []>("EXACT")];
|
| 24 |
tensor<fp16, [1, 384, 1, 1500]> hidden_states_3_cast_fp16 = gelu(mode = hidden_states_3_mode_0, x = var_90_cast_fp16)[name = tensor<string, []>("hidden_states_3_cast_fp16")];
|
| 25 |
tensor<fp16, [1, 384, 1, 1500]> var_108_to_fp16 = const()[name = tensor<string, []>("op_108_to_fp16"), val = tensor<fp16, [1, 384, 1, 1500]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(1070912)))];
|
|
|
|
| 35 |
tensor<fp16, [384]> obj_1_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_1_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2225472)))];
|
| 36 |
tensor<fp16, []> obj_1_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_1_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 37 |
tensor<fp16, [1, 384, 1, 1500]> obj_1_cast_fp16 = batch_norm(beta = obj_1_beta_0_to_fp16, epsilon = obj_1_epsilon_0_to_fp16, gamma = obj_1_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_1_cast_fp16)[name = tensor<string, []>("obj_1_cast_fp16")];
|
| 38 |
+
tensor<string, []> query_1_pad_type_0 = const()[name = tensor<string, []>("query_1_pad_type_0"), val = tensor<string, []>("valid")];
|
| 39 |
+
tensor<int32, [2]> query_1_strides_0 = const()[name = tensor<string, []>("query_1_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 40 |
tensor<int32, [4]> query_1_pad_0 = const()[name = tensor<string, []>("query_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 41 |
+
tensor<int32, [2]> query_1_dilations_0 = const()[name = tensor<string, []>("query_1_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 42 |
+
tensor<int32, []> query_1_groups_0 = const()[name = tensor<string, []>("query_1_groups_0"), val = tensor<int32, []>(1)];
|
| 43 |
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2226304)))];
|
| 44 |
tensor<fp16, [384]> layers_0_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2521280)))];
|
| 45 |
+
tensor<fp16, [1, 384, 1, 1500]> query_1_cast_fp16 = conv(bias = layers_0_self_attn_q_proj_bias_to_fp16, dilations = query_1_dilations_0, groups = query_1_groups_0, pad = query_1_pad_0, pad_type = query_1_pad_type_0, strides = query_1_strides_0, weight = layers_0_self_attn_q_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor<string, []>("query_1_cast_fp16")];
|
| 46 |
+
tensor<string, []> key_1_pad_type_0 = const()[name = tensor<string, []>("key_1_pad_type_0"), val = tensor<string, []>("valid")];
|
| 47 |
+
tensor<int32, [2]> key_1_strides_0 = const()[name = tensor<string, []>("key_1_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 48 |
tensor<int32, [4]> key_1_pad_0 = const()[name = tensor<string, []>("key_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 49 |
+
tensor<int32, [2]> key_1_dilations_0 = const()[name = tensor<string, []>("key_1_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 50 |
+
tensor<int32, []> key_1_groups_0 = const()[name = tensor<string, []>("key_1_groups_0"), val = tensor<int32, []>(1)];
|
| 51 |
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2522112)))];
|
| 52 |
+
tensor<fp16, [1, 384, 1, 1500]> key_1_cast_fp16 = conv(dilations = key_1_dilations_0, groups = key_1_groups_0, pad = key_1_pad_0, pad_type = key_1_pad_type_0, strides = key_1_strides_0, weight = layers_0_self_attn_k_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor<string, []>("key_1_cast_fp16")];
|
| 53 |
+
tensor<string, []> value_1_pad_type_0 = const()[name = tensor<string, []>("value_1_pad_type_0"), val = tensor<string, []>("valid")];
|
| 54 |
+
tensor<int32, [2]> value_1_strides_0 = const()[name = tensor<string, []>("value_1_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 55 |
tensor<int32, [4]> value_1_pad_0 = const()[name = tensor<string, []>("value_1_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 56 |
+
tensor<int32, [2]> value_1_dilations_0 = const()[name = tensor<string, []>("value_1_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 57 |
+
tensor<int32, []> value_1_groups_0 = const()[name = tensor<string, []>("value_1_groups_0"), val = tensor<int32, []>(1)];
|
| 58 |
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(2817088)))];
|
| 59 |
tensor<fp16, [384]> layers_0_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3112064)))];
|
| 60 |
+
tensor<fp16, [1, 384, 1, 1500]> value_1_cast_fp16 = conv(bias = layers_0_self_attn_v_proj_bias_to_fp16, dilations = value_1_dilations_0, groups = value_1_groups_0, pad = value_1_pad_0, pad_type = value_1_pad_type_0, strides = value_1_strides_0, weight = layers_0_self_attn_v_proj_weight_to_fp16, x = obj_1_cast_fp16)[name = tensor<string, []>("value_1_cast_fp16")];
|
| 61 |
tensor<int32, [4]> var_184_begin_0 = const()[name = tensor<string, []>("op_184_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 62 |
tensor<int32, [4]> var_184_end_0 = const()[name = tensor<string, []>("op_184_end_0"), val = tensor<int32, [4]>([1, 64, 1, 1500])];
|
| 63 |
tensor<bool, [4]> var_184_end_mask_0 = const()[name = tensor<string, []>("op_184_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
|
|
|
|
| 410 |
tensor<fp16, [1, 64, 1, 1500]> var_603_cast_fp16 = concat(axis = var_118, interleave = var_603_interleave_0, values = (var_585_cast_fp16, var_587_cast_fp16, var_589_cast_fp16, var_591_cast_fp16))[name = tensor<string, []>("op_603_cast_fp16")];
|
| 411 |
tensor<bool, []> input_1_interleave_0 = const()[name = tensor<string, []>("input_1_interleave_0"), val = tensor<bool, []>(false)];
|
| 412 |
tensor<fp16, [1, 384, 1, 1500]> input_1_cast_fp16 = concat(axis = var_129, interleave = input_1_interleave_0, values = (var_593_cast_fp16, var_595_cast_fp16, var_597_cast_fp16, var_599_cast_fp16, var_601_cast_fp16, var_603_cast_fp16))[name = tensor<string, []>("input_1_cast_fp16")];
|
| 413 |
+
tensor<string, []> obj_3_pad_type_0 = const()[name = tensor<string, []>("obj_3_pad_type_0"), val = tensor<string, []>("valid")];
|
| 414 |
+
tensor<int32, [2]> obj_3_strides_0 = const()[name = tensor<string, []>("obj_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 415 |
tensor<int32, [4]> obj_3_pad_0 = const()[name = tensor<string, []>("obj_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 416 |
+
tensor<int32, [2]> obj_3_dilations_0 = const()[name = tensor<string, []>("obj_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 417 |
+
tensor<int32, []> obj_3_groups_0 = const()[name = tensor<string, []>("obj_3_groups_0"), val = tensor<int32, []>(1)];
|
| 418 |
tensor<fp16, [384, 384, 1, 1]> layers_0_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3112896)))];
|
| 419 |
tensor<fp16, [384]> layers_0_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3407872)))];
|
| 420 |
+
tensor<fp16, [1, 384, 1, 1500]> obj_3_cast_fp16 = conv(bias = layers_0_self_attn_o_proj_bias_to_fp16, dilations = obj_3_dilations_0, groups = obj_3_groups_0, pad = obj_3_pad_0, pad_type = obj_3_pad_type_0, strides = obj_3_strides_0, weight = layers_0_self_attn_o_proj_weight_to_fp16, x = input_1_cast_fp16)[name = tensor<string, []>("obj_3_cast_fp16")];
|
| 421 |
tensor<fp16, [1, 384, 1, 1500]> inputs_3_cast_fp16 = add(x = inputs_1_cast_fp16, y = obj_3_cast_fp16)[name = tensor<string, []>("inputs_3_cast_fp16")];
|
| 422 |
tensor<int32, [1]> out_3_axes_0 = const()[name = tensor<string, []>("out_3_axes_0"), val = tensor<int32, [1]>([1])];
|
| 423 |
tensor<fp16, []> var_622_to_fp16 = const()[name = tensor<string, []>("op_622_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
|
|
|
| 426 |
tensor<fp16, [384]> input_3_beta_0_to_fp16 = const()[name = tensor<string, []>("input_3_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3409536)))];
|
| 427 |
tensor<fp16, []> input_3_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_3_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 428 |
tensor<fp16, [1, 384, 1, 1500]> input_3_cast_fp16 = batch_norm(beta = input_3_beta_0_to_fp16, epsilon = input_3_epsilon_0_to_fp16, gamma = input_3_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_3_cast_fp16)[name = tensor<string, []>("input_3_cast_fp16")];
|
| 429 |
+
tensor<string, []> input_5_pad_type_0 = const()[name = tensor<string, []>("input_5_pad_type_0"), val = tensor<string, []>("valid")];
|
| 430 |
+
tensor<int32, [2]> input_5_strides_0 = const()[name = tensor<string, []>("input_5_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 431 |
tensor<int32, [4]> input_5_pad_0 = const()[name = tensor<string, []>("input_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 432 |
+
tensor<int32, [2]> input_5_dilations_0 = const()[name = tensor<string, []>("input_5_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 433 |
+
tensor<int32, []> input_5_groups_0 = const()[name = tensor<string, []>("input_5_groups_0"), val = tensor<int32, []>(1)];
|
| 434 |
tensor<fp16, [1536, 384, 1, 1]> layers_0_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(3410368)))];
|
| 435 |
tensor<fp16, [1536]> layers_0_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4590080)))];
|
| 436 |
+
tensor<fp16, [1, 1536, 1, 1500]> input_5_cast_fp16 = conv(bias = layers_0_fc1_bias_to_fp16, dilations = input_5_dilations_0, groups = input_5_groups_0, pad = input_5_pad_0, pad_type = input_5_pad_type_0, strides = input_5_strides_0, weight = layers_0_fc1_weight_to_fp16, x = input_3_cast_fp16)[name = tensor<string, []>("input_5_cast_fp16")];
|
| 437 |
tensor<string, []> input_7_mode_0 = const()[name = tensor<string, []>("input_7_mode_0"), val = tensor<string, []>("EXACT")];
|
| 438 |
tensor<fp16, [1, 1536, 1, 1500]> input_7_cast_fp16 = gelu(mode = input_7_mode_0, x = input_5_cast_fp16)[name = tensor<string, []>("input_7_cast_fp16")];
|
| 439 |
+
tensor<string, []> hidden_states_5_pad_type_0 = const()[name = tensor<string, []>("hidden_states_5_pad_type_0"), val = tensor<string, []>("valid")];
|
| 440 |
+
tensor<int32, [2]> hidden_states_5_strides_0 = const()[name = tensor<string, []>("hidden_states_5_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 441 |
tensor<int32, [4]> hidden_states_5_pad_0 = const()[name = tensor<string, []>("hidden_states_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 442 |
+
tensor<int32, [2]> hidden_states_5_dilations_0 = const()[name = tensor<string, []>("hidden_states_5_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 443 |
+
tensor<int32, []> hidden_states_5_groups_0 = const()[name = tensor<string, []>("hidden_states_5_groups_0"), val = tensor<int32, []>(1)];
|
| 444 |
tensor<fp16, [384, 1536, 1, 1]> layers_0_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_0_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(4593216)))];
|
| 445 |
tensor<fp16, [384]> layers_0_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_0_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5772928)))];
|
| 446 |
+
tensor<fp16, [1, 384, 1, 1500]> hidden_states_5_cast_fp16 = conv(bias = layers_0_fc2_bias_to_fp16, dilations = hidden_states_5_dilations_0, groups = hidden_states_5_groups_0, pad = hidden_states_5_pad_0, pad_type = hidden_states_5_pad_type_0, strides = hidden_states_5_strides_0, weight = layers_0_fc2_weight_to_fp16, x = input_7_cast_fp16)[name = tensor<string, []>("hidden_states_5_cast_fp16")];
|
| 447 |
tensor<fp16, [1, 384, 1, 1500]> inputs_5_cast_fp16 = add(x = inputs_3_cast_fp16, y = hidden_states_5_cast_fp16)[name = tensor<string, []>("inputs_5_cast_fp16")];
|
| 448 |
tensor<int32, []> var_651 = const()[name = tensor<string, []>("op_651"), val = tensor<int32, []>(3)];
|
| 449 |
tensor<int32, []> var_662 = const()[name = tensor<string, []>("op_662"), val = tensor<int32, []>(1)];
|
|
|
|
| 454 |
tensor<fp16, [384]> obj_5_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_5_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5774592)))];
|
| 455 |
tensor<fp16, []> obj_5_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_5_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 456 |
tensor<fp16, [1, 384, 1, 1500]> obj_5_cast_fp16 = batch_norm(beta = obj_5_beta_0_to_fp16, epsilon = obj_5_epsilon_0_to_fp16, gamma = obj_5_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_5_cast_fp16)[name = tensor<string, []>("obj_5_cast_fp16")];
|
| 457 |
+
tensor<string, []> query_3_pad_type_0 = const()[name = tensor<string, []>("query_3_pad_type_0"), val = tensor<string, []>("valid")];
|
| 458 |
+
tensor<int32, [2]> query_3_strides_0 = const()[name = tensor<string, []>("query_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 459 |
tensor<int32, [4]> query_3_pad_0 = const()[name = tensor<string, []>("query_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 460 |
+
tensor<int32, [2]> query_3_dilations_0 = const()[name = tensor<string, []>("query_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 461 |
+
tensor<int32, []> query_3_groups_0 = const()[name = tensor<string, []>("query_3_groups_0"), val = tensor<int32, []>(1)];
|
| 462 |
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(5775424)))];
|
| 463 |
tensor<fp16, [384]> layers_1_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6070400)))];
|
| 464 |
+
tensor<fp16, [1, 384, 1, 1500]> query_3_cast_fp16 = conv(bias = layers_1_self_attn_q_proj_bias_to_fp16, dilations = query_3_dilations_0, groups = query_3_groups_0, pad = query_3_pad_0, pad_type = query_3_pad_type_0, strides = query_3_strides_0, weight = layers_1_self_attn_q_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = tensor<string, []>("query_3_cast_fp16")];
|
| 465 |
+
tensor<string, []> key_3_pad_type_0 = const()[name = tensor<string, []>("key_3_pad_type_0"), val = tensor<string, []>("valid")];
|
| 466 |
+
tensor<int32, [2]> key_3_strides_0 = const()[name = tensor<string, []>("key_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 467 |
tensor<int32, [4]> key_3_pad_0 = const()[name = tensor<string, []>("key_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 468 |
+
tensor<int32, [2]> key_3_dilations_0 = const()[name = tensor<string, []>("key_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 469 |
+
tensor<int32, []> key_3_groups_0 = const()[name = tensor<string, []>("key_3_groups_0"), val = tensor<int32, []>(1)];
|
| 470 |
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6071232)))];
|
| 471 |
+
tensor<fp16, [1, 384, 1, 1500]> key_3_cast_fp16 = conv(dilations = key_3_dilations_0, groups = key_3_groups_0, pad = key_3_pad_0, pad_type = key_3_pad_type_0, strides = key_3_strides_0, weight = layers_1_self_attn_k_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = tensor<string, []>("key_3_cast_fp16")];
|
| 472 |
+
tensor<string, []> value_3_pad_type_0 = const()[name = tensor<string, []>("value_3_pad_type_0"), val = tensor<string, []>("valid")];
|
| 473 |
+
tensor<int32, [2]> value_3_strides_0 = const()[name = tensor<string, []>("value_3_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 474 |
tensor<int32, [4]> value_3_pad_0 = const()[name = tensor<string, []>("value_3_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 475 |
+
tensor<int32, [2]> value_3_dilations_0 = const()[name = tensor<string, []>("value_3_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 476 |
+
tensor<int32, []> value_3_groups_0 = const()[name = tensor<string, []>("value_3_groups_0"), val = tensor<int32, []>(1)];
|
| 477 |
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6366208)))];
|
| 478 |
tensor<fp16, [384]> layers_1_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6661184)))];
|
| 479 |
+
tensor<fp16, [1, 384, 1, 1500]> value_3_cast_fp16 = conv(bias = layers_1_self_attn_v_proj_bias_to_fp16, dilations = value_3_dilations_0, groups = value_3_groups_0, pad = value_3_pad_0, pad_type = value_3_pad_type_0, strides = value_3_strides_0, weight = layers_1_self_attn_v_proj_weight_to_fp16, x = obj_5_cast_fp16)[name = tensor<string, []>("value_3_cast_fp16")];
|
| 480 |
tensor<int32, [4]> var_717_begin_0 = const()[name = tensor<string, []>("op_717_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 481 |
tensor<int32, [4]> var_717_end_0 = const()[name = tensor<string, []>("op_717_end_0"), val = tensor<int32, [4]>([1, 64, 1, 1500])];
|
| 482 |
tensor<bool, [4]> var_717_end_mask_0 = const()[name = tensor<string, []>("op_717_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
|
|
|
|
| 829 |
tensor<fp16, [1, 64, 1, 1500]> var_1136_cast_fp16 = concat(axis = var_651, interleave = var_1136_interleave_0, values = (var_1118_cast_fp16, var_1120_cast_fp16, var_1122_cast_fp16, var_1124_cast_fp16))[name = tensor<string, []>("op_1136_cast_fp16")];
|
| 830 |
tensor<bool, []> input_9_interleave_0 = const()[name = tensor<string, []>("input_9_interleave_0"), val = tensor<bool, []>(false)];
|
| 831 |
tensor<fp16, [1, 384, 1, 1500]> input_9_cast_fp16 = concat(axis = var_662, interleave = input_9_interleave_0, values = (var_1126_cast_fp16, var_1128_cast_fp16, var_1130_cast_fp16, var_1132_cast_fp16, var_1134_cast_fp16, var_1136_cast_fp16))[name = tensor<string, []>("input_9_cast_fp16")];
|
| 832 |
+
tensor<string, []> obj_7_pad_type_0 = const()[name = tensor<string, []>("obj_7_pad_type_0"), val = tensor<string, []>("valid")];
|
| 833 |
+
tensor<int32, [2]> obj_7_strides_0 = const()[name = tensor<string, []>("obj_7_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 834 |
tensor<int32, [4]> obj_7_pad_0 = const()[name = tensor<string, []>("obj_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 835 |
+
tensor<int32, [2]> obj_7_dilations_0 = const()[name = tensor<string, []>("obj_7_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 836 |
+
tensor<int32, []> obj_7_groups_0 = const()[name = tensor<string, []>("obj_7_groups_0"), val = tensor<int32, []>(1)];
|
| 837 |
tensor<fp16, [384, 384, 1, 1]> layers_1_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6662016)))];
|
| 838 |
tensor<fp16, [384]> layers_1_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6956992)))];
|
| 839 |
+
tensor<fp16, [1, 384, 1, 1500]> obj_7_cast_fp16 = conv(bias = layers_1_self_attn_o_proj_bias_to_fp16, dilations = obj_7_dilations_0, groups = obj_7_groups_0, pad = obj_7_pad_0, pad_type = obj_7_pad_type_0, strides = obj_7_strides_0, weight = layers_1_self_attn_o_proj_weight_to_fp16, x = input_9_cast_fp16)[name = tensor<string, []>("obj_7_cast_fp16")];
|
| 840 |
tensor<fp16, [1, 384, 1, 1500]> inputs_7_cast_fp16 = add(x = inputs_5_cast_fp16, y = obj_7_cast_fp16)[name = tensor<string, []>("inputs_7_cast_fp16")];
|
| 841 |
tensor<int32, [1]> out_7_axes_0 = const()[name = tensor<string, []>("out_7_axes_0"), val = tensor<int32, [1]>([1])];
|
| 842 |
tensor<fp16, []> var_1155_to_fp16 = const()[name = tensor<string, []>("op_1155_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
|
|
|
| 845 |
tensor<fp16, [384]> input_11_beta_0_to_fp16 = const()[name = tensor<string, []>("input_11_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6958656)))];
|
| 846 |
tensor<fp16, []> input_11_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_11_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 847 |
tensor<fp16, [1, 384, 1, 1500]> input_11_cast_fp16 = batch_norm(beta = input_11_beta_0_to_fp16, epsilon = input_11_epsilon_0_to_fp16, gamma = input_11_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_7_cast_fp16)[name = tensor<string, []>("input_11_cast_fp16")];
|
| 848 |
+
tensor<string, []> input_13_pad_type_0 = const()[name = tensor<string, []>("input_13_pad_type_0"), val = tensor<string, []>("valid")];
|
| 849 |
+
tensor<int32, [2]> input_13_strides_0 = const()[name = tensor<string, []>("input_13_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 850 |
tensor<int32, [4]> input_13_pad_0 = const()[name = tensor<string, []>("input_13_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 851 |
+
tensor<int32, [2]> input_13_dilations_0 = const()[name = tensor<string, []>("input_13_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 852 |
+
tensor<int32, []> input_13_groups_0 = const()[name = tensor<string, []>("input_13_groups_0"), val = tensor<int32, []>(1)];
|
| 853 |
tensor<fp16, [1536, 384, 1, 1]> layers_1_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(6959488)))];
|
| 854 |
tensor<fp16, [1536]> layers_1_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8139200)))];
|
| 855 |
+
tensor<fp16, [1, 1536, 1, 1500]> input_13_cast_fp16 = conv(bias = layers_1_fc1_bias_to_fp16, dilations = input_13_dilations_0, groups = input_13_groups_0, pad = input_13_pad_0, pad_type = input_13_pad_type_0, strides = input_13_strides_0, weight = layers_1_fc1_weight_to_fp16, x = input_11_cast_fp16)[name = tensor<string, []>("input_13_cast_fp16")];
|
| 856 |
tensor<string, []> input_15_mode_0 = const()[name = tensor<string, []>("input_15_mode_0"), val = tensor<string, []>("EXACT")];
|
| 857 |
tensor<fp16, [1, 1536, 1, 1500]> input_15_cast_fp16 = gelu(mode = input_15_mode_0, x = input_13_cast_fp16)[name = tensor<string, []>("input_15_cast_fp16")];
|
| 858 |
+
tensor<string, []> hidden_states_7_pad_type_0 = const()[name = tensor<string, []>("hidden_states_7_pad_type_0"), val = tensor<string, []>("valid")];
|
| 859 |
+
tensor<int32, [2]> hidden_states_7_strides_0 = const()[name = tensor<string, []>("hidden_states_7_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 860 |
tensor<int32, [4]> hidden_states_7_pad_0 = const()[name = tensor<string, []>("hidden_states_7_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 861 |
+
tensor<int32, [2]> hidden_states_7_dilations_0 = const()[name = tensor<string, []>("hidden_states_7_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 862 |
+
tensor<int32, []> hidden_states_7_groups_0 = const()[name = tensor<string, []>("hidden_states_7_groups_0"), val = tensor<int32, []>(1)];
|
| 863 |
tensor<fp16, [384, 1536, 1, 1]> layers_1_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_1_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(8142336)))];
|
| 864 |
tensor<fp16, [384]> layers_1_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_1_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9322048)))];
|
| 865 |
+
tensor<fp16, [1, 384, 1, 1500]> hidden_states_7_cast_fp16 = conv(bias = layers_1_fc2_bias_to_fp16, dilations = hidden_states_7_dilations_0, groups = hidden_states_7_groups_0, pad = hidden_states_7_pad_0, pad_type = hidden_states_7_pad_type_0, strides = hidden_states_7_strides_0, weight = layers_1_fc2_weight_to_fp16, x = input_15_cast_fp16)[name = tensor<string, []>("hidden_states_7_cast_fp16")];
|
| 866 |
tensor<fp16, [1, 384, 1, 1500]> inputs_9_cast_fp16 = add(x = inputs_7_cast_fp16, y = hidden_states_7_cast_fp16)[name = tensor<string, []>("inputs_9_cast_fp16")];
|
| 867 |
tensor<int32, []> var_1184 = const()[name = tensor<string, []>("op_1184"), val = tensor<int32, []>(3)];
|
| 868 |
tensor<int32, []> var_1195 = const()[name = tensor<string, []>("op_1195"), val = tensor<int32, []>(1)];
|
|
|
|
| 873 |
tensor<fp16, [384]> obj_9_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_9_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9323712)))];
|
| 874 |
tensor<fp16, []> obj_9_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_9_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 875 |
tensor<fp16, [1, 384, 1, 1500]> obj_9_cast_fp16 = batch_norm(beta = obj_9_beta_0_to_fp16, epsilon = obj_9_epsilon_0_to_fp16, gamma = obj_9_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_9_cast_fp16)[name = tensor<string, []>("obj_9_cast_fp16")];
|
| 876 |
+
tensor<string, []> query_5_pad_type_0 = const()[name = tensor<string, []>("query_5_pad_type_0"), val = tensor<string, []>("valid")];
|
| 877 |
+
tensor<int32, [2]> query_5_strides_0 = const()[name = tensor<string, []>("query_5_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 878 |
tensor<int32, [4]> query_5_pad_0 = const()[name = tensor<string, []>("query_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 879 |
+
tensor<int32, [2]> query_5_dilations_0 = const()[name = tensor<string, []>("query_5_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 880 |
+
tensor<int32, []> query_5_groups_0 = const()[name = tensor<string, []>("query_5_groups_0"), val = tensor<int32, []>(1)];
|
| 881 |
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9324544)))];
|
| 882 |
tensor<fp16, [384]> layers_2_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9619520)))];
|
| 883 |
+
tensor<fp16, [1, 384, 1, 1500]> query_5_cast_fp16 = conv(bias = layers_2_self_attn_q_proj_bias_to_fp16, dilations = query_5_dilations_0, groups = query_5_groups_0, pad = query_5_pad_0, pad_type = query_5_pad_type_0, strides = query_5_strides_0, weight = layers_2_self_attn_q_proj_weight_to_fp16, x = obj_9_cast_fp16)[name = tensor<string, []>("query_5_cast_fp16")];
|
| 884 |
+
tensor<string, []> key_5_pad_type_0 = const()[name = tensor<string, []>("key_5_pad_type_0"), val = tensor<string, []>("valid")];
|
| 885 |
+
tensor<int32, [2]> key_5_strides_0 = const()[name = tensor<string, []>("key_5_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 886 |
tensor<int32, [4]> key_5_pad_0 = const()[name = tensor<string, []>("key_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 887 |
+
tensor<int32, [2]> key_5_dilations_0 = const()[name = tensor<string, []>("key_5_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 888 |
+
tensor<int32, []> key_5_groups_0 = const()[name = tensor<string, []>("key_5_groups_0"), val = tensor<int32, []>(1)];
|
| 889 |
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9620352)))];
|
| 890 |
+
tensor<fp16, [1, 384, 1, 1500]> key_5_cast_fp16 = conv(dilations = key_5_dilations_0, groups = key_5_groups_0, pad = key_5_pad_0, pad_type = key_5_pad_type_0, strides = key_5_strides_0, weight = layers_2_self_attn_k_proj_weight_to_fp16, x = obj_9_cast_fp16)[name = tensor<string, []>("key_5_cast_fp16")];
|
| 891 |
+
tensor<string, []> value_5_pad_type_0 = const()[name = tensor<string, []>("value_5_pad_type_0"), val = tensor<string, []>("valid")];
|
| 892 |
+
tensor<int32, [2]> value_5_strides_0 = const()[name = tensor<string, []>("value_5_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 893 |
tensor<int32, [4]> value_5_pad_0 = const()[name = tensor<string, []>("value_5_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 894 |
+
tensor<int32, [2]> value_5_dilations_0 = const()[name = tensor<string, []>("value_5_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 895 |
+
tensor<int32, []> value_5_groups_0 = const()[name = tensor<string, []>("value_5_groups_0"), val = tensor<int32, []>(1)];
|
| 896 |
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(9915328)))];
|
| 897 |
tensor<fp16, [384]> layers_2_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10210304)))];
|
| 898 |
+
tensor<fp16, [1, 384, 1, 1500]> value_5_cast_fp16 = conv(bias = layers_2_self_attn_v_proj_bias_to_fp16, dilations = value_5_dilations_0, groups = value_5_groups_0, pad = value_5_pad_0, pad_type = value_5_pad_type_0, strides = value_5_strides_0, weight = layers_2_self_attn_v_proj_weight_to_fp16, x = obj_9_cast_fp16)[name = tensor<string, []>("value_5_cast_fp16")];
|
| 899 |
tensor<int32, [4]> var_1250_begin_0 = const()[name = tensor<string, []>("op_1250_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 900 |
tensor<int32, [4]> var_1250_end_0 = const()[name = tensor<string, []>("op_1250_end_0"), val = tensor<int32, [4]>([1, 64, 1, 1500])];
|
| 901 |
tensor<bool, [4]> var_1250_end_mask_0 = const()[name = tensor<string, []>("op_1250_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
|
|
|
|
| 1248 |
tensor<fp16, [1, 64, 1, 1500]> var_1669_cast_fp16 = concat(axis = var_1184, interleave = var_1669_interleave_0, values = (var_1651_cast_fp16, var_1653_cast_fp16, var_1655_cast_fp16, var_1657_cast_fp16))[name = tensor<string, []>("op_1669_cast_fp16")];
|
| 1249 |
tensor<bool, []> input_17_interleave_0 = const()[name = tensor<string, []>("input_17_interleave_0"), val = tensor<bool, []>(false)];
|
| 1250 |
tensor<fp16, [1, 384, 1, 1500]> input_17_cast_fp16 = concat(axis = var_1195, interleave = input_17_interleave_0, values = (var_1659_cast_fp16, var_1661_cast_fp16, var_1663_cast_fp16, var_1665_cast_fp16, var_1667_cast_fp16, var_1669_cast_fp16))[name = tensor<string, []>("input_17_cast_fp16")];
|
| 1251 |
+
tensor<string, []> obj_11_pad_type_0 = const()[name = tensor<string, []>("obj_11_pad_type_0"), val = tensor<string, []>("valid")];
|
| 1252 |
+
tensor<int32, [2]> obj_11_strides_0 = const()[name = tensor<string, []>("obj_11_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 1253 |
tensor<int32, [4]> obj_11_pad_0 = const()[name = tensor<string, []>("obj_11_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 1254 |
+
tensor<int32, [2]> obj_11_dilations_0 = const()[name = tensor<string, []>("obj_11_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 1255 |
+
tensor<int32, []> obj_11_groups_0 = const()[name = tensor<string, []>("obj_11_groups_0"), val = tensor<int32, []>(1)];
|
| 1256 |
tensor<fp16, [384, 384, 1, 1]> layers_2_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10211136)))];
|
| 1257 |
tensor<fp16, [384]> layers_2_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10506112)))];
|
| 1258 |
+
tensor<fp16, [1, 384, 1, 1500]> obj_11_cast_fp16 = conv(bias = layers_2_self_attn_o_proj_bias_to_fp16, dilations = obj_11_dilations_0, groups = obj_11_groups_0, pad = obj_11_pad_0, pad_type = obj_11_pad_type_0, strides = obj_11_strides_0, weight = layers_2_self_attn_o_proj_weight_to_fp16, x = input_17_cast_fp16)[name = tensor<string, []>("obj_11_cast_fp16")];
|
| 1259 |
tensor<fp16, [1, 384, 1, 1500]> inputs_11_cast_fp16 = add(x = inputs_9_cast_fp16, y = obj_11_cast_fp16)[name = tensor<string, []>("inputs_11_cast_fp16")];
|
| 1260 |
tensor<int32, [1]> out_11_axes_0 = const()[name = tensor<string, []>("out_11_axes_0"), val = tensor<int32, [1]>([1])];
|
| 1261 |
tensor<fp16, []> var_1688_to_fp16 = const()[name = tensor<string, []>("op_1688_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
|
|
|
| 1264 |
tensor<fp16, [384]> input_19_beta_0_to_fp16 = const()[name = tensor<string, []>("input_19_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10507776)))];
|
| 1265 |
tensor<fp16, []> input_19_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_19_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 1266 |
tensor<fp16, [1, 384, 1, 1500]> input_19_cast_fp16 = batch_norm(beta = input_19_beta_0_to_fp16, epsilon = input_19_epsilon_0_to_fp16, gamma = input_19_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_11_cast_fp16)[name = tensor<string, []>("input_19_cast_fp16")];
|
| 1267 |
+
tensor<string, []> input_21_pad_type_0 = const()[name = tensor<string, []>("input_21_pad_type_0"), val = tensor<string, []>("valid")];
|
| 1268 |
+
tensor<int32, [2]> input_21_strides_0 = const()[name = tensor<string, []>("input_21_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 1269 |
tensor<int32, [4]> input_21_pad_0 = const()[name = tensor<string, []>("input_21_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 1270 |
+
tensor<int32, [2]> input_21_dilations_0 = const()[name = tensor<string, []>("input_21_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 1271 |
+
tensor<int32, []> input_21_groups_0 = const()[name = tensor<string, []>("input_21_groups_0"), val = tensor<int32, []>(1)];
|
| 1272 |
tensor<fp16, [1536, 384, 1, 1]> layers_2_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(10508608)))];
|
| 1273 |
tensor<fp16, [1536]> layers_2_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11688320)))];
|
| 1274 |
+
tensor<fp16, [1, 1536, 1, 1500]> input_21_cast_fp16 = conv(bias = layers_2_fc1_bias_to_fp16, dilations = input_21_dilations_0, groups = input_21_groups_0, pad = input_21_pad_0, pad_type = input_21_pad_type_0, strides = input_21_strides_0, weight = layers_2_fc1_weight_to_fp16, x = input_19_cast_fp16)[name = tensor<string, []>("input_21_cast_fp16")];
|
| 1275 |
tensor<string, []> input_23_mode_0 = const()[name = tensor<string, []>("input_23_mode_0"), val = tensor<string, []>("EXACT")];
|
| 1276 |
tensor<fp16, [1, 1536, 1, 1500]> input_23_cast_fp16 = gelu(mode = input_23_mode_0, x = input_21_cast_fp16)[name = tensor<string, []>("input_23_cast_fp16")];
|
| 1277 |
+
tensor<string, []> hidden_states_9_pad_type_0 = const()[name = tensor<string, []>("hidden_states_9_pad_type_0"), val = tensor<string, []>("valid")];
|
| 1278 |
+
tensor<int32, [2]> hidden_states_9_strides_0 = const()[name = tensor<string, []>("hidden_states_9_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 1279 |
tensor<int32, [4]> hidden_states_9_pad_0 = const()[name = tensor<string, []>("hidden_states_9_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 1280 |
+
tensor<int32, [2]> hidden_states_9_dilations_0 = const()[name = tensor<string, []>("hidden_states_9_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 1281 |
+
tensor<int32, []> hidden_states_9_groups_0 = const()[name = tensor<string, []>("hidden_states_9_groups_0"), val = tensor<int32, []>(1)];
|
| 1282 |
tensor<fp16, [384, 1536, 1, 1]> layers_2_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_2_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(11691456)))];
|
| 1283 |
tensor<fp16, [384]> layers_2_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_2_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(12871168)))];
|
| 1284 |
+
tensor<fp16, [1, 384, 1, 1500]> hidden_states_9_cast_fp16 = conv(bias = layers_2_fc2_bias_to_fp16, dilations = hidden_states_9_dilations_0, groups = hidden_states_9_groups_0, pad = hidden_states_9_pad_0, pad_type = hidden_states_9_pad_type_0, strides = hidden_states_9_strides_0, weight = layers_2_fc2_weight_to_fp16, x = input_23_cast_fp16)[name = tensor<string, []>("hidden_states_9_cast_fp16")];
|
| 1285 |
tensor<fp16, [1, 384, 1, 1500]> inputs_13_cast_fp16 = add(x = inputs_11_cast_fp16, y = hidden_states_9_cast_fp16)[name = tensor<string, []>("inputs_13_cast_fp16")];
|
| 1286 |
tensor<int32, []> var_1717 = const()[name = tensor<string, []>("op_1717"), val = tensor<int32, []>(3)];
|
| 1287 |
tensor<int32, []> var_1728 = const()[name = tensor<string, []>("op_1728"), val = tensor<int32, []>(1)];
|
|
|
|
| 1292 |
tensor<fp16, [384]> obj_13_beta_0_to_fp16 = const()[name = tensor<string, []>("obj_13_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(12872832)))];
|
| 1293 |
tensor<fp16, []> obj_13_epsilon_0_to_fp16 = const()[name = tensor<string, []>("obj_13_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 1294 |
tensor<fp16, [1, 384, 1, 1500]> obj_13_cast_fp16 = batch_norm(beta = obj_13_beta_0_to_fp16, epsilon = obj_13_epsilon_0_to_fp16, gamma = obj_13_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_13_cast_fp16)[name = tensor<string, []>("obj_13_cast_fp16")];
|
| 1295 |
+
tensor<string, []> query_pad_type_0 = const()[name = tensor<string, []>("query_pad_type_0"), val = tensor<string, []>("valid")];
|
| 1296 |
+
tensor<int32, [2]> query_strides_0 = const()[name = tensor<string, []>("query_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 1297 |
tensor<int32, [4]> query_pad_0 = const()[name = tensor<string, []>("query_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 1298 |
+
tensor<int32, [2]> query_dilations_0 = const()[name = tensor<string, []>("query_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 1299 |
+
tensor<int32, []> query_groups_0 = const()[name = tensor<string, []>("query_groups_0"), val = tensor<int32, []>(1)];
|
| 1300 |
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_q_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_q_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(12873664)))];
|
| 1301 |
tensor<fp16, [384]> layers_3_self_attn_q_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_q_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13168640)))];
|
| 1302 |
+
tensor<fp16, [1, 384, 1, 1500]> query_cast_fp16 = conv(bias = layers_3_self_attn_q_proj_bias_to_fp16, dilations = query_dilations_0, groups = query_groups_0, pad = query_pad_0, pad_type = query_pad_type_0, strides = query_strides_0, weight = layers_3_self_attn_q_proj_weight_to_fp16, x = obj_13_cast_fp16)[name = tensor<string, []>("query_cast_fp16")];
|
| 1303 |
+
tensor<string, []> key_pad_type_0 = const()[name = tensor<string, []>("key_pad_type_0"), val = tensor<string, []>("valid")];
|
| 1304 |
+
tensor<int32, [2]> key_strides_0 = const()[name = tensor<string, []>("key_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 1305 |
tensor<int32, [4]> key_pad_0 = const()[name = tensor<string, []>("key_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 1306 |
+
tensor<int32, [2]> key_dilations_0 = const()[name = tensor<string, []>("key_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 1307 |
+
tensor<int32, []> key_groups_0 = const()[name = tensor<string, []>("key_groups_0"), val = tensor<int32, []>(1)];
|
| 1308 |
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_k_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_k_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13169472)))];
|
| 1309 |
+
tensor<fp16, [1, 384, 1, 1500]> key_cast_fp16 = conv(dilations = key_dilations_0, groups = key_groups_0, pad = key_pad_0, pad_type = key_pad_type_0, strides = key_strides_0, weight = layers_3_self_attn_k_proj_weight_to_fp16, x = obj_13_cast_fp16)[name = tensor<string, []>("key_cast_fp16")];
|
| 1310 |
+
tensor<string, []> value_pad_type_0 = const()[name = tensor<string, []>("value_pad_type_0"), val = tensor<string, []>("valid")];
|
| 1311 |
+
tensor<int32, [2]> value_strides_0 = const()[name = tensor<string, []>("value_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 1312 |
tensor<int32, [4]> value_pad_0 = const()[name = tensor<string, []>("value_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 1313 |
+
tensor<int32, [2]> value_dilations_0 = const()[name = tensor<string, []>("value_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 1314 |
+
tensor<int32, []> value_groups_0 = const()[name = tensor<string, []>("value_groups_0"), val = tensor<int32, []>(1)];
|
| 1315 |
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_v_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_v_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13464448)))];
|
| 1316 |
tensor<fp16, [384]> layers_3_self_attn_v_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_v_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13759424)))];
|
| 1317 |
+
tensor<fp16, [1, 384, 1, 1500]> value_cast_fp16 = conv(bias = layers_3_self_attn_v_proj_bias_to_fp16, dilations = value_dilations_0, groups = value_groups_0, pad = value_pad_0, pad_type = value_pad_type_0, strides = value_strides_0, weight = layers_3_self_attn_v_proj_weight_to_fp16, x = obj_13_cast_fp16)[name = tensor<string, []>("value_cast_fp16")];
|
| 1318 |
tensor<int32, [4]> var_1783_begin_0 = const()[name = tensor<string, []>("op_1783_begin_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 1319 |
tensor<int32, [4]> var_1783_end_0 = const()[name = tensor<string, []>("op_1783_end_0"), val = tensor<int32, [4]>([1, 64, 1, 1500])];
|
| 1320 |
tensor<bool, [4]> var_1783_end_mask_0 = const()[name = tensor<string, []>("op_1783_end_mask_0"), val = tensor<bool, [4]>([true, false, true, true])];
|
|
|
|
| 1667 |
tensor<fp16, [1, 64, 1, 1500]> var_2202_cast_fp16 = concat(axis = var_1717, interleave = var_2202_interleave_0, values = (var_2184_cast_fp16, var_2186_cast_fp16, var_2188_cast_fp16, var_2190_cast_fp16))[name = tensor<string, []>("op_2202_cast_fp16")];
|
| 1668 |
tensor<bool, []> input_25_interleave_0 = const()[name = tensor<string, []>("input_25_interleave_0"), val = tensor<bool, []>(false)];
|
| 1669 |
tensor<fp16, [1, 384, 1, 1500]> input_25_cast_fp16 = concat(axis = var_1728, interleave = input_25_interleave_0, values = (var_2192_cast_fp16, var_2194_cast_fp16, var_2196_cast_fp16, var_2198_cast_fp16, var_2200_cast_fp16, var_2202_cast_fp16))[name = tensor<string, []>("input_25_cast_fp16")];
|
| 1670 |
+
tensor<string, []> obj_pad_type_0 = const()[name = tensor<string, []>("obj_pad_type_0"), val = tensor<string, []>("valid")];
|
| 1671 |
+
tensor<int32, [2]> obj_strides_0 = const()[name = tensor<string, []>("obj_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 1672 |
tensor<int32, [4]> obj_pad_0 = const()[name = tensor<string, []>("obj_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 1673 |
+
tensor<int32, [2]> obj_dilations_0 = const()[name = tensor<string, []>("obj_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 1674 |
+
tensor<int32, []> obj_groups_0 = const()[name = tensor<string, []>("obj_groups_0"), val = tensor<int32, []>(1)];
|
| 1675 |
tensor<fp16, [384, 384, 1, 1]> layers_3_self_attn_o_proj_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_o_proj_weight_to_fp16"), val = tensor<fp16, [384, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(13760256)))];
|
| 1676 |
tensor<fp16, [384]> layers_3_self_attn_o_proj_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_self_attn_o_proj_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(14055232)))];
|
| 1677 |
+
tensor<fp16, [1, 384, 1, 1500]> obj_cast_fp16 = conv(bias = layers_3_self_attn_o_proj_bias_to_fp16, dilations = obj_dilations_0, groups = obj_groups_0, pad = obj_pad_0, pad_type = obj_pad_type_0, strides = obj_strides_0, weight = layers_3_self_attn_o_proj_weight_to_fp16, x = input_25_cast_fp16)[name = tensor<string, []>("obj_cast_fp16")];
|
| 1678 |
tensor<fp16, [1, 384, 1, 1500]> inputs_15_cast_fp16 = add(x = inputs_13_cast_fp16, y = obj_cast_fp16)[name = tensor<string, []>("inputs_15_cast_fp16")];
|
| 1679 |
tensor<int32, [1]> out_15_axes_0 = const()[name = tensor<string, []>("out_15_axes_0"), val = tensor<int32, [1]>([1])];
|
| 1680 |
tensor<fp16, []> var_2221_to_fp16 = const()[name = tensor<string, []>("op_2221_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
|
|
|
| 1683 |
tensor<fp16, [384]> input_27_beta_0_to_fp16 = const()[name = tensor<string, []>("input_27_beta_0_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(14056896)))];
|
| 1684 |
tensor<fp16, []> input_27_epsilon_0_to_fp16 = const()[name = tensor<string, []>("input_27_epsilon_0_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
| 1685 |
tensor<fp16, [1, 384, 1, 1500]> input_27_cast_fp16 = batch_norm(beta = input_27_beta_0_to_fp16, epsilon = input_27_epsilon_0_to_fp16, gamma = input_27_gamma_0_to_fp16, mean = obj_1_mean_0_to_fp16, variance = obj_1_variance_0_to_fp16, x = out_15_cast_fp16)[name = tensor<string, []>("input_27_cast_fp16")];
|
| 1686 |
+
tensor<string, []> input_29_pad_type_0 = const()[name = tensor<string, []>("input_29_pad_type_0"), val = tensor<string, []>("valid")];
|
| 1687 |
+
tensor<int32, [2]> input_29_strides_0 = const()[name = tensor<string, []>("input_29_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 1688 |
tensor<int32, [4]> input_29_pad_0 = const()[name = tensor<string, []>("input_29_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 1689 |
+
tensor<int32, [2]> input_29_dilations_0 = const()[name = tensor<string, []>("input_29_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 1690 |
+
tensor<int32, []> input_29_groups_0 = const()[name = tensor<string, []>("input_29_groups_0"), val = tensor<int32, []>(1)];
|
| 1691 |
tensor<fp16, [1536, 384, 1, 1]> layers_3_fc1_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_fc1_weight_to_fp16"), val = tensor<fp16, [1536, 384, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(14057728)))];
|
| 1692 |
tensor<fp16, [1536]> layers_3_fc1_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_fc1_bias_to_fp16"), val = tensor<fp16, [1536]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15237440)))];
|
| 1693 |
+
tensor<fp16, [1, 1536, 1, 1500]> input_29_cast_fp16 = conv(bias = layers_3_fc1_bias_to_fp16, dilations = input_29_dilations_0, groups = input_29_groups_0, pad = input_29_pad_0, pad_type = input_29_pad_type_0, strides = input_29_strides_0, weight = layers_3_fc1_weight_to_fp16, x = input_27_cast_fp16)[name = tensor<string, []>("input_29_cast_fp16")];
|
| 1694 |
tensor<string, []> input_mode_0 = const()[name = tensor<string, []>("input_mode_0"), val = tensor<string, []>("EXACT")];
|
| 1695 |
tensor<fp16, [1, 1536, 1, 1500]> input_cast_fp16 = gelu(mode = input_mode_0, x = input_29_cast_fp16)[name = tensor<string, []>("input_cast_fp16")];
|
| 1696 |
+
tensor<string, []> hidden_states_pad_type_0 = const()[name = tensor<string, []>("hidden_states_pad_type_0"), val = tensor<string, []>("valid")];
|
| 1697 |
+
tensor<int32, [2]> hidden_states_strides_0 = const()[name = tensor<string, []>("hidden_states_strides_0"), val = tensor<int32, [2]>([1, 1])];
|
|
|
|
| 1698 |
tensor<int32, [4]> hidden_states_pad_0 = const()[name = tensor<string, []>("hidden_states_pad_0"), val = tensor<int32, [4]>([0, 0, 0, 0])];
|
| 1699 |
+
tensor<int32, [2]> hidden_states_dilations_0 = const()[name = tensor<string, []>("hidden_states_dilations_0"), val = tensor<int32, [2]>([1, 1])];
|
| 1700 |
+
tensor<int32, []> hidden_states_groups_0 = const()[name = tensor<string, []>("hidden_states_groups_0"), val = tensor<int32, []>(1)];
|
| 1701 |
tensor<fp16, [384, 1536, 1, 1]> layers_3_fc2_weight_to_fp16 = const()[name = tensor<string, []>("layers_3_fc2_weight_to_fp16"), val = tensor<fp16, [384, 1536, 1, 1]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(15240576)))];
|
| 1702 |
tensor<fp16, [384]> layers_3_fc2_bias_to_fp16 = const()[name = tensor<string, []>("layers_3_fc2_bias_to_fp16"), val = tensor<fp16, [384]>(BLOBFILE(path = tensor<string, []>("@model_path/weights/weight.bin"), offset = tensor<uint64, []>(16420288)))];
|
| 1703 |
+
tensor<fp16, [1, 384, 1, 1500]> hidden_states_cast_fp16 = conv(bias = layers_3_fc2_bias_to_fp16, dilations = hidden_states_dilations_0, groups = hidden_states_groups_0, pad = hidden_states_pad_0, pad_type = hidden_states_pad_type_0, strides = hidden_states_strides_0, weight = layers_3_fc2_weight_to_fp16, x = input_cast_fp16)[name = tensor<string, []>("hidden_states_cast_fp16")];
|
| 1704 |
tensor<fp16, [1, 384, 1, 1500]> inputs_cast_fp16 = add(x = inputs_15_cast_fp16, y = hidden_states_cast_fp16)[name = tensor<string, []>("inputs_cast_fp16")];
|
| 1705 |
tensor<int32, [1]> out_axes_0 = const()[name = tensor<string, []>("out_axes_0"), val = tensor<int32, [1]>([1])];
|
| 1706 |
tensor<fp16, []> var_2259_to_fp16 = const()[name = tensor<string, []>("op_2259_to_fp16"), val = tensor<fp16, []>(0x1.5p-17)];
|
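For readers skimming the MIL above: every attention and feed-forward projection in this encoder is emitted as a 1x1 `conv` over a `[1, 384, 1, 1500]` fp16 activation, i.e. a per-frame linear map on 384 channels (6 heads of 64), and the function targets `ios16`. The snippet below is a minimal, hypothetical sketch of how one such projection could be traced and converted with coremltools 8.0 / torch 2.4.1 (the versions recorded in this commit); the module, tensor name, and shapes are illustrative and are not whisperkittools' actual conversion code.

```python
import torch
import coremltools as ct


class ProjectionAsConv(torch.nn.Module):
    """Illustrative stand-in for one q/k/v/o projection from the MIL above."""

    def __init__(self, dim: int = 384) -> None:
        super().__init__()
        # A 1x1 Conv2d applied to [B, C, 1, T] acts as a linear layer on each frame.
        self.proj = torch.nn.Conv2d(dim, dim, kernel_size=1)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: [1, 384, 1, 1500], the activation layout used throughout the encoder MIL
        return self.proj(x)


example = torch.rand(1, 384, 1, 1500)
traced = torch.jit.trace(ProjectionAsConv().eval(), example)

# fp16 weights/activations and an iOS 16 deployment target, matching
# "func main<ios16>" and the fp16 tensors seen in this model.mil.
mlmodel = ct.convert(
    traced,
    inputs=[ct.TensorType(name="hidden_states", shape=example.shape)],
    compute_precision=ct.precision.FLOAT16,
    minimum_deployment_target=ct.target.iOS16,
)
```

This is one common reason a converted encoder shows `conv` ops where the reference implementation is usually described in terms of linear projections.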
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/analytics/coremldata.bin
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-oid sha256:
|
| 3 |
size 243
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+oid sha256:160d9737169d22dc01a899e1c6a0a9c44d0637d41f0dedb2a0b7c1422c4035d2
|
| 3 |
size 243
|
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/coremldata.bin
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-oid sha256:
|
| 3 |
-size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+oid sha256:cb3b3f51b080f58b12a6888a5e8ad57419be9e4c6843b96a7577f171b300e660
|
| 3 |
+size 328
|
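The `coremldata.bin` entries in this commit are Git LFS pointer files rather than the binaries themselves: three lines giving the LFS spec version, a `sha256` object id, and the payload size, which is exactly what changes above and in the TextDecoder entries below. A small, hypothetical helper for checking a fetched blob against its pointer (the function names are illustrative, not part of whisperkittools):

```python
import hashlib
from pathlib import Path


def parse_lfs_pointer(pointer_path: Path) -> dict:
    """Parse a Git LFS pointer file such as the coremldata.bin entries in this diff."""
    fields = {}
    for line in pointer_path.read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    # e.g. {"version": "https://git-lfs.github.com/spec/v1",
    #       "oid": "sha256:160d9737...", "size": "243"}
    return fields


def verify_lfs_object(pointer_path: Path, object_path: Path) -> bool:
    """Check that a downloaded object matches the oid and size recorded in its pointer."""
    fields = parse_lfs_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]
    data = object_path.read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == int(fields["size"])
```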
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/metadata.json
CHANGED
|
@@ -50,8 +50,8 @@
|
|
| 50 |
},
|
| 51 |
"userDefinedMetadata" : {
|
| 52 |
"com.github.apple.coremltools.source_dialect" : "TorchScript",
|
| 53 |
-"com.github.apple.coremltools.
|
| 54 |
-"com.github.apple.coremltools.
|
| 55 |
},
|
| 56 |
"inputSchema" : [
|
| 57 |
{
|
|
|
|
| 50 |
},
|
| 51 |
"userDefinedMetadata" : {
|
| 52 |
"com.github.apple.coremltools.source_dialect" : "TorchScript",
|
| 53 |
+"com.github.apple.coremltools.source" : "torch==2.4.1",
|
| 54 |
+"com.github.apple.coremltools.version" : "8.0"
|
| 55 |
},
|
| 56 |
"inputSchema" : [
|
| 57 |
{
|
openai_whisper-tiny.en/MelSpectrogram.mlmodelc/model.mil
CHANGED
|
@@ -1,5 +1,5 @@
|
|
| 1 |
program(1.0)
|
| 2 |
-[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}, {"coremltools-component-torch", "2.4.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.
|
| 3 |
{
|
| 4 |
func main<ios16>(tensor<fp16, [480000]> audio) {
|
| 5 |
tensor<int32, [3]> var_10 = const()[name = tensor<string, []>("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
|
|
|
|
| 1 |
program(1.0)
|
| 2 |
+[buildInfo = dict<tensor<string, []>, tensor<string, []>>({{"coremlc-component-MIL", "3304.5.2"}, {"coremlc-version", "3304.6.2"}, {"coremltools-component-torch", "2.4.1"}, {"coremltools-source-dialect", "TorchScript"}, {"coremltools-version", "8.0"}})]
|
| 3 |
{
|
| 4 |
func main<ios16>(tensor<fp16, [480000]> audio) {
|
| 5 |
tensor<int32, [3]> var_10 = const()[name = tensor<string, []>("op_10"), val = tensor<int32, [3]>([1, 1, 480000])];
|
openai_whisper-tiny.en/TextDecoder.mlmodelc/analytics/coremldata.bin
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-oid sha256:
|
| 3 |
size 243
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+oid sha256:edb99a30ccee8e157fbec80dc3dce49349ba0982391b327d753e10ccab0a01c3
|
| 3 |
size 243
|
openai_whisper-tiny.en/TextDecoder.mlmodelc/coremldata.bin
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-oid sha256:
|
| 3 |
-size
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+oid sha256:65c043a081845d190918b4c7d244f94a55df1a15fae796abedc1f414995542c6
|
| 3 |
+size 633
|
openai_whisper-tiny.en/TextDecoder.mlmodelc/metadata.json
CHANGED
|
@@ -85,7 +85,7 @@
|
|
| 85 |
"userDefinedMetadata" : {
|
| 86 |
"com.github.apple.coremltools.source_dialect" : "TorchScript",
|
| 87 |
"com.github.apple.coremltools.source" : "torch==2.4.1",
|
| 88 |
-"com.github.apple.coremltools.version" : "8.
|
| 89 |
},
|
| 90 |
"inputSchema" : [
|
| 91 |
{
|
|
|
|
| 85 |
"userDefinedMetadata" : {
|
| 86 |
"com.github.apple.coremltools.source_dialect" : "TorchScript",
|
| 87 |
"com.github.apple.coremltools.source" : "torch==2.4.1",
|
| 88 |
+"com.github.apple.coremltools.version" : "8.0"
|
| 89 |
},
|
| 90 |
"inputSchema" : [
|
| 91 |
{
|
openai_whisper-tiny.en/TextDecoder.mlmodelc/model.mil
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|