@@ -11,36 +11,54 @@ void calc_superblock(const uint a_offset, const uint b_offset, const uint itid,
     const uint y_idx = i * QUANT_K + 16 * itid;
     const uint nibble_shift = 4 * (itid & 1);
     const uint ib32 = itid / 2; // 0..7
-
     uint ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
+    // Precompute db multiplication factors
+    float db_vals[NUM_ROWS];
     [[unroll]] for (uint n = 0; n < num_rows; ++n) {
         const float d = float(data_a[ibi].d);
-        const uint scale = (data_a[ibi].scales[ib32] >> nibble_shift) & 0xF;
-        const float db = d * (0.5 + scale) * 0.25;
-
+        const uint scale_raw = data_a[ibi].scales[ib32];
+        const uint scale = (scale_raw >> nibble_shift) & 0xF;
+        // Merge constant calculations: d * (0.5 + scale) * 0.25 = d*0.125 + d*scale*0.25
+        db_vals[n] = d * (0.125f + float(scale) * 0.25f);
+        ibi += num_blocks_per_row;
+    }
+    ibi = a_offset / QUANT_K + first_row * num_blocks_per_row + i;
+    [[unroll]] for (uint n = 0; n < num_rows; ++n) {
+        // Preload grid and sign data for both l values
+        vec4 grid0_vals[2], grid1_vals[2];
+        uint sign_vals[2], sign7_vals[2];
         [[unroll]] for (uint l = 0; l < 2; ++l) {
             const uint qs = data_a[ibi].qs[2 * itid + l];
-            const uint sign = qs >> 9;
-            const uint sign7 = bitCount(sign);
-            const vec4 grid0 = vec4(unpack8(iq2xs_grid[qs & 511].x));
-            const vec4 grid1 = vec4(unpack8(iq2xs_grid[qs & 511].y));
-
-            [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
-                vec4 b0 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 0]);
-                vec4 b4 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 2*l + 1]);
-
-                FLOAT_TYPE sum =
-                    fma(FLOAT_TYPE(b0.x), FLOAT_TYPE((sign & 1) != 0 ? -grid0.x : grid0.x),
-                    fma(FLOAT_TYPE(b0.y), FLOAT_TYPE((sign & 2) != 0 ? -grid0.y : grid0.y),
-                    fma(FLOAT_TYPE(b0.z), FLOAT_TYPE((sign & 4) != 0 ? -grid0.z : grid0.z),
-                    fma(FLOAT_TYPE(b0.w), FLOAT_TYPE((sign & 8) != 0 ? -grid0.w : grid0.w),
-                    fma(FLOAT_TYPE(b4.x), FLOAT_TYPE((sign & 16) != 0 ? -grid1.x : grid1.x),
-                    fma(FLOAT_TYPE(b4.y), FLOAT_TYPE((sign & 32) != 0 ? -grid1.y : grid1.y),
-                    fma(FLOAT_TYPE(b4.z), FLOAT_TYPE((sign & 64) != 0 ? -grid1.z : grid1.z),
-                    fma(FLOAT_TYPE(b4.w), FLOAT_TYPE((sign7 & 1) != 0 ? -grid1.w : grid1.w),
-                    FLOAT_TYPE(0.0)))))))));
-                temp[j][n] = fma(db, sum, temp[j][n]);
+            sign_vals[l] = qs >> 9;
+            sign7_vals[l] = bitCount(sign_vals[l]);
+            const uvec2 grid_data = iq2xs_grid[qs & 511];
+            grid0_vals[l] = vec4(unpack8(grid_data.x));
+            grid1_vals[l] = vec4(unpack8(grid_data.y));
+        }
+        // Accumulate per-column sums, reusing the preloaded grid and sign data
+        [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
+            FLOAT_TYPE sum = FLOAT_TYPE(0.0);
+            [[unroll]] for (uint l = 0; l < 2; ++l) {
+                const uint sign = sign_vals[l];
+                const uint sign7 = sign7_vals[l];
+                const vec4 grid0 = grid0_vals[l];
+                const vec4 grid1 = grid1_vals[l];
+                // Compute the shared B index once for both vec4 loads
+                const uint b_idx = (j * p.batch_stride_b + b_offset + y_idx) / 4 + 2 * l;
+                const vec4 b0 = vec4(data_b_v4[b_idx + 0]);
+                const vec4 b4 = vec4(data_b_v4[b_idx + 1]);
+                sum +=
+                    fma(FLOAT_TYPE(b0.x), FLOAT_TYPE((sign & 1) != 0 ? -grid0.x : grid0.x),
+                    fma(FLOAT_TYPE(b0.y), FLOAT_TYPE((sign & 2) != 0 ? -grid0.y : grid0.y),
+                    fma(FLOAT_TYPE(b0.z), FLOAT_TYPE((sign & 4) != 0 ? -grid0.z : grid0.z),
+                    fma(FLOAT_TYPE(b0.w), FLOAT_TYPE((sign & 8) != 0 ? -grid0.w : grid0.w),
+                    fma(FLOAT_TYPE(b4.x), FLOAT_TYPE((sign & 16) != 0 ? -grid1.x : grid1.x),
+                    fma(FLOAT_TYPE(b4.y), FLOAT_TYPE((sign & 32) != 0 ? -grid1.y : grid1.y),
+                    fma(FLOAT_TYPE(b4.z), FLOAT_TYPE((sign & 64) != 0 ? -grid1.z : grid1.z),
+                    fma(FLOAT_TYPE(b4.w), FLOAT_TYPE((sign7 & 1) != 0 ? -grid1.w : grid1.w),
+                    FLOAT_TYPE(0.0)))))))));
             }
+            temp[j][n] = fma(FLOAT_TYPE(db_vals[n]), sum, temp[j][n]);
         }
         ibi += num_blocks_per_row;
     }
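
Note on the scale rewrite in the first hunk: it relies on the identity d * (0.5 + scale) * 0.25 == d * (0.125 + 0.25 * scale), so the per-block factor db_vals[n] can be produced with one multiply-add per row. The minimal host-side sketch below, in plain C rather than GLSL, checks that the two forms agree for every possible 4-bit scale nibble; the decode_db helper name and the sample d value are made up for illustration and are not part of the shader.

#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the shader's db_vals[] computation:
   original form d * (0.5 + scale) * 0.25, refactored to d * (0.125 + 0.25 * scale). */
static float decode_db(float d, uint32_t scales_byte, uint32_t nibble_shift) {
    const uint32_t scale = (scales_byte >> nibble_shift) & 0xF; /* 4-bit sub-block scale */
    return d * (0.125f + 0.25f * (float)scale);
}

int main(void) {
    const float d = 0.37f; /* arbitrary block scale for the check */
    for (uint32_t scale = 0; scale < 16; ++scale) {
        const float original   = d * (0.5f + (float)scale) * 0.25f;
        const float refactored = decode_db(d, scale, 0); /* nibble already in the low 4 bits */
        assert(fabsf(original - refactored) < 1e-6f);
        printf("scale=%2u  db=%f\n", (unsigned)scale, refactored);
    }
    return 0;
}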