Lines matching references to 'i', grouped below by enclosing function (original source line ranges noted per group):
In store_ymm() (lines 162-179):

    int i = 0;
    /* Each YMM register must be named literally in the asm template, so the
     * sixteen stores are unrolled rather than looped. */
    __asm__ volatile ("vmovaps %%ymm0, %0" : "=m" (vec256array[i]));
    i++; __asm__ volatile ("vmovaps %%ymm1, %0" : "=m" (vec256array[i]));
    i++; __asm__ volatile ("vmovaps %%ymm2, %0" : "=m" (vec256array[i]));
    i++; __asm__ volatile ("vmovaps %%ymm3, %0" : "=m" (vec256array[i]));
    i++; __asm__ volatile ("vmovaps %%ymm4, %0" : "=m" (vec256array[i]));
    i++; __asm__ volatile ("vmovaps %%ymm5, %0" : "=m" (vec256array[i]));
    i++; __asm__ volatile ("vmovaps %%ymm6, %0" : "=m" (vec256array[i]));
    i++; __asm__ volatile ("vmovaps %%ymm7, %0" : "=m" (vec256array[i]));
    /* ymm8-ymm15 are available only in 64-bit mode. */
    i++; __asm__ volatile ("vmovaps %%ymm8, %0" : "=m" (vec256array[i]));
    i++; __asm__ volatile ("vmovaps %%ymm9, %0" : "=m" (vec256array[i]));
    i++; __asm__ volatile ("vmovaps %%ymm10, %0" : "=m" (vec256array[i]));
    i++; __asm__ volatile ("vmovaps %%ymm11, %0" : "=m" (vec256array[i]));
    i++; __asm__ volatile ("vmovaps %%ymm12, %0" : "=m" (vec256array[i]));
    i++; __asm__ volatile ("vmovaps %%ymm13, %0" : "=m" (vec256array[i]));
    i++; __asm__ volatile ("vmovaps %%ymm14, %0" : "=m" (vec256array[i]));
    i++; __asm__ volatile ("vmovaps %%ymm15, %0" : "=m" (vec256array[i]));
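vmovaps is the aligned-store form, so it faults unless vec256array is 32-byte aligned. A minimal sketch of compatible storage, assuming the 64-bit register count of 16; the type name vector256 is my own, the real file defines its own element type:

    #include <stdint.h>

    #define YMM_MAX 16

    /* One 256-bit YMM register viewed as eight 32-bit lanes, 32-byte aligned
     * so the aligned stores above cannot fault. */
    typedef struct { uint32_t lanes[8]; } vector256 __attribute__((aligned(32)));

    static vector256 vec256array[YMM_MAX];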
In copy_ymm_state_to_vector() (lines 298-304):

    int i;
    ...
    for (i = 0; i < YMM_MAX; i++) {
        bcopy(&xmm[i], &vp[i], sizeof(*xmm));   /* low 128 bits */
        /* offset sizeof(*ymmh) == 16 == sizeof(*xmm), i.e. just past the low half */
        bcopy(&ymmh[i], (void *) ((uint64_t)&vp[i] + sizeof(*ymmh)), sizeof(*ymmh));
    }
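The split halves stitched together here match how the Mach thread-state interface reports the AVX register file. A hedged usage sketch of fetching that state (flavor and type names as I recall them from mach/i386/thread_status.h; error handling elided, and the assumption that xmm/ymmh above come from such a call is mine):

    #include <mach/mach.h>

    x86_avx_state64_t state;
    mach_msg_type_number_t count = x86_AVX_STATE64_COUNT;

    /* Fetch the calling thread's AVX state; state.__fpu_xmm0 and
     * state.__fpu_ymmh0 are the low/high 128-bit halves that
     * copy_ymm_state_to_vector() recombines. */
    kern_return_t kr = thread_get_state(mach_thread_self(),
                                        x86_AVX_STATE64,
                                        (thread_state_t)&state, &count);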
In store_zmm() (lines 447-480):

    int i = 0;
    /* Same unrolled pattern as store_ymm(), extended to the 32 ZMM registers;
     * vmovaps on ZMM requires vecarray to be 64-byte aligned. */
    __asm__ volatile ("vmovaps %%zmm0, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm1, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm2, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm3, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm4, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm5, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm6, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm7, %0" : "=m" (vecarray[i]));
    /* zmm8-zmm31 are available only in 64-bit mode. */
    i++; __asm__ volatile ("vmovaps %%zmm8, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm9, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm10, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm11, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm12, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm13, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm14, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm15, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm16, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm17, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm18, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm19, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm20, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm21, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm22, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm23, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm24, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm25, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm26, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm27, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm28, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm29, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm30, %0" : "=m" (vecarray[i]));
    i++; __asm__ volatile ("vmovaps %%zmm31, %0" : "=m" (vecarray[i]));
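Reading the registers back is only half of such a test; the other half is seeding them with known values first. A hypothetical counterpart (the name populate_zmm and the element type vector512 are my assumptions, not names from the file), loading in the same unrolled style with two registers shown:

    /* Hypothetical: load ZMM registers from a 64-byte-aligned array so that a
     * later store_zmm() can verify they survived (e.g. across a context
     * switch). vector512 is an assumed 64-byte-aligned 512-bit element type. */
    static void populate_zmm(const vector512 *src) {
        __asm__ volatile ("vmovaps %0, %%zmm0" : : "m" (src[0]));
        __asm__ volatile ("vmovaps %0, %%zmm1" : : "m" (src[1]));
        /* ... zmm2 through zmm31 continue the same pattern ... */
    }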
In assert_opmask_eq() (lines 759-760):

    for (int i = 0; i < KARRAY_MAX; i++) {
        if (a[i] != b[i]) {
            ...
In copy_state_to_opmask() (lines 788-789):

    for (int i = 0; i < KARRAY_MAX; i++) {
        bcopy(&k[i], &op[i], sizeof(*op));
    }
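KARRAY_MAX presumably equals 8, matching the eight AVX-512 opmask registers k0-k7. A hypothetical dump routine in the same unrolled style as store_ymm()/store_zmm(), using kmovw (baseline AVX-512F; kmovq would capture full 64-bit masks but requires AVX-512BW); the name store_opmask is mine:

    #include <stdint.h>

    #define KARRAY_MAX 8

    /* Store the low 16 bits of each opmask register to memory. */
    static void store_opmask(uint16_t karray[KARRAY_MAX]) {
        __asm__ volatile ("kmovw %%k0, %0" : "=m" (karray[0]));
        __asm__ volatile ("kmovw %%k1, %0" : "=m" (karray[1]));
        /* ... k2 through k6 follow the same pattern ... */
        __asm__ volatile ("kmovw %%k7, %0" : "=m" (karray[7]));
    }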
In copy_zmm_state_to_vector() (lines 796-813):

    int i;
    ...
    /* Presumably the 64-bit path: zmm0-zmm15 are reported as three pieces
     * (xmm = bits 0-127, ymmh = 128-255, zmmh = 256-511) and reassembled
     * here, while the remaining registers (likely zmm16-zmm31) arrive whole
     * and land in the upper half of vp. The destination offsets work because
     * sizeof(*ymmh) == 16 == sizeof(*xmm) and sizeof(*zmmh) == 32 ==
     * sizeof(*xmm) + sizeof(*ymmh). */
    for (i = 0; i < ZMM_MAX / 2; i++) {
        bcopy(&xmm[i], &vp[i], sizeof(*xmm));
        bcopy(&ymmh[i], (void *) ((uint64_t)&vp[i] + sizeof(*ymmh)), sizeof(*ymmh));
        bcopy(&zmmh[i], (void *) ((uint64_t)&vp[i] + sizeof(*zmmh)), sizeof(*zmmh));
        bcopy(&zmm[i], &vp[(ZMM_MAX / 2) + i], sizeof(*zmm));
    }
    ...
    /* The other path (likely 32-bit, where fewer ZMM registers exist):
     * every register is reassembled from its three pieces. */
    for (i = 0; i < ZMM_MAX; i++) {
        bcopy(&xmm[i], &vp[i], sizeof(*xmm));
        bcopy(&ymmh[i], (void *) ((uint64_t)&vp[i] + sizeof(*ymmh)), sizeof(*ymmh));
        bcopy(&zmmh[i], (void *) ((uint64_t)&vp[i] + sizeof(*zmmh)), sizeof(*zmmh));
    }
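The offset arithmetic above is easy to misread, since sizeof(*ymmh) and sizeof(*zmmh) only coincidentally equal the running byte offsets. A sketch that spells the layout out directly (the 128/128/256-bit split is the architectural one; the function name is mine):

    #include <stdint.h>
    #include <string.h>

    /* Rebuild one 512-bit register from the three pieces the save area keeps:
     * legacy XMM (bits 0-127), YMM high half (128-255), ZMM high half (256-511). */
    static void assemble_zmm(uint8_t dst[64], const uint8_t *xmm_lo,
                             const uint8_t *ymm_hi, const uint8_t *zmm_hi)
    {
        memcpy(dst,      xmm_lo, 16);
        memcpy(dst + 16, ymm_hi, 16);
        memcpy(dst + 32, zmm_hi, 32);
    }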