Home
last modified time | relevance | path

Searched refs:v0 (Results 1 – 15 of 15) sorted by relevance

/xnu-11215.81.4/bsd/dev/arm64/
cpu_memcmp_mask.s:68 ld1.16b {v0}, [src1]
71 eor.16b v0, v0, v1
72 and.16b v0, v0, v2
73 umaxv b0, v0.16b
74 umov w0, v0.s[0]
95 ld1.16b {v0, v1}, [src1]
98 eor.16b v0, v0, v2
100 and.16b v0, v0, v4
102 orr.16b v0, v0, v1
103 umaxv b0, v0.16b
[all …]
cpu_copy_in_cksum.s:111 st1.4s {v0, v1, v2, v3}, [x11], #4*16
119 eor.16b v0, v0, v0
122 mov v0.d[0], partial // move partial to 1st 64b lane in v0
156 uadalp.2d v0, v4
170 uadalp.2d v0, v16
188 uadalp.2d v0, v4
197 uadalp.2d v0, v16
207 add.2d v0, v0, v2
224 uadalp.2d v0, v2
231 uadalp.2d v0, v2
[all …]
cpu_in_cksum.s:284 st1.4s {v0, v1, v2, v3}, [x11], #4*16
289 eor.16b v0, v0, v0
307 uadalp.2d v0, v4
322 uadalp.2d v0, v4
327 add.2d v0, v0, v1
329 addp.2d d0, v0
331 add.2d v0, v0, v2
335 ld1.4s {v0, v1, v2, v3}, [sp], #4*16
/xnu-11215.81.4/osfmk/arm64/
strnlen.s:98 orr.16b v0, v0, v1
109 1: uminv.16b b1, v0
131 cmhi.16b v0, v0, v1
132 orr.16b v0, v0, v2
133 uminv.16b b1, v0
166 orr.16b v0, v0, v1
178 1: uminv.16b b1, v0
196 cmhi.16b v0, v0, v1
197 orr.16b v0, v0, v2
198 uminv.16b b1, v0
strncmp.s:129 cmeq.16b v1, v0, v1
130 and.16b v0, v0, v1 // contains zero byte iff mismatch or EOS
131 uminv.16b b1, v0
172 cmeq.16b v1, v0, v1
173 and.16b v0, v0, v1 // contains zero byte iff mismatch or EOS
174 uminv.16b b1, v0
185 cmeq.16b v1, v0, v1
186 and.16b v0, v0, v1 // contains zero byte iff mismatch or EOS
187 uminv.16b b1, v0
199 cmhi.16b v0, v0, v1 // force non-zero lanes to 0xff
[all …]
WKdmDecompress_16k.s:141 st1.4s {v0,v1,v2},[rax],#48
196 ld1.4s {v0,v1},[rbx]
219 ushl.4s v2, v2, v0 // v1 = {0, -2, -4, -6}
220 ushl.4s v3, v3, v0 // v1 = {0, -2, -4, -6}
221 ushl.4s v4, v4, v0 // v1 = {0, -2, -4, -6}
222 ushl.4s v5, v5, v0 // v1 = {0, -2, -4, -6}
243 ld1.4s {v0,v1},[rbx]
250 ushl.4s v2, v2, v0 // v1 = {0, -4, 0, -4}
260 ushl.2s v2, v2, v0 // v1 = {0, -4}
282 ld1.4s {v0,v1,v2,v3},[rbx]
[all …]
WKdmDecompress_4k.s:141 st1.4s {v0,v1,v2},[rax],#48
196 ld1.4s {v0,v1},[rbx]
219 ushl.4s v2, v2, v0 // v1 = {0, -2, -4, -6}
220 ushl.4s v3, v3, v0 // v1 = {0, -2, -4, -6}
221 ushl.4s v4, v4, v0 // v1 = {0, -2, -4, -6}
222 ushl.4s v5, v5, v0 // v1 = {0, -2, -4, -6}
243 ld1.4s {v0,v1},[rbx]
250 ushl.4s v2, v2, v0 // v1 = {0, -4, 0, -4}
260 ushl.2s v2, v2, v0 // v1 = {0, -4}
282 ld1.4s {v0,v1,v2,v3},[rbx]
[all …]
WKdmCompress_16k.s:245 st1.4s {v0,v1,v2,v3},[sp]
358 ld1.2s {v0,v1,v2,v3},[rcx],#32
363 orr.8b v0, v0, v1
366 ushr.2d v1, v0, #30
369 orr.8b v0, v0, v1
372 zip1.2s v0, v0, v2
373 st1.2s {v0},[rdi],#8
470 ld1.4s {v0,v1,v2,v3},[sp],#64
WKdmCompress_4k.s:243 st1.4s {v0,v1,v2,v3},[sp]
356 ld1.2s {v0,v1,v2,v3},[rcx],#32
361 orr.8b v0, v0, v1
364 ushr.2d v1, v0, #30
367 orr.8b v0, v0, v1
370 zip1.2s v0, v0, v2
371 st1.2s {v0},[rdi],#8
468 ld1.4s {v0,v1,v2,v3},[sp],#64
memcmp_zero.s:98 orr.16b v4, v4, v0 // use orr to keep non-zero bytes
114 orr.16b v4, v4, v0 // use orr to keep non-zero bytes
123 umov w0, v0.b[0] // move byte to GPR for testing
lz4_decode_arm64.s:226 tbl v0.16b,{v0.16b},v1.16b // low 16 bytes of pattern
253 tbl v0.16b,{v1.16b},v2.16b // low 16 bytes of pattern in q0
machine_routines_asm.s:1459 dup.4s v0, w2
/xnu-11215.81.4/bsd/dev/i386/
cpu_copy_in_cksum.s:87 #define v0 %xmm0 macro
143 movdqa v0, 0*16(%rsp)
166 pxor v0, v0
169 movq partial, v0 // move partial to 1st 64b lane in v0
225 paddq v4, v0
246 paddq v12, v0
268 paddq v8, v0
288 paddq v12, v0
322 paddq v4, v0
339 paddq v12, v0
[all …]
/xnu-11215.81.4/bsd/dev/arm/
cpu_copy_in_cksum.s:57 #define v0 q0 macro
124 vpush {v0-v3}
132 veor v0, v0, v0
156 vpadal.u16 v0, v8
166 vpadal.u16 v0, v12
180 vpadal.u16 v0, v8
188 vpadal.u16 v0, v12
198 vadd.i32 v0, v0, v2
208 vpadal.u16 v0, v8
212 vpadal.u16 v0, v10
[all …]
/xnu-11215.81.4/osfmk/arm64/corecrypto/
sha256_compress_arm64.s:234 st1.4s {v0, v1, v2, v3}, [x4], #64
241 ld1.4s {v0,v1,v2,v3}, [data], #64 // w0,w1,w2,w3 need to bswap into big-endian
243 rev32.16b v0, v0 // byte swap of 1st 4 ints
254 add.4s v4, v0, v21 // 1st 4 input + K256
300 add.4s v4, v0, v21
320 rev32.16b v0, v0
324 add.4s v4, v0, v21
354 add.4s v4, v0, v21
368 add.4s v4, v0, v21
383 add.4s v4, v0, v21
[all …]