	.section ".text.hot.memcpy","ax"
	.balign 4
	.globl	memcpy

//----------------------------------------------------------------------
// void *memcpy(void *dst, const void *src, size_t n)
// In:       a0 = dst, a1 = src, a2 = n (bytes)
// Out:      a0 = dst (preserved; a3 is used as the write cursor)
// Clobbers: a3, a4, a5, flags-free (RISC-V has no flags)
//
// Dispatcher: when dst, src AND n are all 4-byte aligned, fall
// through into the word-wise copier below; otherwise branch to the
// byte-wise copier at the end of the file.
//----------------------------------------------------------------------
memcpy:
#if 0
	j	__memcpy_bytewise
#else
	or	a5, a0, a1		// a5 = dst | src | n — if any low
	or	a5, a5, a2		//   2 bits are set, something is
	andi	a5, a5, 3		//   not 4-byte aligned
	.option	norvc			// force a full 4-byte branch so the
	bnez	a5, __memcpy_bytewise	//   fall-through into __memcpy_aligned
	.option rvc			//   needs no alignment padding
#endif
	.type memcpy, @function
	.size memcpy, . - memcpy

	.balign 4
	.globl __memcpy_aligned
//----------------------------------------------------------------------
// Word-wise copy, 8x unrolled, entered Duff's-device style.
// Precondition: dst (a0), src (a1) and n (a2) are 4-byte aligned.
// a3 = dst cursor, a4 = one past the end of dst, a5 = scratch word.
//
// The computed entry "jr a5, %lo(.L_case0)" with a5 = -(n & 28) jumps
// to absolute address %lo(.L_case0) - (n & 28); each case arm below is
// exactly 4 bytes (one c.lw + one c.sw), so this lands on the arm that
// copies the leading partial chunk of (n mod 32) bytes.
// NOTE(review): this only works when the %hi() part of .L_case0 is
// zero, i.e. this section is linked within the low 2 KiB of the
// address space (e.g. flash mapped at 0) — TODO confirm linker script.
// .option norelax keeps the linker from rewriting the jr sequence.
//----------------------------------------------------------------------
__memcpy_aligned:
	add	a4, a0, a2		// a4 = dst + n (copy end sentinel)
	mv	a3, a0		// a0 is also return value
	andi	a2, a2, 7*4		// a2 = n mod 32 = leading partial chunk
	.option	norelax
	sub	a5, zero, a2		// a5 = -(n & 28)
	jr	a5, %lo(.L_case0)	// enter unrolled body at matching arm
	.option	relax
	.balign	4
.L_aligned_loop:
	.option norvc
	li	a2, 32			// after the first pass, every chunk is
	.option rvc			//   a full 32 bytes; norvc pins this to
					//   a 4-byte encoding (c.li cannot hold
					//   32) so the arm offsets stay fixed
	c.lw	a5, 28(a1)
	c.sw	a5, 28(a3)
.L_case7:
	c.lw	a5, 24(a1)
	c.sw	a5, 24(a3)
.L_case6:
	c.lw	a5, 20(a1)
	c.sw	a5, 20(a3)
.L_case5:
	c.lw	a5, 16(a1)
	c.sw	a5, 16(a3)
.L_case4:
	c.lw	a5, 12(a1)
	c.sw	a5, 12(a3)
.L_case3:
	c.lw	a5,  8(a1)
	c.sw	a5,  8(a3)
.L_case2:
	c.lw	a5,  4(a1)
	c.sw	a5,  4(a3)
.L_case1:
	c.lw	a5,  0(a1)
	c.sw	a5,  0(a3)
.L_case0:
	add	a1, a1, a2		// advance src by the bytes just copied
	add	a3, a3, a2		// advance dst likewise
	bltu	a3, a4, .L_aligned_loop	// more chunks left? (n == 0 falls out
.L_empty:				//   immediately: a3 == a4 on entry)
	ret
	.type	__memcpy_aligned, @function
	.size	__memcpy_aligned, . - __memcpy_aligned

	// This can be used by I/O devices that need bytewise accesses
	.balign 4
//----------------------------------------------------------------------
// Byte-wise copy: same register contract as memcpy; handles any
// alignment and any n (n == 0 copies nothing).
// a3 = dst cursor, a4 = one past the end of dst, a5 = byte scratch.
//----------------------------------------------------------------------
__memcpy_bytewise:
	.option norvc			// 4-byte encodings here (2 insns = 8 B)
	add	a4, a0, a2		//   so the .balign below inserts no
	mv	a3, a0			//   padding before the hot loop —
	.option	rvc			//   presumably; verify with listing
	.balign 4
.L_bytewise_loop:
	lbu	a5, 0(a1)		// *dst++ = *src++ one byte at a time
	sb	a5, 0(a3)
	addi	a1, a1, 1
	addi	a3, a3, 1
	bltu	a3, a4, .L_bytewise_loop
	ret
	.type	__memcpy_bytewise, @function
	.size	__memcpy_bytewise, . - __memcpy_bytewise
 |