diff --git a/sm4/gcm_arm64.s b/sm4/gcm_arm64.s
index a4171af..bad2d71 100644
--- a/sm4/gcm_arm64.s
+++ b/sm4/gcm_arm64.s
@@ -107,76 +107,77 @@ TEXT ·gcmSm4Finish(SB),NOSPLIT,$0
 #undef dlen

 #define SM4_SBOX(x, y, z, z1, z2)  \
-    VMOV $0x0F0F0F0F0F0F0F0F, z1.D2;  \ // nibble mask
-    VAND x.B16, z1.B16, z2.B16;  \
+    MOVD $0x0F0F0F0F0F0F0F0F, R19;  \
+    VMOV R19, z1.D2;  \ // nibble mask
+    VAND x.B16, z1.B16, z2.B16;  \
     MOVD $0x9197E2E474720701, R19;  \
     VMOV R19, z.D[0];  \
     MOVD $0xC7C1B4B222245157, R19;  \
     VMOV R19, z.D[1];  \ // m1 low
-    VTBL z2.B16, [z.B16], y.B16;  \
-    VUSHR $4, x.D2, x.D2;  \
-    VAND x.B16, z1.B16, z2.B16;  \
+    VTBL z2.B16, [z.B16], y.B16;  \
+    VUSHR $4, x.D2, x.D2;  \
+    VAND x.B16, z1.B16, z2.B16;  \
     MOVD $0xE240AB09EB49A200, R19;  \
     VMOV R19, z.D[0];  \
     MOVD $0xF052B91BF95BB012, R19;  \
     VMOV R19, z.D[1];  \ // m1 high
-    VTBL z2.B16, [z.B16], z2.B16;  \
-    VEOR y.B16, z2.B16, x.B16;  \
+    VTBL z2.B16, [z.B16], z2.B16;  \
+    VEOR y.B16, z2.B16, x.B16;  \
     MOVD $0x0B0E0104070A0D00, R19;  \
     VMOV R19, z.D[0];  \
     MOVD $0x0306090C0F020508, R19;  \
     VMOV R19, z.D[1];  \ // inverse shift row
-    VTBL z.B16, [x.B16], x.B16;  \
-    AESE ZERO.B16, x.B16;  \
-    VAND x.B16, z1.B16, z2.B16;  \
+    VTBL z.B16, [x.B16], x.B16;  \
+    AESE ZERO.B16, x.B16;  \
+    VAND x.B16, z1.B16, z2.B16;  \
     MOVD $0x5B67F2CEA19D0834, R19;  \
     VMOV R19, z.D[0];  \
     MOVD $0xEDD14478172BBE82, R19;  \
     VMOV R19, z.D[1];  \ // m2 low
-    VTBL z2.B16, [z.B16], y.B16;  \
-    VUSHR $4, x.D2, x.D2;  \
-    VAND x.B16, z1.B16, z2.B16;  \
+    VTBL z2.B16, [z.B16], y.B16;  \
+    VUSHR $4, x.D2, x.D2;  \
+    VAND x.B16, z1.B16, z2.B16;  \
     MOVD $0xAE7201DD73AFDC00, R19;  \
     VMOV R19, z.D[0];  \
     MOVD $0x11CDBE62CC1063BF, R19;  \
     VMOV R19, z.D[1];  \ // m2 high
-    VTBL z2.B16, [z.B16], z2.B16;  \
-    VEOR y.B16, z2.B16, x.B16
+    VTBL z2.B16, [z.B16], z2.B16;  \
+    VEOR y.B16, z2.B16, x.B16

 #define SM4_TAO_L1(x, y, z, z1, z2)  \
-    SM4_SBOX(x, y, z, z1, z2);  \
-    ;  \
+    SM4_SBOX(x, y, z, z1, z2);  \
+    ;  \
     MOVD $0x0605040702010003, R19;  \
     VMOV R19, z.D[0];  \
     MOVD $0x0E0D0C0F0A09080B, R19;  \
     VMOV R19, z.D[1];  \ // r08 mask
-    VTBL z.B16, [x.B16], y.B16;  \
-    VEOR y.B16, x.B16, y.B16;  \
+    VTBL z.B16, [x.B16], y.B16;  \
+    VEOR y.B16, x.B16, y.B16;  \
     MOVD $0x0504070601000302, R19;  \
     VMOV R19, z.D[0];  \
     MOVD $0x0D0C0F0E09080B0A, R19;  \
     VMOV R19, z.D[1];  \ // r16 mask
-    VTBL z.B16, [x.B16], z.B16;  \
-    VEOR z.B16, y.B16, y.B16;  \
-    VSHL $2, y.S4, z.S4;  \
-    VUSHR $30, y.S4, y.S4;  \
-    VORR y.B16, z.B16, y.B16;  \
+    VTBL z.B16, [x.B16], z.B16;  \
+    VEOR z.B16, y.B16, y.B16;  \
+    VSHL $2, y.S4, z.S4;  \
+    VUSHR $30, y.S4, y.S4;  \
+    VORR y.B16, z.B16, y.B16;  \
     MOVD $0x0407060500030201, R19;  \
     VMOV R19, z.D[0];  \
     MOVD $0x0C0F0E0D080B0A09, R19;  \
     VMOV R19, z.D[1];  \ // r24 mask
-    VTBL z.B16, [x.B16], z.B16;  \
-    VEOR z.B16, x.B16, x.B16;  \
-    VEOR y.B16, x.B16, x.B16
+    VTBL z.B16, [x.B16], z.B16;  \
+    VEOR z.B16, x.B16, x.B16;  \
+    VEOR y.B16, x.B16, x.B16

 #define SM4_ROUND(RK, x, y, z, z1, z2, t0, t1, t2, t3)  \
-    MOVW.P 4(RK), R19;  \
-    VMOV R19, x.S4;  \
-    VEOR t1.B16, x.B16, x.B16;  \
-    VEOR t2.B16, x.B16, x.B16;  \
-    VEOR t3.B16, x.B16, x.B16;  \
-    SM4_TAO_L1(x, y, z, z1, z2);  \
-    VEOR x.B16, t0.B16, t0.B16
+    MOVW.P 4(RK), R19;  \
+    VMOV R19, x.S4;  \
+    VEOR t1.B16, x.B16, x.B16;  \
+    VEOR t2.B16, x.B16, x.B16;  \
+    VEOR t3.B16, x.B16, x.B16;  \
+    SM4_TAO_L1(x, y, z, z1, z2);  \
+    VEOR x.B16, t0.B16, t0.B16

 // func gcmSm4Init(productTable *[256]byte, rk []uint32)
 TEXT ·gcmSm4Init(SB),NOSPLIT,$0
@@ -194,7 +195,7 @@ TEXT ·gcmSm4Init(SB),NOSPLIT,$0
     VMOV I, POLY.D[1]
     VEOR ZERO.B16, ZERO.B16, ZERO.B16

-    // Encrypt block 0 with the SM4 keys to generate the hash key H
+    // Encrypt block 0 with the SM4 keys to generate the hash key H
     VEOR B0.B16, B0.B16, B0.B16
     VEOR B1.B16, B1.B16, B1.B16
     VEOR B2.B16, B2.B16, B2.B16
@@ -207,9 +208,9 @@ sm4InitEncLoop:
     SM4_ROUND(RK, K0, K1, K2, K3, K4, B2, B3, B0, B1)
     SM4_ROUND(RK, K0, K1, K2, K3, K4, B3, B0, B1, B2)

-    ADD $16, R3
-    CMP $128, R3
-    BNE sm4InitEncLoop
+    ADD $16, R3
+    CMP $128, R3
+    BNE sm4InitEncLoop

     VMOV B1.S[0], B0.S[1]
     VMOV B2.S[0], B0.S[2]
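
For orientation (not part of the patch): the SM4_SBOX and SM4_TAO_L1 macros touched above are a four-lane vectorization of SM4's per-word T transform, i.e. the S-box substitution (here computed via AESE plus the m1/m2 affine lookups) followed by the linear mix L(B) = B ^ (B<<<2) ^ (B<<<10) ^ (B<<<18) ^ (B<<<24); the r08/r16/r24 VTBL masks supply the byte-granular rotations and the VSHL/VUSHR/VORR trio adds the remaining 2-bit rotation. Below is a minimal scalar sketch in Go of that transform; the names tau, l, t and the empty sbox table are illustrative stand-ins, not code from this repository.

    package sm4ref

    import "math/bits"

    // sbox stands in for the standard SM4 S-box table (values omitted here).
    var sbox [256]byte

    // tau applies the S-box to each byte of a 32-bit word; SM4_SBOX does the
    // same for four lanes at once.
    func tau(x uint32) uint32 {
        return uint32(sbox[x>>24])<<24 | uint32(sbox[(x>>16)&0xff])<<16 |
            uint32(sbox[(x>>8)&0xff])<<8 | uint32(sbox[x&0xff])
    }

    // l is the SM4 linear transform that SM4_TAO_L1 builds from byte
    // rotations (r08/r16/r24 masks) plus a 2-bit rotate.
    func l(b uint32) uint32 {
        return b ^ bits.RotateLeft32(b, 2) ^ bits.RotateLeft32(b, 10) ^
            bits.RotateLeft32(b, 18) ^ bits.RotateLeft32(b, 24)
    }

    // t is the composite round transform; SM4_ROUND effectively computes
    // t0 ^= t(rk ^ t1 ^ t2 ^ t3) for four blocks in parallel.
    func t(x uint32) uint32 { return l(tau(x)) }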