asm-rs

A pure Rust multi-architecture assembly engine for offensive security. Zero unsafe, no_std-compatible, designed for embedding in exploit compilers, JIT engines, security tools, and shellcode generators.


Project maintained by hupe1980 Hosted on GitHub Pages — Theme by mattgraham

x86 / x86-64 Instruction Reference

Complete reference for all x86 and x86-64 instructions supported by asm-rs, including SSE through AVX-512, AES-NI, SHA, BMI, and more.


Notation

Symbol Meaning
r64 64-bit general-purpose register (rax–r15)
r32 32-bit general-purpose register (eax–r15d)
r16 16-bit general-purpose register (ax–r15w)
r8 8-bit register (al–r15b, plus legacy high-byte ah/bh/ch/dh)
[mem] Memory operand with optional SIB addressing
imm8/16/32/64 Immediate of specified bit width
label Label reference (resolved to offset)
xmm SSE register (xmm0–xmm15)
ymm AVX register (ymm0–ymm15)
zmm AVX-512 register (zmm0–zmm31)

Data Movement

MOV — Move

mov r64, r64                # Register to register
mov r64, imm64              # 64-bit immediate (movabs)
mov r32, imm32              # 32-bit immediate (zero-extends to 64)
mov r64, [mem]              # Load from memory
mov [mem], r64              # Store to memory
mov r64, imm32              # Sign-extended 32-bit immediate

MOVZX — Move with Zero Extension

movzx r32, r8               # Zero-extend 8-bit to 32-bit
movzx r32, r16              # Zero-extend 16-bit to 32-bit
movzx r64, r8               # Zero-extend 8-bit to 64-bit
movzx r32, byte ptr [mem]   # Zero-extend memory byte
movzx r32, word ptr [mem]   # Zero-extend memory word

MOVSX / MOVSXD — Move with Sign Extension

movsx r32, r8               # Sign-extend 8-bit to 32-bit
movsx r32, r16              # Sign-extend 16-bit to 32-bit
movsx r64, r8               # Sign-extend 8-bit to 64-bit
movsxd r64, r32             # Sign-extend 32-bit to 64-bit

LEA — Load Effective Address

lea r64, [base + index*scale + disp]
lea rax, [rbx + rcx*8 + 16]

XCHG — Exchange

xchg r64, r64               # Register-register exchange
xchg r32, r32
xchg r16, r16
xchg r64, [mem]             # Register-memory exchange
xchg [mem], r64             # Memory-register exchange

PUSH / POP — Stack Operations

push r64                    # Push register
push r16                    # Push 16-bit register (with 0x66 prefix)
push imm8                   # Push sign-extended 8-bit immediate
push imm32                  # Push sign-extended 32-bit immediate
push [mem]                  # Push memory
push fs                     # Push FS segment register
push gs                     # Push GS segment register
pop r64                     # Pop to register
pop r16                     # Pop 16-bit register
pop [mem]                   # Pop to memory
pop fs                      # Pop FS segment register
pop gs                      # Pop GS segment register

In 64-bit mode, push/pop of 32-bit GPRs and CS/DS/ES/SS segment registers is rejected.

MOVBE — Move Data After Byte Swap

movbe r32, [mem]            # Load with byte swap (big-endian load)
movbe r64, [mem]
movbe [mem], r32            # Store with byte swap (big-endian store)
movbe [mem], r64

MOVNTI — Non-Temporal Store

movnti [mem], r32           # Non-temporal store (bypasses cache)
movnti [mem], r64

Arithmetic

ADD / SUB / ADC / SBB

add r64, r64                # Register-register
add r64, imm32              # Register-immediate
add r64, [mem]              # Register-memory
add [mem], r64              # Memory-register
add [mem], imm32            # Memory-immediate
add al, imm8               # Accumulator short form
add rax, imm32              # Accumulator short form (sign-extended)
# Same forms for sub, adc (add with carry), sbb (subtract with borrow)

INC / DEC

inc r64                     # Increment register
inc r32
inc byte ptr [mem]          # Increment memory
dec r64                     # Decrement register
dec [mem]

NEG — Two’s Complement Negation

neg r64
neg r32
neg byte ptr [mem]

MUL / IMUL — Multiply

mul r64                     # Unsigned: rdx:rax = rax * r64
mul r32                     # Unsigned: edx:eax = eax * r32
imul r64, r64               # Signed: r64 = r64 * r64
imul r64, r64, imm32        # Signed: r64 = r64 * imm32
imul r64, [mem]             # Signed: r64 = r64 * [mem]

DIV / IDIV — Divide

div r64                     # Unsigned: rax = rdx:rax / r64, rdx = remainder
div r32                     # Unsigned: eax = edx:eax / r32
idiv r64                    # Signed division
idiv r32

Logic

and r64, r64                # Bitwise AND
and r64, imm32
or r64, r64                 # Bitwise OR
or r64, imm32
xor r64, r64                # Bitwise XOR
xor r64, imm32
not r64                     # Bitwise NOT
test r64, r64               # AND without storing result (flags only)
test r64, imm32
cmp r64, r64                # SUB without storing result (flags only)
cmp r64, imm32

Shifts & Rotates

shl r64, imm8               # Shift left
shl r64, cl                 # Shift left by CL
shr r64, imm8               # Shift right (logical)
sar r64, imm8               # Shift right (arithmetic)
rol r64, imm8               # Rotate left
ror r64, imm8               # Rotate right
rcl r64, imm8               # Rotate through carry left
rcr r64, imm8               # Rotate through carry right
shld r64, r64, imm8         # Double-precision shift left
shld r64, r64, cl
shrd r64, r64, imm8         # Double-precision shift right
shrd r64, r64, cl

Control Flow

JMP / CALL / RET

jmp label                   # Unconditional jump (rel8 or rel32)
jmp r64                     # Indirect jump
jmp [mem]                   # Memory-indirect jump
call label                  # Call (rel32)
call r64                    # Indirect call
call [mem]                  # Memory-indirect call
ret                         # Return
ret imm16                   # Return and pop imm16 bytes
retf                        # Far return (lret)

Jcc — Conditional Jumps

All 16 condition codes (+ aliases):

je label      / jz label       # Equal / Zero
jne label     / jnz label      # Not equal / Not zero
jb label      / jc label       # Below / Carry (unsigned <)
jae label     / jnc label      # Above or equal / No carry (unsigned >=)
ja label      / jnbe label     # Above (unsigned >)
jbe label     / jna label      # Below or equal (unsigned <=)
jl label      / jnge label     # Less (signed <)
jge label     / jnl label      # Greater or equal (signed >=)
jg label      / jnle label     # Greater (signed >)
jle label     / jng label      # Less or equal (signed <=)
js label                       # Sign (negative)
jns label                      # Not sign (positive/zero)
jo label                       # Overflow
jno label                      # Not overflow
jp label      / jpe label      # Parity even
jnp label     / jpo label      # Parity odd

SETcc — Set Byte on Condition

sete r8                     # Set byte if equal
setne r8                    # Set byte if not equal
setb r8                     # Set byte if below
# ... all 16 conditions

CMOVcc — Conditional Move

cmove r64, r64              # Move if equal
cmovne r64, r64             # Move if not equal
cmovb r64, [mem]            # Move if below
# ... all 16 conditions

LOOP / JECXZ / JRCXZ

loop label                  # Decrement RCX, jump if != 0
loope label                 # Decrement RCX, jump if != 0 and ZF=1
loopne label                # Decrement RCX, jump if != 0 and ZF=0
jecxz label                 # Jump if ECX == 0
jrcxz label                 # Jump if RCX == 0

LOOP/JECXZ/JRCXZ automatically relax via inversion + JMP rel32 when the target is out of rel8 range.

ENTER / LEAVE

enter imm16, imm8           # Create stack frame
leave                       # Destroy stack frame (mov rsp, rbp; pop rbp)

Bit Manipulation

bt r64, r64                 # Bit test → CF
bt r64, imm8
bts r64, r64                # Bit test and set
btr r64, r64                # Bit test and reset
btc r64, r64                # Bit test and complement
bsf r64, r64                # Bit scan forward
bsr r64, r64                # Bit scan reverse
popcnt r64, r64             # Population count
lzcnt r64, r64              # Leading zero count
tzcnt r64, r64              # Trailing zero count
bswap r64                   # Byte swap

BMI1 / BMI2

andn r64, r64, r64          # AND-NOT
bextr r64, r64, r64         # Bit field extract
blsi r64, r64               # Extract lowest set bit
blsmsk r64, r64             # Mask up to lowest set bit
blsr r64, r64               # Reset lowest set bit
bzhi r64, r64, r64          # Zero high bits
mulx r64, r64, r64          # Unsigned multiply (no flags)
pdep r64, r64, r64          # Parallel bit deposit
pext r64, r64, r64          # Parallel bit extract
rorx r64, r64, imm8         # Rotate right (no flags)
sarx r64, r64, r64          # Shift arithmetic right (no flags)
shlx r64, r64, r64          # Shift left (no flags)
shrx r64, r64, r64          # Shift right (no flags)

ADX — Multi-Precision Add

adcx r64, r64               # Add with carry (CF only)
adox r64, r64               # Add with overflow (OF only)

Atomic Operations

lock cmpxchg [mem], r64     # Compare and exchange
lock xadd [mem], r64        # Exchange and add
lock cmpxchg8b [mem]        # Compare and exchange 8 bytes
lock cmpxchg16b [mem]       # Compare and exchange 16 bytes
lock add [mem], r64         # Atomic add
lock inc [mem]              # Atomic increment
# LOCK prefix works with: ADD, ADC, AND, BTC, BTR, BTS, CMPXCHG,
#   DEC, INC, NEG, NOT, OR, SBB, SUB, XOR, XADD

String Operations

movsb                       # Move byte [RSI] → [RDI]
movsw                       # Move word
movsd                       # Move dword
movsq                       # Move qword
stosb                       # Store AL → [RDI]
stosw / stosd / stosq
lodsb                       # Load [RSI] → AL
lodsw / lodsd / lodsq
cmpsb                       # Compare [RSI] vs [RDI]
cmpsw / cmpsd / cmpsq
scasb                       # Scan AL vs [RDI]
scasw / scasd / scasq

# With repeat prefixes
rep movsb                   # REP: repeat while RCX > 0
rep stosq
repe cmpsb                  # REPE/REPZ: repeat while equal
repne scasb                 # REPNE/REPNZ: repeat while not equal

SSE / SSE2

Arithmetic

addps xmm, xmm             # Packed single-precision add
addpd xmm, xmm             # Packed double-precision add
addss xmm, xmm             # Scalar single-precision add
addsd xmm, xmm             # Scalar double-precision add
# Same pattern: subps/pd/ss/sd, mulps/pd/ss/sd, divps/pd/ss/sd
# sqrtps/pd/ss/sd, rcpps/ss, rsqrtps/ss, maxps/pd/ss/sd, minps/pd/ss/sd

Logical

andps xmm, xmm
orps xmm, xmm
xorps xmm, xmm
andnps xmm, xmm
# Same for packed-double: andpd, orpd, xorpd, andnpd

Data Movement

movaps xmm, xmm            # Aligned packed single
movups xmm, [mem]           # Unaligned packed single
movapd xmm, xmm            # Aligned packed double
movupd xmm, [mem]           # Unaligned packed double
movss xmm, xmm             # Scalar single
movsd xmm, xmm             # Scalar double
movdqa xmm, xmm            # Aligned double quadword
movdqu xmm, [mem]           # Unaligned double quadword
movlps xmm, [mem]           # Low packed single
movhps xmm, [mem]           # High packed single
movntps [mem], xmm          # Non-temporal packed single
movntpd [mem], xmm          # Non-temporal packed double
movntdq [mem], xmm          # Non-temporal double quadword

Unpack & Shuffle

unpcklps xmm, xmm          # Unpack low packed single
unpckhps xmm, xmm          # Unpack high packed single
shufps xmm, xmm, imm8      # Shuffle packed single
shufpd xmm, xmm, imm8      # Shuffle packed double

Compare

comiss xmm, xmm            # Ordered compare scalar single
comisd xmm, xmm            # Ordered compare scalar double
ucomiss xmm, xmm           # Unordered compare scalar single
ucomisd xmm, xmm           # Unordered compare scalar double
cmpps xmm, xmm, imm8       # Compare packed single
cmppd xmm, xmm, imm8       # Compare packed double

Conversion

cvtps2pd xmm, xmm          # Packed single → packed double
cvtpd2ps xmm, xmm          # Packed double → packed single
cvtss2sd xmm, xmm          # Scalar single → scalar double
cvtsd2ss xmm, xmm          # Scalar double → scalar single
cvtdq2ps xmm, xmm          # Packed int32 → packed single
cvtps2dq xmm, xmm          # Packed single → packed int32
cvttps2dq xmm, xmm         # Packed single → packed int32 (truncate)
cvtsi2ss xmm, r32           # Scalar int → scalar single
cvtsi2sd xmm, r64           # Scalar int → scalar double
cvtss2si r32, xmm           # Scalar single → scalar int
cvtsd2si r64, xmm           # Scalar double → scalar int

Integer (SSE2)

paddb xmm, xmm             # Packed byte add
paddw xmm, xmm             # Packed word add
paddd xmm, xmm             # Packed dword add
paddq xmm, xmm             # Packed qword add
psubb/w/d/q xmm, xmm       # Packed subtract
pmullw xmm, xmm            # Packed multiply low word
pmulhw xmm, xmm            # Packed multiply high word (signed)
pmulhuw xmm, xmm           # Packed multiply high word (unsigned)
pmuludq xmm, xmm           # Packed multiply unsigned dword
pcmpeqb/w/d xmm, xmm       # Packed compare equal
pcmpgtb/w/d xmm, xmm       # Packed compare greater-than
pxor xmm, xmm              # Packed XOR
por xmm, xmm               # Packed OR
pand xmm, xmm              # Packed AND
pandn xmm, xmm             # Packed AND-NOT
psllw/d/q xmm, xmm         # Packed shift left logical
psrlw/d/q xmm, xmm         # Packed shift right logical
psraw/d xmm, xmm           # Packed shift right arithmetic
pshufd xmm, xmm, imm8      # Shuffle dwords
pshufhw xmm, xmm, imm8     # Shuffle high words
pshuflw xmm, xmm, imm8     # Shuffle low words
punpcklbw/wd/dq/qdq xmm, xmm # Unpack low
punpckhbw/wd/dq/qdq xmm, xmm # Unpack high
packuswb xmm, xmm          # Pack with unsigned saturation
packsswb/dw xmm, xmm       # Pack with signed saturation

GP ↔ XMM Transfer

movd xmm, r32               # Move dword GP → XMM
movd r32, xmm               # Move dword XMM → GP
movq xmm, r64               # Move qword GP → XMM
movq r64, xmm               # Move qword XMM → GP

SSE3 / SSSE3

# SSE3
addsubps xmm, xmm          # Alternating add/subtract single
addsubpd xmm, xmm          # Alternating add/subtract double
haddps xmm, xmm            # Horizontal add single
haddpd xmm, xmm            # Horizontal add double
hsubps/pd xmm, xmm         # Horizontal subtract
movsldup xmm, xmm          # Duplicate even single-precision
movshdup xmm, xmm          # Duplicate odd single-precision
movddup xmm, xmm           # Duplicate low double
lddqu xmm, [mem]           # Unaligned load (optimized)

# SSSE3
pshufb xmm, xmm            # Packed shuffle bytes
phaddw/d xmm, xmm          # Packed horizontal add
phsubw/d xmm, xmm          # Packed horizontal subtract
phaddsw xmm, xmm           # Packed horizontal add (saturating)
phsubsw xmm, xmm           # Packed horizontal subtract (saturating)
pmaddubsw xmm, xmm         # Multiply-accumulate unsigned/signed bytes
pmulhrsw xmm, xmm          # Packed multiply high with round
psignb/w/d xmm, xmm        # Packed sign (negate/zero/keep)
pabsb/w/d xmm, xmm         # Packed absolute value
palignr xmm, xmm, imm8     # Packed align right

SSE4.1 / SSE4.2

# SSE4.1
ptest xmm, xmm             # Logical compare (→ ZF, CF)
roundps xmm, xmm, imm8     # Round packed single
roundpd xmm, xmm, imm8     # Round packed double
roundss xmm, xmm, imm8     # Round scalar single
roundsd xmm, xmm, imm8     # Round scalar double
blendps xmm, xmm, imm8     # Blend packed single (by mask)
blendpd xmm, xmm, imm8     # Blend packed double
pblendw xmm, xmm, imm8     # Blend packed words
dpps xmm, xmm, imm8        # Dot product packed single
dppd xmm, xmm, imm8        # Dot product packed double
insertps xmm, xmm, imm8    # Insert scalar single
extractps r32, xmm, imm8   # Extract scalar single
pinsrb xmm, r32, imm8      # Insert byte
pinsrd xmm, r32, imm8      # Insert dword
pinsrq xmm, r64, imm8      # Insert qword
pextrb r32, xmm, imm8      # Extract byte
pextrd r32, xmm, imm8      # Extract dword
pextrq r64, xmm, imm8      # Extract qword
pextrw r32, xmm, imm8      # Extract word
pmovzxbw/bd/bq/wd/wq/dq xmm, xmm   # Packed zero-extend
pmovsxbw/bd/bq/wd/wq/dq xmm, xmm   # Packed sign-extend
pmuldq xmm, xmm            # Packed multiply dword → qword (signed)
pmulld xmm, xmm            # Packed multiply low dword
pcmpeqq xmm, xmm           # Packed compare equal qword
packusdw xmm, xmm          # Pack dword → word (unsigned saturation)
pminuw/sb/sd/ud xmm, xmm   # Packed minimum (various types)
pmaxuw/sb/sd/ud xmm, xmm   # Packed maximum (various types)
mpsadbw xmm, xmm, imm8     # Multiple sum of absolute differences
movntdqa xmm, [mem]         # Non-temporal aligned load

# SSE4.2
pcmpgtq xmm, xmm           # Packed compare greater-than qword
pcmpistrm xmm, xmm, imm8   # String compare (implicit length, mask)
pcmpistri xmm, xmm, imm8   # String compare (implicit length, index)
pcmpestrm xmm, xmm, imm8   # String compare (explicit length, mask)
pcmpestri xmm, xmm, imm8   # String compare (explicit length, index)
crc32 r32, r8               # CRC32C
crc32 r32, r32
crc32 r64, r8

AES-NI / SHA / PCLMULQDQ

# AES-NI
aesenc xmm, xmm            # AES encryption round
aesenclast xmm, xmm        # AES final encryption round
aesdec xmm, xmm            # AES decryption round
aesdeclast xmm, xmm        # AES final decryption round
aesimc xmm, xmm            # AES inverse mix columns
aeskeygenassist xmm, xmm, imm8  # AES key schedule assist

# SHA
sha1rnds4 xmm, xmm, imm8   # SHA-1 hash round
sha1nexte xmm, xmm         # SHA-1 next E
sha1msg1 xmm, xmm          # SHA-1 message schedule 1
sha1msg2 xmm, xmm          # SHA-1 message schedule 2
sha256rnds2 xmm, xmm       # SHA-256 hash round (implicit xmm0)
sha256msg1 xmm, xmm        # SHA-256 message schedule 1
sha256msg2 xmm, xmm        # SHA-256 message schedule 2

# PCLMULQDQ
pclmulqdq xmm, xmm, imm8   # Carry-less multiplication

AVX / AVX2

AVX instructions use VEX encoding with 3-operand non-destructive form.

Arithmetic

vaddps ymm, ymm, ymm       # Packed single add (256-bit)
vaddpd ymm, ymm, ymm       # Packed double add
vaddss xmm, xmm, xmm       # Scalar single add
vaddsd xmm, xmm, xmm       # Scalar double add
# vsubps/pd/ss/sd, vmulps/pd/ss/sd, vdivps/pd/ss/sd
# vsqrtps/pd/ss/sd, vmaxps/pd/ss/sd, vminps/pd/ss/sd

Logical

vandps ymm, ymm, ymm
vorps ymm, ymm, ymm
vxorps ymm, ymm, ymm
# vandpd, vorpd, vxorpd

Data Movement

vmovaps ymm, ymm            # Aligned packed single (256-bit)
vmovups ymm, [mem]           # Unaligned packed single
vmovapd ymm, ymm            # Aligned packed double
vmovupd ymm, [mem]           # Unaligned packed double
vmovdqa ymm, ymm            # Aligned double quadword
vmovdqu ymm, [mem]           # Unaligned double quadword
vmovss xmm, xmm, xmm        # Scalar single
vmovsd xmm, xmm, xmm        # Scalar double

Compare / Blend / Shuffle

vcmpps ymm, ymm, ymm, imm8 # Compare packed single
vcmppd ymm, ymm, ymm, imm8
vblendps ymm, ymm, ymm, imm8
vblendpd ymm, ymm, ymm, imm8
vroundps ymm, ymm, imm8
vroundpd ymm, ymm, imm8
vshufps ymm, ymm, ymm, imm8
vshufpd ymm, ymm, ymm, imm8
vunpcklps/pd ymm, ymm, ymm
vunpckhps/pd ymm, ymm, ymm

FMA3 (Fused Multiply-Add)

60 variants covering all forms:

vfmadd132ps xmm, xmm, xmm    # a = a*c + b
vfmadd213ps xmm, xmm, xmm    # a = b*a + c
vfmadd231ps xmm, xmm, xmm    # a = b*c + a
# All: vfmadd/vfmsub/vfnmadd/vfnmsub × 132/213/231 × ps/pd/ss/sd (48)
# Plus: vfmaddsub/vfmsubadd × 132/213/231 × ps/pd (12) — 60 total

Integer (AVX2)

vpaddb/w/d/q ymm, ymm, ymm
vpsubb/w/d/q ymm, ymm, ymm
vpmullw ymm, ymm, ymm
vpmulld ymm, ymm, ymm
vpmuludq ymm, ymm, ymm
vpcmpeqb/w/d/q ymm, ymm, ymm
vpcmpgtb/w/d/q ymm, ymm, ymm
vpand/por/pxor/pandn ymm, ymm, ymm
vpsllw/d/q ymm, ymm, xmm     # Shift left (count in xmm)
vpsllw/d/q ymm, ymm, imm8     # Shift left (immediate)
vpsrlw/d/q ymm, ymm, xmm
vpsraw/d ymm, ymm, xmm
vpslldq ymm, ymm, imm8        # Byte shift left
vpsrldq ymm, ymm, imm8        # Byte shift right

Permute / Broadcast / Gather

vpermilps ymm, ymm, ymm     # Permute in-lane single
vpermilpd ymm, ymm, ymm     # Permute in-lane double
vperm2f128 ymm, ymm, ymm, imm8  # Permute 128-bit lanes
vperm2i128 ymm, ymm, ymm, imm8
vpermd ymm, ymm, ymm        # Permute dwords
vpermps ymm, ymm, ymm       # Permute packed single
vpermq ymm, ymm, imm8       # Permute qwords
vpermpd ymm, ymm, imm8      # Permute packed double
vbroadcastss ymm, xmm       # Broadcast scalar single
vbroadcastsd ymm, xmm       # Broadcast scalar double
vbroadcastf128 ymm, [mem]   # Broadcast 128-bit
vpbroadcastb/w/d/q ymm, xmm # Broadcast integer element
vbroadcasti128 ymm, [mem]   # Broadcast integer 128-bit

Insert / Extract / Variable Shifts

vinsertf128 ymm, ymm, xmm, imm8   # Insert 128-bit float
vinserti128 ymm, ymm, xmm, imm8   # Insert 128-bit integer
vextractf128 xmm, ymm, imm8       # Extract 128-bit float
vextracti128 xmm, ymm, imm8       # Extract 128-bit integer
vpsllvd/q ymm, ymm, ymm           # Variable shift left
vpsrlvd/q ymm, ymm, ymm           # Variable shift right
vpsravd ymm, ymm, ymm             # Variable arithmetic shift right
vmaskmovps ymm, ymm, [mem]        # Masked load/store
vpmaskmovd ymm, ymm, [mem]
vpblendd ymm, ymm, ymm, imm8      # Blend dwords

VEX State Management

vzeroall                     # Zero all YMM registers
vzeroupper                   # Zero upper 128 bits of all YMM

AVX-512

AVX-512 instructions use EVEX encoding with ZMM0-ZMM31 registers.

# Arithmetic
vaddps zmm, zmm, zmm        # 512-bit packed single add
vaddpd zmm, zmm, zmm        # 512-bit packed double add
vsubps/pd zmm, zmm, zmm
vmulps/pd zmm, zmm, zmm
vdivps/pd zmm, zmm, zmm

# Logical
vandps/pd zmm, zmm, zmm
vorps/pd zmm, zmm, zmm
vxorps/pd zmm, zmm, zmm

# Data movement
vmovaps/apd zmm, zmm
vmovups/upd zmm, [mem]
vmovdqa32/64 zmm, zmm
vmovdqu8/16/32/64 zmm, [mem]

# Integer
vpaddb/w/d/q zmm, zmm, zmm
vpsubb/w/d/q zmm, zmm, zmm
vpmullw/d/q zmm, zmm, zmm

# Ternary logic
vpternlogd zmm, zmm, zmm, imm8   # Arbitrary 3-input boolean
vpternlogq zmm, zmm, zmm, imm8

# Blend
vblendmps zmm, zmm, zmm
vblendmpd zmm, zmm, zmm

# Compress / Expand
vcompressps zmm, zmm
vexpandps zmm, zmm
vpcompressd zmm, zmm
vpexpandd zmm, zmm

# Convert
vcvtdq2ps zmm, zmm
vcvtps2dq zmm, zmm
vcvtpd2ps ymm, zmm

# Variable shifts
vpsllvd/q zmm, zmm, zmm
vpsrlvd/q zmm, zmm, zmm
vpsravd/q zmm, zmm, zmm

# Shuffle / Unpack
vunpcklps/pd zmm, zmm, zmm
vunpckhps/pd zmm, zmm, zmm
vshufps zmm, zmm, zmm, imm8

AVX-512 sub-features supported: F (foundation), BW (byte/word), DQ (dword/qword).


Cache / Prefetch / Fences

prefetchnta [mem]            # Prefetch non-temporal
prefetcht0 [mem]             # Prefetch to L1
prefetcht1 [mem]             # Prefetch to L2
prefetcht2 [mem]             # Prefetch to L3
prefetchw [mem]              # Prefetch for write
clflush [mem]                # Cache line flush
clflushopt [mem]             # Optimized cache line flush
clwb [mem]                   # Cache line write-back
mfence                       # Memory fence
lfence                       # Load fence
sfence                       # Store fence

I/O

in al, imm8                 # Input byte from port
in ax, imm8                 # Input word from port
in eax, imm8                # Input dword from port
in al, dx                   # Input byte from DX port
out imm8, al                # Output byte to port
out dx, al                  # Output byte to DX port
insb / insw / insd           # Input string from port
outsb / outsw / outsd        # Output string to port

System & Security

syscall                      # System call
int imm8                     # Software interrupt
int3                         # Breakpoint (INT 3)
hlt                          # Halt
nop                          # No operation (+ multi-byte NOP2–NOP9)
ud2                          # Undefined instruction
cpuid                        # CPU identification
rdtsc                        # Read timestamp counter
rdtscp                       # Read timestamp counter and processor ID
sysenter                     # Fast system call entry
sysexit                      # Fast system call exit

# Security (CET)
endbr64                      # End branch 64-bit
endbr32                      # End branch 32-bit
rdrand r64                   # Random number
rdseed r64                   # Random seed
pause                        # Spin-loop hint

# FS/GS base
rdfsbase r64                 # Read FS base
rdgsbase r64                 # Read GS base
wrfsbase r64                 # Write FS base
wrgsbase r64                 # Write GS base

# Privileged
swapgs                       # Swap GS base
wrmsr                        # Write MSR
rdmsr                        # Read MSR
iretq                        # Interrupt return (64-bit)

# TSX
xbegin label                 # Transaction begin
xend                         # Transaction end
xabort imm8                  # Transaction abort
xtest                        # Test if in transaction

Flags

clc                          # Clear carry flag
stc                          # Set carry flag
cmc                          # Complement carry flag
cld                          # Clear direction flag
std                          # Set direction flag
cli                          # Clear interrupt flag
sti                          # Set interrupt flag
lahf                         # Load AH from flags
sahf                         # Store AH into flags
pushfq                       # Push flags
popfq                        # Pop flags

Conversion

cdq                          # Sign-extend EAX → EDX:EAX
cqo                          # Sign-extend RAX → RDX:RAX
cbw                          # Sign-extend AL → AX
cwde                         # Sign-extend AX → EAX
cdqe                         # Sign-extend EAX → RAX
cwd                          # Sign-extend AX → DX:AX
xlat                         # Table look-up translation

Extended State

fxsave [mem]                 # Save FP/MMX/SSE state
fxrstor [mem]                # Restore FP/MMX/SSE state
fxsave64 [mem]               # 64-bit variant
fxrstor64 [mem]

xsave [mem]                  # Save extended state
xrstor [mem]                 # Restore extended state
xsaveopt [mem]               # Optimized save
xsavec [mem]                 # Compacted save
xsaves [mem]                 # Supervisor save
xrstors [mem]                # Supervisor restore
# All have 64-bit variants

Memory Addressing

Intel-syntax memory addressing with full SIB support:

mov rax, [rbx]                  # base
mov rax, [rbx + 8]              # base + displacement
mov rax, [rbx + rcx]            # base + index
mov rax, [rbx + rcx*4]          # base + index*scale
mov rax, [rbx + rcx*8 + 16]     # base + index*scale + displacement
mov rax, [0x1000]               # displacement only
mov rax, fs:[0x28]              # segment override
mov byte ptr [rax], 0           # size hint

AT&T Syntax Equivalent

movq (%rbx), %rax               # base
movq 8(%rbx), %rax              # base + displacement
movq (%rbx,%rcx), %rax          # base + index
movq (%rbx,%rcx,4), %rax        # base + index*scale
movq 16(%rbx,%rcx,8), %rax      # base + index*scale + disp
movq 0x1000, %rax               # displacement only
movq %fs:0x28, %rax             # segment override

16-bit Real Mode

Switch to 16-bit encoding with .code16:

.code16
    cli
    hlt
    .fill 508, 1, 0             # pad to 510 bytes
    .word 0xAA55                 # boot signature

Use .code32 / .code64 to switch back.