author	Feoramund <161657516+Feoramund@users.noreply.github.com>	2024-07-24 16:22:04 -0400
committer	Feoramund <161657516+Feoramund@users.noreply.github.com>	2024-07-24 16:27:20 -0400
commit	ff492e615cc5523903b9b4d38214eefc531b4d0c (patch)
tree	0b71b7c8f843661f4d6e3671a9c7874cafa3590a /core/text/regex/compiler/debugging.odin
parent	042f6de478b67b2a19fc5acaa4999d54700f6db8 (diff)
Use `unaligned_load` for `regex` virtual machine
This should hopefully avoid any issues with loading operands wider than 8 bits on alignment-sensitive platforms.
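
For reference, a minimal standalone sketch of the pattern this commit adopts. The `u8` bytecode array and the operand value below are illustrative only and are not part of this commit; the actual compiler operates on a `[]Opcode` slice as shown in the diff.

package unaligned_example

import "base:intrinsics"
import "core:fmt"

main :: proc() {
	// A packed bytecode stream: a 1-byte opcode followed immediately by a
	// 16-bit jump operand, so the operand begins at an odd, unaligned address.
	code := []u8{0x01, 0x34, 0x12}

	// A plain dereference such as `(cast(^u16)&code[1])^` assumes natural
	// alignment and can fault on alignment-sensitive platforms.
	// `intrinsics.unaligned_load` reads the same two bytes without the
	// alignment requirement.
	jmp := cast(int)intrinsics.unaligned_load(cast(^u16)&code[1])
	fmt.println(jmp) // 0x1234 on little-endian hosts
}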
Diffstat (limited to 'core/text/regex/compiler/debugging.odin')
-rw-r--r--	core/text/regex/compiler/debugging.odin	15
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/core/text/regex/compiler/debugging.odin b/core/text/regex/compiler/debugging.odin
index 1ef3e6d78..114b88fa2 100644
--- a/core/text/regex/compiler/debugging.odin
+++ b/core/text/regex/compiler/debugging.odin
@@ -1,5 +1,6 @@
package regex_compiler
+import "base:intrinsics"
import "core:io"
import "core:text/regex/common"
import "core:text/regex/virtual_machine"
@@ -9,11 +10,11 @@ get_jump_targets :: proc(code: []Opcode) -> (jump_targets: map[int]int) {
for opcode, pc in virtual_machine.iterate_opcodes(&iter) {
#partial switch opcode {
case .Jump:
- jmp := cast(int)(cast(^u16)&code[pc+1])^
+ jmp := cast(int)intrinsics.unaligned_load(cast(^u16)&code[pc+1])
jump_targets[jmp] = pc
case .Split:
- jmp_x := cast(int)(cast(^u16)&code[pc+1])^
- jmp_y := cast(int)(cast(^u16)&code[pc+3])^
+ jmp_x := cast(int)intrinsics.unaligned_load(cast(^u16)&code[pc+1])
+ jmp_y := cast(int)intrinsics.unaligned_load(cast(^u16)&code[pc+3])
jump_targets[jmp_x] = pc
jump_targets[jmp_y] = pc
}
@@ -46,18 +47,18 @@ trace :: proc(w: io.Writer, code: []Opcode) {
operand := cast(rune)code[pc+1]
io.write_encoded_rune(w, operand)
case .Rune:
- operand := (cast(^rune)&code[pc+1])^
+ operand := intrinsics.unaligned_load(cast(^rune)&code[pc+1])
io.write_encoded_rune(w, operand)
case .Rune_Class, .Rune_Class_Negated:
operand := cast(u8)code[pc+1]
common.write_padded_hex(w, operand, 2)
case .Jump:
- jmp := (cast(^u16)&code[pc+1])^
+ jmp := intrinsics.unaligned_load(cast(^u16)&code[pc+1])
io.write_string(w, "-> $")
common.write_padded_hex(w, jmp, 4)
case .Split:
- jmp_x := (cast(^u16)&code[pc+1])^
- jmp_y := (cast(^u16)&code[pc+3])^
+ jmp_x := intrinsics.unaligned_load(cast(^u16)&code[pc+1])
+ jmp_y := intrinsics.unaligned_load(cast(^u16)&code[pc+3])
io.write_string(w, "=> $")
common.write_padded_hex(w, jmp_x, 4)
io.write_string(w, ", $")