author    Feoramund <161657516+Feoramund@users.noreply.github.com>  2024-07-24 16:22:04 -0400
committer Feoramund <161657516+Feoramund@users.noreply.github.com>  2024-07-24 16:27:20 -0400
commit    ff492e615cc5523903b9b4d38214eefc531b4d0c
tree      0b71b7c8f843661f4d6e3671a9c7874cafa3590a
parent    042f6de478b67b2a19fc5acaa4999d54700f6db8
Use `unaligned_load` for `regex` virtual machine
This should hopefully avoid any issues with loading operands greater than 8 bits on alignment-sensitive platforms.
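For context: every site touched below loads a multi-byte operand (a rune, or an i16/u16 jump offset) by casting the address of a byte that sits immediately after a one-byte opcode in the bytecode stream. Such a pointer is only guaranteed byte alignment, so the old plain dereference can trap on strict-alignment targets. A minimal sketch of the before/after pattern (the proc name and the []u8 buffer are illustrative, not part of this commit):

package example

import "base:intrinsics"

// `code` is a byte-packed bytecode stream: a 1-byte opcode followed by its
// operands, so the 4-byte rune operand at pc+1 can land on any byte boundary.
read_rune_operand :: proc(code: []u8, pc: int) -> rune {
	// Unsafe on strict-alignment platforms: the cast yields a ^rune that
	// may not be 4-byte aligned, and a plain dereference assumes it is.
	//   operand := (cast(^rune)&code[pc + 1])^

	// Safe everywhere: unaligned_load performs the same read without
	// assuming the pointer is aligned.
	return intrinsics.unaligned_load(cast(^rune)&code[pc + 1])
}

On platforms that permit unaligned access this typically compiles to the same single load; on strict-alignment ones the intrinsic lowers to a byte-wise read instead of faulting.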
Diffstat (limited to 'core')
-rw-r--r--  core/text/regex/compiler/compiler.odin                9
-rw-r--r--  core/text/regex/compiler/debugging.odin              15
-rw-r--r--  core/text/regex/virtual_machine/virtual_machine.odin 15
3 files changed, 21 insertions(+), 18 deletions(-)
diff --git a/core/text/regex/compiler/compiler.odin b/core/text/regex/compiler/compiler.odin
index 7617a7bcd..4404947f1 100644
--- a/core/text/regex/compiler/compiler.odin
+++ b/core/text/regex/compiler/compiler.odin
@@ -1,5 +1,6 @@
package regex_compiler

+import "base:intrinsics"
import "core:text/regex/common"
import "core:text/regex/parser"
import "core:text/regex/tokenizer"
@@ -408,7 +409,7 @@ compile :: proc(tree: Node, flags: common.Flags) -> (code: Program, class_data:
break add_global
case .Rune:
- operand := (cast(^rune)&code[pc+1])^
+ operand := intrinsics.unaligned_load(cast(^rune)&code[pc+1])
inject_at(&code, pc_open, Opcode.Wait_For_Rune)
pc_open += size_of(Opcode)
inject_raw(&code, pc_open, operand)
@@ -490,20 +491,20 @@ compile :: proc(tree: Node, flags: common.Flags) -> (code: Program, class_data:
case .Jump:
jmp := cast(^i16)&code[pc+size_of(Opcode)]
if code[cast(i16)pc+jmp^] == .Jump {
- next_jmp := (cast(^i16)&code[cast(i16)pc+jmp^+size_of(Opcode)])^
+ next_jmp := intrinsics.unaligned_load(cast(^i16)&code[cast(i16)pc+jmp^+size_of(Opcode)])
jmp^ = jmp^ + next_jmp
do_another_pass = true
}
case .Split:
jmp_x := cast(^i16)&code[pc+size_of(Opcode)]
if code[cast(i16)pc+jmp_x^] == .Jump {
- next_jmp := (cast(^i16)&code[cast(i16)pc+jmp_x^+size_of(Opcode)])^
+ next_jmp := intrinsics.unaligned_load(cast(^i16)&code[cast(i16)pc+jmp_x^+size_of(Opcode)])
jmp_x^ = jmp_x^ + next_jmp
do_another_pass = true
}
jmp_y := cast(^i16)&code[pc+size_of(Opcode)+size_of(i16)]
if code[cast(i16)pc+jmp_y^] == .Jump {
- next_jmp := (cast(^i16)&code[cast(i16)pc+jmp_y^+size_of(Opcode)])^
+ next_jmp := intrinsics.unaligned_load(cast(^i16)&code[cast(i16)pc+jmp_y^+size_of(Opcode)])
jmp_y^ = jmp_y^ + next_jmp
do_another_pass = true
}
diff --git a/core/text/regex/compiler/debugging.odin b/core/text/regex/compiler/debugging.odin
index 1ef3e6d78..114b88fa2 100644
--- a/core/text/regex/compiler/debugging.odin
+++ b/core/text/regex/compiler/debugging.odin
@@ -1,5 +1,6 @@
package regex_compiler

+import "base:intrinsics"
import "core:io"
import "core:text/regex/common"
import "core:text/regex/virtual_machine"
@@ -9,11 +10,11 @@ get_jump_targets :: proc(code: []Opcode) -> (jump_targets: map[int]int) {
for opcode, pc in virtual_machine.iterate_opcodes(&iter) {
#partial switch opcode {
case .Jump:
- jmp := cast(int)(cast(^u16)&code[pc+1])^
+ jmp := cast(int)intrinsics.unaligned_load(cast(^u16)&code[pc+1])
jump_targets[jmp] = pc
case .Split:
- jmp_x := cast(int)(cast(^u16)&code[pc+1])^
- jmp_y := cast(int)(cast(^u16)&code[pc+3])^
+ jmp_x := cast(int)intrinsics.unaligned_load(cast(^u16)&code[pc+1])
+ jmp_y := cast(int)intrinsics.unaligned_load(cast(^u16)&code[pc+3])
jump_targets[jmp_x] = pc
jump_targets[jmp_y] = pc
}
@@ -46,18 +47,18 @@ trace :: proc(w: io.Writer, code: []Opcode) {
operand := cast(rune)code[pc+1]
io.write_encoded_rune(w, operand)
case .Rune:
- operand := (cast(^rune)&code[pc+1])^
+ operand := intrinsics.unaligned_load(cast(^rune)&code[pc+1])
io.write_encoded_rune(w, operand)
case .Rune_Class, .Rune_Class_Negated:
operand := cast(u8)code[pc+1]
common.write_padded_hex(w, operand, 2)
case .Jump:
- jmp := (cast(^u16)&code[pc+1])^
+ jmp := intrinsics.unaligned_load(cast(^u16)&code[pc+1])
io.write_string(w, "-> $")
common.write_padded_hex(w, jmp, 4)
case .Split:
- jmp_x := (cast(^u16)&code[pc+1])^
- jmp_y := (cast(^u16)&code[pc+3])^
+ jmp_x := intrinsics.unaligned_load(cast(^u16)&code[pc+1])
+ jmp_y := intrinsics.unaligned_load(cast(^u16)&code[pc+3])
io.write_string(w, "=> $")
common.write_padded_hex(w, jmp_x, 4)
io.write_string(w, ", $")
diff --git a/core/text/regex/virtual_machine/virtual_machine.odin b/core/text/regex/virtual_machine/virtual_machine.odin
index f102fb78c..7eb6b1f9b 100644
--- a/core/text/regex/virtual_machine/virtual_machine.odin
+++ b/core/text/regex/virtual_machine/virtual_machine.odin
@@ -1,5 +1,6 @@
package regex_vm

+import "base:intrinsics"
@require import "core:io"
import "core:slice"
import "core:text/regex/common"
@@ -121,12 +122,12 @@ add_thread :: proc(vm: ^Machine, saved: ^[2 * common.MAX_CAPTURE_GROUPS]int, pc:
#partial switch vm.code[pc] {
case .Jump:
- pc = cast(int)(cast(^u16)&vm.code[pc + size_of(Opcode)])^
+ pc = cast(int)intrinsics.unaligned_load(cast(^u16)&vm.code[pc + size_of(Opcode)])
continue
case .Split:
- jmp_x := cast(int)(cast(^u16)&vm.code[pc + size_of(Opcode)])^
- jmp_y := cast(int)(cast(^u16)&vm.code[pc + size_of(Opcode) + size_of(u16)])^
+ jmp_x := cast(int)intrinsics.unaligned_load(cast(^u16)&vm.code[pc + size_of(Opcode)])
+ jmp_y := cast(int)intrinsics.unaligned_load(cast(^u16)&vm.code[pc + size_of(Opcode) + size_of(u16)])
add_thread(vm, saved, jmp_x)
pc = jmp_y
@@ -236,7 +237,7 @@ add_thread :: proc(vm: ^Machine, saved: ^[2 * common.MAX_CAPTURE_GROUPS]int, pc:
vm.top_thread += 1

case .Wait_For_Rune:
- operand := (cast(^rune)&vm.code[pc + size_of(Opcode)])^
+ operand := intrinsics.unaligned_load(cast(^rune)&vm.code[pc + size_of(Opcode)])
if vm.next_rune == operand {
add_thread(vm, saved, pc + size_of(Opcode) + size_of(rune))
}
@@ -409,7 +410,7 @@ run :: proc(vm: ^Machine, $UNICODE_MODE: bool) -> (saved: ^[2 * common.MAX_CAPTU
}

case .Rune:
- operand := (cast(^rune)&vm.code[t.pc + size_of(Opcode)])^
+ operand := intrinsics.unaligned_load(cast(^rune)&vm.code[t.pc + size_of(Opcode)])
if current_rune == operand {
add_thread(vm, t.saved, t.pc + size_of(Opcode) + size_of(rune))
}
@@ -482,7 +483,7 @@ run :: proc(vm: ^Machine, $UNICODE_MODE: bool) -> (saved: ^[2 * common.MAX_CAPTU
vm.top_thread += 1

case .Wait_For_Rune:
- operand := (cast(^rune)&vm.code[t.pc + size_of(Opcode)])^
+ operand := intrinsics.unaligned_load(cast(^rune)&vm.code[t.pc + size_of(Opcode)])
if vm.next_rune == operand {
add_thread(vm, t.saved, t.pc + size_of(Opcode) + size_of(rune))
}
@@ -558,7 +559,7 @@ run :: proc(vm: ^Machine, $UNICODE_MODE: bool) -> (saved: ^[2 * common.MAX_CAPTU
break escape_loop

case .Jump:
- t.pc = cast(int)(cast(^u16)&vm.code[t.pc + size_of(Opcode)])^
+ t.pc = cast(int)intrinsics.unaligned_load(cast(^u16)&vm.code[t.pc + size_of(Opcode)])
case .Save:
index := vm.code[t.pc + size_of(Opcode)]