aboutsummaryrefslogtreecommitdiff
path: root/core/text/regex
diff options
context:
space:
mode:
authorFeoramund <161657516+Feoramund@users.noreply.github.com>2024-07-24 16:48:49 -0400
committerFeoramund <161657516+Feoramund@users.noreply.github.com>2024-07-24 16:48:49 -0400
commit90f1f7fbdfc283b03216e15e2700331395539161 (patch)
tree8fd802531593c719970143db25f8b08dd7ad02e9 /core/text/regex
parentff492e615cc5523903b9b4d38214eefc531b4d0c (diff)
Use `unaligned_store` in `regex` too
Diffstat (limited to 'core/text/regex')
-rw-r--r--core/text/regex/compiler/compiler.odin27
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/core/text/regex/compiler/compiler.odin b/core/text/regex/compiler/compiler.odin
index 4404947f1..f5d6d2f6a 100644
--- a/core/text/regex/compiler/compiler.odin
+++ b/core/text/regex/compiler/compiler.odin
@@ -490,22 +490,25 @@ compile :: proc(tree: Node, flags: common.Flags) -> (code: Program, class_data:
#partial switch opcode {
case .Jump:
jmp := cast(^i16)&code[pc+size_of(Opcode)]
- if code[cast(i16)pc+jmp^] == .Jump {
- next_jmp := intrinsics.unaligned_load(cast(^i16)&code[cast(i16)pc+jmp^+size_of(Opcode)])
- jmp^ = jmp^ + next_jmp
+ jmp_value := intrinsics.unaligned_load(jmp)
+ if code[cast(i16)pc+jmp_value] == .Jump {
+ next_jmp := intrinsics.unaligned_load(cast(^i16)&code[cast(i16)pc+jmp_value+size_of(Opcode)])
+ intrinsics.unaligned_store(jmp, jmp_value + next_jmp)
do_another_pass = true
}
case .Split:
jmp_x := cast(^i16)&code[pc+size_of(Opcode)]
- if code[cast(i16)pc+jmp_x^] == .Jump {
- next_jmp := intrinsics.unaligned_load(cast(^i16)&code[cast(i16)pc+jmp_x^+size_of(Opcode)])
- jmp_x^ = jmp_x^ + next_jmp
+ jmp_x_value := intrinsics.unaligned_load(jmp_x)
+ if code[cast(i16)pc+jmp_x_value] == .Jump {
+ next_jmp := intrinsics.unaligned_load(cast(^i16)&code[cast(i16)pc+jmp_x_value+size_of(Opcode)])
+ intrinsics.unaligned_store(jmp_x, jmp_x_value + next_jmp)
do_another_pass = true
}
jmp_y := cast(^i16)&code[pc+size_of(Opcode)+size_of(i16)]
- if code[cast(i16)pc+jmp_y^] == .Jump {
- next_jmp := intrinsics.unaligned_load(cast(^i16)&code[cast(i16)pc+jmp_y^+size_of(Opcode)])
- jmp_y^ = jmp_y^ + next_jmp
+ jmp_y_value := intrinsics.unaligned_load(jmp_y)
+ if code[cast(i16)pc+jmp_y_value] == .Jump {
+ next_jmp := intrinsics.unaligned_load(cast(^i16)&code[cast(i16)pc+jmp_y_value+size_of(Opcode)])
+ intrinsics.unaligned_store(jmp_y, jmp_y_value + next_jmp)
do_another_pass = true
}
}
@@ -526,12 +529,12 @@ compile :: proc(tree: Node, flags: common.Flags) -> (code: Program, class_data:
#partial switch opcode {
case .Jump:
jmp := cast(^u16)&code[pc+size_of(Opcode)]
- jmp^ = jmp^ + cast(u16)pc
+ intrinsics.unaligned_store(jmp, intrinsics.unaligned_load(jmp) + cast(u16)pc)
case .Split:
jmp_x := cast(^u16)&code[pc+size_of(Opcode)]
- jmp_x^ = jmp_x^ + cast(u16)pc
+ intrinsics.unaligned_store(jmp_x, intrinsics.unaligned_load(jmp_x) + cast(u16)pc)
jmp_y := cast(^u16)&code[pc+size_of(Opcode)+size_of(i16)]
- jmp_y^ = jmp_y^ + cast(u16)pc
+ intrinsics.unaligned_store(jmp_y, intrinsics.unaligned_load(jmp_y) + cast(u16)pc)
}
}