6
6
@s = common dso_local global %struct.S zeroinitializer , align 4
7
7
@foo = global [6 x i16 ] [i16 1 , i16 2 , i16 3 , i16 4 , i16 5 , i16 0 ], align 2
8
8
@g = global [1048576 x i8 ] zeroinitializer , align 1
9
+ @bar = external global [0 x i8 ], align 1
9
10
10
11
11
12
define dso_local void @multiple_stores () local_unnamed_addr nounwind {
@@ -90,6 +91,15 @@ define i8* @big_offset_lui_tail() nounwind {
90
91
ret i8* getelementptr inbounds ([1048576 x i8 ], [1048576 x i8 ]* @g , i32 0 , i32 524288 )
91
92
}
92
93
94
+ ; Returns &bar[-8192]. The negative GEP offset is folded into a single
+ ; LUI+ADDI pair by encoding it as its unsigned 32-bit value
+ ; (-8192 == 4294959104) inside the %hi/%lo relocation expressions, so no
+ ; separate add of the offset is needed (same CHECK lines for RV32 and RV64).
+ define i8* @big_offset_neg_lui_tail () {
95
+ ; CHECK-LABEL: big_offset_neg_lui_tail:
96
+ ; CHECK: # %bb.0:
97
+ ; CHECK-NEXT: lui a0, %hi(bar+4294959104)
98
+ ; CHECK-NEXT: addi a0, a0, %lo(bar+4294959104)
99
+ ; CHECK-NEXT: ret
100
+ ret i8* getelementptr inbounds ([0 x i8 ], [0 x i8 ]* @bar , i32 0 , i32 -8192 )
101
+ }
102
+
93
103
define dso_local i32* @big_offset_one_use () local_unnamed_addr nounwind {
94
104
; RV32-LABEL: big_offset_one_use:
95
105
; RV32: # %bb.0: # %entry
@@ -125,11 +135,11 @@ define dso_local i32* @control_flow_no_mem(i32 %n) local_unnamed_addr nounwind {
125
135
; CHECK-NEXT: lui a0, %hi(s)
126
136
; CHECK-NEXT: addi a0, a0, %lo(s)
127
137
; CHECK-NEXT: lw a1, 164(a0)
128
- ; CHECK-NEXT: beqz a1, .LBB6_2
138
+ ; CHECK-NEXT: beqz a1, .LBB7_2
129
139
; CHECK-NEXT: # %bb.1: # %if.end
130
140
; CHECK-NEXT: addi a0, a0, 168
131
141
; CHECK-NEXT: ret
132
- ; CHECK-NEXT: .LBB6_2 : # %if.then
142
+ ; CHECK-NEXT: .LBB7_2 : # %if.then
133
143
; CHECK-NEXT: addi a0, a0, 160
134
144
; CHECK-NEXT: ret
135
145
entry:
@@ -150,13 +160,13 @@ define dso_local i32 @load_half() nounwind {
150
160
; RV32-NEXT: lui a0, %hi(foo+8)
151
161
; RV32-NEXT: lhu a0, %lo(foo+8)(a0)
152
162
; RV32-NEXT: li a1, 140
153
- ; RV32-NEXT: bne a0, a1, .LBB7_2
163
+ ; RV32-NEXT: bne a0, a1, .LBB8_2
154
164
; RV32-NEXT: # %bb.1: # %if.end
155
165
; RV32-NEXT: li a0, 0
156
166
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
157
167
; RV32-NEXT: addi sp, sp, 16
158
168
; RV32-NEXT: ret
159
- ; RV32-NEXT: .LBB7_2 : # %if.then
169
+ ; RV32-NEXT: .LBB8_2 : # %if.then
160
170
; RV32-NEXT: call abort@plt
161
171
;
162
172
; RV64-LABEL: load_half:
@@ -166,13 +176,13 @@ define dso_local i32 @load_half() nounwind {
166
176
; RV64-NEXT: lui a0, %hi(foo+8)
167
177
; RV64-NEXT: lhu a0, %lo(foo+8)(a0)
168
178
; RV64-NEXT: li a1, 140
169
- ; RV64-NEXT: bne a0, a1, .LBB7_2
179
+ ; RV64-NEXT: bne a0, a1, .LBB8_2
170
180
; RV64-NEXT: # %bb.1: # %if.end
171
181
; RV64-NEXT: li a0, 0
172
182
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
173
183
; RV64-NEXT: addi sp, sp, 16
174
184
; RV64-NEXT: ret
175
- ; RV64-NEXT: .LBB7_2 : # %if.then
185
+ ; RV64-NEXT: .LBB8_2 : # %if.then
176
186
; RV64-NEXT: call abort@plt
177
187
entry:
178
188
%0 = load i16 , i16* getelementptr inbounds ([6 x i16 ], [6 x i16 ]* @foo , i32 0 , i32 4 ), align 2
@@ -200,3 +210,32 @@ entry:
200
210
store i32 10 , i32* getelementptr inbounds (%struct.S , %struct.S* @s , i32 0 , i32 1 ), align 4
201
211
ret void
202
212
}
213
+
214
+ ; Returns &bar[-8191]. RV32 folds the offset into %hi/%lo as its unsigned
+ ; 32-bit encoding (-8191 == 4294959105). RV64 instead materializes the bare
+ ; symbol address and adds the offset separately via LUI+ADDIW
+ ; (1048574 << 12 == -8192, then +1 == -8191), so the two targets need
+ ; separate CHECK prefixes here.
+ define i8* @neg_offset () {
215
+ ; RV32-LABEL: neg_offset:
216
+ ; RV32: # %bb.0:
217
+ ; RV32-NEXT: lui a0, %hi(bar+4294959105)
218
+ ; RV32-NEXT: addi a0, a0, %lo(bar+4294959105)
219
+ ; RV32-NEXT: ret
220
+ ;
221
+ ; RV64-LABEL: neg_offset:
222
+ ; RV64: # %bb.0:
223
+ ; RV64-NEXT: lui a0, %hi(bar)
224
+ ; RV64-NEXT: addi a0, a0, %lo(bar)
225
+ ; RV64-NEXT: lui a1, 1048574
226
+ ; RV64-NEXT: addiw a1, a1, 1
227
+ ; RV64-NEXT: add a0, a0, a1
228
+ ; RV64-NEXT: ret
229
+ ret i8* getelementptr inbounds ([0 x i8 ], [0 x i8 ]* @bar , i32 0 , i32 -8191 )
230
+ }
231
+
232
+ ; This uses an LUI+ADDI on RV64 that does not produce a simm32. For RV32, we'll
233
+ ; truncate the offset.
234
+ ; Returns &bar[-2147485013]. The i64 offset lies below INT32_MIN, so it is
+ ; not a simm32; both targets fold its 32-bit truncation
+ ; (-2147485013 mod 2^32 == 2147482283) into the %hi/%lo pair, which is why a
+ ; shared CHECK prefix suffices here.
+ define i8* @neg_offset_not_simm32 () {
235
+ ; CHECK-LABEL: neg_offset_not_simm32:
236
+ ; CHECK: # %bb.0:
237
+ ; CHECK-NEXT: lui a0, %hi(bar+2147482283)
238
+ ; CHECK-NEXT: addi a0, a0, %lo(bar+2147482283)
239
+ ; CHECK-NEXT: ret
240
+ ret i8* getelementptr inbounds ([0 x i8 ], [0 x i8 ]* @bar , i32 0 , i64 -2147485013 )
241
+ }
0 commit comments