forked from llvm/clangir
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathCIRGenCXX.cpp
425 lines (367 loc) · 16.2 KB
/
CIRGenCXX.cpp
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
//===--- CGCXX.cpp - Emit LLVM Code for declarations ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code dealing with C++ code generation.
//
//===----------------------------------------------------------------------===//
// We might split this into multiple files if it gets too unwieldy
#include "CIRGenCXXABI.h"
#include "CIRGenFunction.h"
#include "CIRGenModule.h"
#include "clang/AST/GlobalDecl.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/SaveAndRestore.h"
#include <cassert>
using namespace clang;
using namespace clang::CIRGen;
/// Try to emit a base destructor as an alias to its primary
/// base-class destructor.
///
/// \returns true when the destructor must still be emitted as a regular
/// definition (no alias was produced), and false when an alias or a
/// use-replacement was successfully recorded for it.
bool CIRGenModule::tryEmitBaseDestructorAsAlias(const CXXDestructorDecl *D) {
  if (!getCodeGenOpts().CXXCtorDtorAliases)
    return true;

  // Producing an alias to a base class ctor/dtor can degrade debug quality
  // as the debugger cannot tell them apart.
  if (getCodeGenOpts().OptimizationLevel == 0)
    return true;

  // If sanitizing memory to check for use-after-dtor, do not emit as
  // an alias, unless this class owns no members.
  if (getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
      !D->getParent()->field_empty())
    assert(!cir::MissingFeatures::sanitizeDtor());

  // If the destructor doesn't have a trivial body, we have to emit it
  // separately.
  if (!D->hasTrivialBody())
    return true;

  const CXXRecordDecl *Class = D->getParent();

  // We are going to instrument this destructor, so give up even if it is
  // currently empty.
  if (Class->mayInsertExtraPadding())
    return true;

  // If we need to manipulate a VTT parameter, give up.
  if (Class->getNumVBases()) {
    // Extra Credit: passing extra parameters is perfectly safe
    // in many calling conventions, so only bail out if the ctor's
    // calling convention is nonstandard.
    return true;
  }

  // If any field has a non-trivial destructor, we have to emit the
  // destructor separately.
  for (const auto *I : Class->fields())
    if (I->getType().isDestructedType())
      return true;

  // Try to find a unique base class with a non-trivial destructor.
  const CXXRecordDecl *UniqueBase = nullptr;
  for (const auto &I : Class->bases()) {
    // We're in the base destructor, so skip virtual bases.
    if (I.isVirtual())
      continue;

    // Skip base classes with trivial destructors.
    const auto *Base =
        cast<CXXRecordDecl>(I.getType()->castAs<RecordType>()->getDecl());
    if (Base->hasTrivialDestructor())
      continue;

    // If we've already found a base class with a non-trivial
    // destructor, give up.
    if (UniqueBase)
      return true;
    UniqueBase = Base;
  }

  // If we didn't find any bases with a non-trivial destructor, then
  // the base destructor is actually effectively trivial, which can
  // happen if it was needlessly user-defined or if there are virtual
  // bases with non-trivial destructors.
  if (!UniqueBase)
    return true;

  // If the base is at a non-zero offset, give up.
  const ASTRecordLayout &ClassLayout = astCtx.getASTRecordLayout(Class);
  if (!ClassLayout.getBaseClassOffset(UniqueBase).isZero())
    return true;

  // Give up if the calling conventions don't match. We could update the call,
  // but it is probably not worth it.
  const CXXDestructorDecl *BaseD = UniqueBase->getDestructor();
  if (BaseD->getType()->castAs<FunctionType>()->getCallConv() !=
      D->getType()->castAs<FunctionType>()->getCallConv())
    return true;

  GlobalDecl AliasDecl(D, Dtor_Base);
  GlobalDecl TargetDecl(BaseD, Dtor_Base);

  // The alias will use the linkage of the referent. If we can't
  // support aliases with that linkage, fail.
  auto Linkage = getFunctionLinkage(AliasDecl);

  // We can't use an alias if the linkage is not valid for one.
  if (!cir::isValidLinkage(Linkage))
    return true;

  auto TargetLinkage = getFunctionLinkage(TargetDecl);

  // Check if we have it already: a non-declaration definition or a pending
  // replacement means there is nothing left to do.
  StringRef MangledName = getMangledName(AliasDecl);
  auto Entry = getGlobalValue(MangledName);
  auto globalValue = dyn_cast_or_null<cir::CIRGlobalValueInterface>(Entry);
  if (Entry && globalValue && !globalValue.isDeclaration())
    return false;
  if (Replacements.count(MangledName))
    return false;

  [[maybe_unused]] auto AliasValueType = getTypes().GetFunctionType(AliasDecl);

  // Find the referent.
  auto Aliasee = cast<cir::FuncOp>(GetAddrOfGlobal(TargetDecl));
  auto AliaseeGV = dyn_cast_or_null<cir::CIRGlobalValueInterface>(
      GetAddrOfGlobal(TargetDecl));

  // Instead of creating as alias to a linkonce_odr, replace all of the uses
  // of the aliasee.
  if (cir::isDiscardableIfUnused(Linkage) &&
      !(TargetLinkage == cir::GlobalLinkageKind::AvailableExternallyLinkage &&
        TargetDecl.getDecl()->hasAttr<AlwaysInlineAttr>())) {
    // FIXME: An extern template instantiation will create functions with
    // linkage "AvailableExternally". In libc++, some classes also define
    // members with attribute "AlwaysInline" and expect no reference to
    // be generated. It is desirable to reenable this optimisation after
    // corresponding LLVM changes.
    addReplacement(MangledName, Aliasee);
    return false;
  }

  // If we have a weak, non-discardable alias (weak, weak_odr), like an
  // extern template instantiation or a dllexported class, avoid forming it on
  // COFF. A COFF weak external alias cannot satisfy a normal undefined
  // symbol reference from another TU. The other TU must also mark the
  // referenced symbol as weak, which we cannot rely on.
  if (cir::isWeakForLinker(Linkage) && getTriple().isOSBinFormatCOFF()) {
    llvm_unreachable("please send a PR with a test and remove this.\n");
    return true;
  }

  // If we don't have a definition for the destructor yet or the definition
  // is available_externally, don't emit an alias. We can't emit aliases to
  // declarations; that's just not how aliases work.
  if (AliaseeGV && AliaseeGV.isDeclarationForLinker())
    return true;

  // Don't create an alias to a linker weak symbol. This avoids producing
  // different COMDATs in different TUs. Another option would be to
  // output the alias both for weak_odr and linkonce_odr, but that
  // requires explicit comdat support in the IL.
  if (cir::isWeakForLinker(TargetLinkage)) {
    llvm_unreachable("please send a PR with a test and remove this.\n");
    return true;
  }

  // Create the alias with no name.
  emitAliasForGlobal(MangledName, Entry, AliasDecl, Aliasee, Linkage);
  return false;
}
/// Emit the initializer of variable \p D into the storage at \p DeclPtr,
/// dispatching on the CIR evaluation kind of the variable's type.
///
/// Only global-storage variables (or OpenCL C++ locals) are supported, and
/// references must be handled elsewhere (see emitCXXGlobalVarDeclInit).
static void emitDeclInit(CIRGenFunction &CGF, const VarDecl *D,
                         Address DeclPtr) {
  assert((D->hasGlobalStorage() ||
          (D->hasLocalStorage() &&
           CGF.getContext().getLangOpts().OpenCLCPlusPlus)) &&
         "VarDecl must have global or local (in the case of OpenCL) storage!");
  assert(!D->getType()->isReferenceType() &&
         "Should not call emitDeclInit on a reference!");

  QualType type = D->getType();
  LValue lv = CGF.makeAddrLValue(DeclPtr, type);

  const Expr *Init = D->getInit();
  switch (CIRGenFunction::getEvaluationKind(type)) {
  case cir::TEK_Aggregate:
    CGF.emitAggExpr(Init,
                    AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
                                            AggValueSlot::DoesNotNeedGCBarriers,
                                            AggValueSlot::IsNotAliased,
                                            AggValueSlot::DoesNotOverlap));
    return;
  case cir::TEK_Scalar:
    CGF.emitScalarInit(Init, CGF.getLoc(D->getLocation()), lv, false);
    return;
  case cir::TEK_Complex:
    llvm_unreachable("complex evaluation NYI");
  }
}
/// Emit the destructor call for the global variable \p D into the dtor
/// region currently being populated through \p CGF, and register it with
/// the C++ ABI.
static void emitDeclDestroy(CIRGenFunction &CGF, const VarDecl *D) {
  // Honor __attribute__((no_destroy)) and bail instead of attempting
  // to emit a reference to a possibly nonexistent destructor, which
  // in turn can cause a crash. This will result in a global constructor
  // that isn't balanced out by a destructor call as intended by the
  // attribute. This also checks for -fno-c++-static-destructors and
  // bails even if the attribute is not present.
  QualType::DestructionKind DtorKind = D->needsDestruction(CGF.getContext());

  // FIXME: __attribute__((cleanup)) ?
  switch (DtorKind) {
  case QualType::DK_none:
    return;

  case QualType::DK_cxx_destructor:
    break;

  case QualType::DK_objc_strong_lifetime:
  case QualType::DK_objc_weak_lifetime:
  case QualType::DK_nontrivial_c_struct:
    // We don't care about releasing objects during process teardown.
    assert(!D->getTLSKind() && "should have rejected this");
    return;
  }

  auto &CGM = CGF.CGM;
  QualType type = D->getType();

  // Special-case non-array C++ destructors, if they have the right signature.
  // Under some ABIs, destructors return this instead of void, and cannot be
  // passed directly to __cxa_atexit if the target does not allow this
  // mismatch.
  const CXXRecordDecl *Record = type->getAsCXXRecordDecl();
  bool CanRegisterDestructor =
      Record && (!CGM.getCXXABI().HasThisReturn(
                     GlobalDecl(Record->getDestructor(), Dtor_Complete)) ||
                 CGM.getCXXABI().canCallMismatchedFunctionType());

  // If __cxa_atexit is disabled via a flag, a different helper function is
  // generated elsewhere which uses atexit instead, and it takes the destructor
  // directly. Note the negation: the external helper is in use exactly when
  // CXAAtExit is off.
  bool UsingExternalHelper = !CGM.getCodeGenOpts().CXAAtExit;

  cir::FuncOp fnOp;
  if (Record && (CanRegisterDestructor || UsingExternalHelper)) {
    assert(!D->getTLSKind() && "TLS NYI");
    assert(!Record->hasTrivialDestructor());
    assert(!cir::MissingFeatures::openCLCXX());
    CXXDestructorDecl *Dtor = Record->getDestructor();
    // In LLVM OG codegen this is done in registerGlobalDtor, but CIRGen
    // relies on LoweringPrepare for further decoupling, so build the
    // call right here.
    auto GD = GlobalDecl(Dtor, Dtor_Complete);
    auto structorInfo = CGM.getAddrAndTypeOfCXXStructor(GD);
    fnOp = structorInfo.second;
    // Call the complete-object destructor on the global's address.
    CGF.getBuilder().createCallOp(
        CGF.getLoc(D->getSourceRange()),
        mlir::FlatSymbolRefAttr::get(fnOp.getSymNameAttr()),
        mlir::ValueRange{CGF.CGM.getAddrOfGlobalVar(D)});
  } else {
    llvm_unreachable("array destructors not yet supported!");
  }

  assert(fnOp && "expected cir.func");
  CGM.getCXXABI().registerGlobalDtor(CGF, D, fnOp, nullptr);
}
/// Emit the definition of the C++ structor (constructor or destructor
/// variant) described by \p GD and return the resulting cir.func.
cir::FuncOp CIRGenModule::codegenCXXStructor(GlobalDecl GD) {
  // Arrange the CIR-level signature for this structor variant and get (or
  // create) its function, forcing a definition-style declaration.
  const auto &fnInfo = getTypes().arrangeCXXStructorDeclaration(GD);
  auto fn = getAddrOfCXXStructor(GD, &fnInfo, /*FnType=*/nullptr,
                                 /*DontDefer=*/true, ForDefinition);
  setFunctionLinkage(GD, fn);

  // Generate the body under a fresh function-level codegen context, keeping
  // the module builder's insertion point untouched.
  CIRGenFunction cgf{*this, builder};
  CurCGF = &cgf;
  {
    mlir::OpBuilder::InsertionGuard guard(builder);
    cgf.generateCode(GD, fn, fnInfo);
  }
  CurCGF = nullptr;

  setNonAliasAttributes(GD, fn);
  setCIRFunctionAttributesForDefinition(cast<CXXMethodDecl>(GD.getDecl()), fn);
  return fn;
}
/// Emit code to cause the variable at the given address to be considered as
/// constant from this point onwards.
static void emitDeclInvariant(CIRGenFunction &CGF, const VarDecl *D) {
  const CharUnits size = CGF.getContext().getTypeSizeInChars(D->getType());
  CGF.emitInvariantStart(size);
}
void CIRGenFunction::emitInvariantStart([[maybe_unused]] CharUnits Size) {
  // The invariant marker only benefits the optimizer; emit nothing at -O0.
  if (CGM.getCodeGenOpts().OptimizationLevel == 0)
    return;
  assert(!cir::MissingFeatures::createInvariantIntrinsic());
}
// Emit the dynamic initialization (ctor region) and, when needed, the
// destruction (dtor region) of the global cir.global `addr` backing
// `varDecl`. `performInit` is false when a constant initializer was already
// emitted and only invariance/destruction bookkeeping remains.
void CIRGenModule::emitCXXGlobalVarDeclInit(const VarDecl *varDecl,
                                            cir::GlobalOp addr,
                                            bool performInit) {
  const Expr *init = varDecl->getInit();
  QualType ty = varDecl->getType();

  // TODO: handle address space
  // The address space of a static local variable (DeclPtr) may be different
  // from the address space of the "this" argument of the constructor. In that
  // case, we need an addrspacecast before calling the constructor.
  //
  // struct StructWithCtor {
  //   __device__ StructWithCtor() {...}
  // };
  // __device__ void foo() {
  //   __shared__ StructWithCtor s;
  //   ...
  // }
  //
  // For example, in the above CUDA code, the static local variable s has a
  // "shared" address space qualifier, but the constructor of StructWithCtor
  // expects "this" in the "generic" address space.
  assert(!cir::MissingFeatures::addressSpace());

  if (getLangOpts().OpenMP && !getLangOpts().OpenMPSimd &&
      varDecl->hasAttr<OMPThreadPrivateDeclAttr>()) {
    llvm_unreachable("NYI");
  }

  assert(varDecl && " Expected a global declaration!");

  // Build inside a temporary function-level context so helpers such as
  // emitDeclInit/emitDeclDestroy have a current function; SaveAndRestore puts
  // the previous CurCGF back when this function returns.
  CIRGenFunction cgf{*this, builder, true};
  llvm::SaveAndRestore<CIRGenFunction *> savedCGF(CurCGF, &cgf);
  CurCGF->CurFn = addr;

  CIRGenFunction::SourceLocRAIIObject fnLoc{cgf,
                                            getLoc(varDecl->getLocation())};

  // Attach the AST var decl to the global so later passes can consult it.
  addr.setAstAttr(cir::ASTVarDeclAttr::get(&getMLIRContext(), varDecl));

  if (ty->isReferenceType()) {
    // Reference globals: bind the reference and store the resulting pointer
    // into the global's ctor region.
    mlir::OpBuilder::InsertionGuard guard(builder);
    auto *block = builder.createBlock(&addr.getCtorRegion());
    CIRGenFunction::LexicalScope lexScope{*CurCGF, addr.getLoc(),
                                          builder.getInsertionBlock()};
    lexScope.setAsGlobalInit();

    builder.setInsertionPointToStart(block);
    auto getGlobal = builder.createGetGlobal(addr);

    Address declAddr(getGlobal, getGlobal.getType(),
                     getASTContext().getDeclAlign(varDecl));
    assert(performInit && "cannot have constant initializer which needs "
                          "destruction for reference");
    RValue rv = cgf.emitReferenceBindingToExpr(init);
    {
      mlir::OpBuilder::InsertionGuard guard(builder);
      // If the rvalue was materialized inside a yielding block (e.g. a scope
      // created while binding the reference), move the get_global before that
      // block's first op and store just before its terminator so the store
      // lands in the right block. NOTE(review): relies on the defining op's
      // block ending in cir.yield — confirm against emitReferenceBindingToExpr.
      mlir::Operation *rvalueDefOp = rv.getScalarVal().getDefiningOp();
      if (rvalueDefOp && rvalueDefOp->getBlock()) {
        mlir::Block *rvalSrcBlock = rvalueDefOp->getBlock();
        if (!rvalSrcBlock->empty() && isa<cir::YieldOp>(rvalSrcBlock->back())) {
          auto &front = rvalSrcBlock->front();
          getGlobal.getDefiningOp()->moveBefore(&front);
          auto yield = cast<cir::YieldOp>(rvalSrcBlock->back());
          builder.setInsertionPoint(yield);
        }
      }
      cgf.emitStoreOfScalar(rv.getScalarVal(), declAddr, false, ty);
    }
    // Terminate the ctor region.
    builder.setInsertionPointToEnd(block);
    builder.create<cir::YieldOp>(addr->getLoc());
  } else {
    bool needsDtor = varDecl->needsDestruction(getASTContext()) ==
                     QualType::DK_cxx_destructor;
    // PerformInit, constant store invariant / destroy handled below.
    bool isConstantStorage =
        varDecl->getType().isConstantStorage(getASTContext(), true, !needsDtor);
    if (performInit) {
      // Populate the ctor region with the dynamic initializer.
      mlir::OpBuilder::InsertionGuard guard(builder);
      auto *block = builder.createBlock(&addr.getCtorRegion());
      CIRGenFunction::LexicalScope lexScope{*CurCGF, addr.getLoc(),
                                            builder.getInsertionBlock()};
      lexScope.setAsGlobalInit();

      builder.setInsertionPointToStart(block);
      Address declAddr(getAddrOfGlobalVar(varDecl),
                       getASTContext().getDeclAlign(varDecl));
      emitDeclInit(cgf, varDecl, declAddr);
      builder.setInsertionPointToEnd(block);
      builder.create<cir::YieldOp>(addr->getLoc());
    }

    if (isConstantStorage) {
      // TODO: this leads to a missing feature in the moment, probably also need
      // a LexicalScope to be inserted here.
      emitDeclInvariant(cgf, varDecl);
    } else {
      // If not constant storage we'll emit this regardless of NeedsDtor value.
      mlir::OpBuilder::InsertionGuard guard(builder);
      auto *block = builder.createBlock(&addr.getDtorRegion());
      CIRGenFunction::LexicalScope lexScope{*CurCGF, addr.getLoc(),
                                            builder.getInsertionBlock()};
      lexScope.setAsGlobalInit();

      builder.setInsertionPointToStart(block);
      emitDeclDestroy(cgf, varDecl);
      builder.setInsertionPointToEnd(block);
      // A trivially-destructible global produces an empty dtor region; drop
      // it entirely rather than emitting an empty yielding block.
      if (block->empty()) {
        block->erase();
        // Don't confuse lexical cleanup.
        builder.clearInsertionPoint();
      } else
        builder.create<cir::YieldOp>(addr->getLoc());
    }
  }
}