1344a3780SDimitry Andric //===-- AMDGPULowerModuleLDSPass.cpp ------------------------------*- C++ -*-=//
2344a3780SDimitry Andric //
3344a3780SDimitry Andric // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4344a3780SDimitry Andric // See https://llvm.org/LICENSE.txt for license information.
5344a3780SDimitry Andric // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6344a3780SDimitry Andric //
7344a3780SDimitry Andric //===----------------------------------------------------------------------===//
8344a3780SDimitry Andric //
9e3b55780SDimitry Andric // This pass eliminates local data store, LDS, uses from non-kernel functions.
10e3b55780SDimitry Andric // LDS is contiguous memory allocated per kernel execution.
11344a3780SDimitry Andric //
12e3b55780SDimitry Andric // Background.
13344a3780SDimitry Andric //
14e3b55780SDimitry Andric // The programming model is global variables, or equivalently function local
15e3b55780SDimitry Andric // static variables, accessible from kernels or other functions. For uses from
16e3b55780SDimitry Andric // kernels this is straightforward - assign an integer to the kernel for the
17e3b55780SDimitry Andric // memory required by all the variables combined, allocate them within that.
18e3b55780SDimitry Andric // For uses from functions there are performance tradeoffs to choose between.
19e3b55780SDimitry Andric //
20e3b55780SDimitry Andric // This model means the GPU runtime can specify the amount of memory allocated.
21e3b55780SDimitry Andric // If this is more than the kernel assumed, the excess can be made available
22e3b55780SDimitry Andric // using a language specific feature, which IR represents as a variable with
237fa27ce4SDimitry Andric // no initializer. This feature is referred to here as "Dynamic LDS" and is
247fa27ce4SDimitry Andric // lowered slightly differently to the normal case.
25e3b55780SDimitry Andric //
26e3b55780SDimitry Andric // Consequences of this GPU feature:
27e3b55780SDimitry Andric // - memory is limited and exceeding it halts compilation
28e3b55780SDimitry Andric // - a global accessed by one kernel exists independent of other kernels
29e3b55780SDimitry Andric // - a global exists independent of simultaneous execution of the same kernel
30e3b55780SDimitry Andric // - the address of the global may be different from different kernels as they
31e3b55780SDimitry Andric // do not alias, which permits only allocating variables they use
32e3b55780SDimitry Andric // - if the address is allowed to differ, functions need help to find it
33e3b55780SDimitry Andric //
34e3b55780SDimitry Andric // Uses from kernels are implemented here by grouping them in a per-kernel
35e3b55780SDimitry Andric // struct instance. This duplicates the variables, accurately modelling their
36e3b55780SDimitry Andric // aliasing properties relative to a single global representation. It also
37e3b55780SDimitry Andric // permits control over alignment via padding.
38e3b55780SDimitry Andric //
39e3b55780SDimitry Andric // Uses from functions are more complicated and the primary purpose of this
// IR pass. Several different lowerings are chosen between to meet requirements
41e3b55780SDimitry Andric // to avoid allocating any LDS where it is not necessary, as that impacts
42e3b55780SDimitry Andric // occupancy and may fail the compilation, while not imposing overhead on a
43e3b55780SDimitry Andric // feature whose primary advantage over global memory is performance. The basic
44e3b55780SDimitry Andric // design goal is to avoid one kernel imposing overhead on another.
45e3b55780SDimitry Andric //
46e3b55780SDimitry Andric // Implementation.
47e3b55780SDimitry Andric //
48e3b55780SDimitry Andric // LDS variables with constant annotation or non-undef initializer are passed
49145449b1SDimitry Andric // through unchanged for simplification or error diagnostics in later passes.
50e3b55780SDimitry Andric // Non-undef initializers are not yet implemented for LDS.
51344a3780SDimitry Andric //
52e3b55780SDimitry Andric // LDS variables that are always allocated at the same address can be found
53e3b55780SDimitry Andric // by lookup at that address. Otherwise runtime information/cost is required.
54344a3780SDimitry Andric //
55e3b55780SDimitry Andric // The simplest strategy possible is to group all LDS variables in a single
56e3b55780SDimitry Andric // struct and allocate that struct in every kernel such that the original
57e3b55780SDimitry Andric // variables are always at the same address. LDS is however a limited resource
58e3b55780SDimitry Andric // so this strategy is unusable in practice. It is not implemented here.
59e3b55780SDimitry Andric //
60e3b55780SDimitry Andric // Strategy | Precise allocation | Zero runtime cost | General purpose |
61e3b55780SDimitry Andric // --------+--------------------+-------------------+-----------------+
62e3b55780SDimitry Andric // Module | No | Yes | Yes |
63e3b55780SDimitry Andric // Table | Yes | No | Yes |
64e3b55780SDimitry Andric // Kernel | Yes | Yes | No |
65e3b55780SDimitry Andric // Hybrid | Yes | Partial | Yes |
66e3b55780SDimitry Andric //
677fa27ce4SDimitry Andric // "Module" spends LDS memory to save cycles. "Table" spends cycles and global
687fa27ce4SDimitry Andric // memory to save LDS. "Kernel" is as fast as kernel allocation but only works
697fa27ce4SDimitry Andric // for variables that are known reachable from a single kernel. "Hybrid" picks
707fa27ce4SDimitry Andric // between all three. When forced to choose between LDS and cycles we minimise
71e3b55780SDimitry Andric // LDS use.
72e3b55780SDimitry Andric
73e3b55780SDimitry Andric // The "module" lowering implemented here finds LDS variables which are used by
74e3b55780SDimitry Andric // non-kernel functions and creates a new struct with a field for each of those
75e3b55780SDimitry Andric // LDS variables. Variables that are only used from kernels are excluded.
76e3b55780SDimitry Andric //
77e3b55780SDimitry Andric // The "table" lowering implemented here has three components.
78e3b55780SDimitry Andric // First kernels are assigned a unique integer identifier which is available in
79e3b55780SDimitry Andric // functions it calls through the intrinsic amdgcn_lds_kernel_id. The integer
80e3b55780SDimitry Andric // is passed through a specific SGPR, thus works with indirect calls.
81e3b55780SDimitry Andric // Second, each kernel allocates LDS variables independent of other kernels and
82e3b55780SDimitry Andric // writes the addresses it chose for each variable into an array in consistent
83e3b55780SDimitry Andric // order. If the kernel does not allocate a given variable, it writes undef to
84e3b55780SDimitry Andric // the corresponding array location. These arrays are written to a constant
85e3b55780SDimitry Andric // table in the order matching the kernel unique integer identifier.
86e3b55780SDimitry Andric // Third, uses from non-kernel functions are replaced with a table lookup using
87e3b55780SDimitry Andric // the intrinsic function to find the address of the variable.
88e3b55780SDimitry Andric //
89e3b55780SDimitry Andric // "Kernel" lowering is only applicable for variables that are unambiguously
90e3b55780SDimitry Andric // reachable from exactly one kernel. For those cases, accesses to the variable
91e3b55780SDimitry Andric // can be lowered to ConstantExpr address of a struct instance specific to that
92e3b55780SDimitry Andric // one kernel. This is zero cost in space and in compute. It will raise a fatal
93e3b55780SDimitry Andric // error on any variable that might be reachable from multiple kernels and is
94e3b55780SDimitry Andric // thus most easily used as part of the hybrid lowering strategy.
95e3b55780SDimitry Andric //
96e3b55780SDimitry Andric // Hybrid lowering is a mixture of the above. It uses the zero cost kernel
97e3b55780SDimitry Andric // lowering where it can. It lowers the variable accessed by the greatest
98e3b55780SDimitry Andric // number of kernels using the module strategy as that is free for the first
// variable. Any further variables that can be lowered with the module strategy
100e3b55780SDimitry Andric // without incurring LDS memory overhead are. The remaining ones are lowered
101e3b55780SDimitry Andric // via table.
102e3b55780SDimitry Andric //
103e3b55780SDimitry Andric // Consequences
104e3b55780SDimitry Andric // - No heuristics or user controlled magic numbers, hybrid is the right choice
105e3b55780SDimitry Andric // - Kernels that don't use functions (or have had them all inlined) are not
106e3b55780SDimitry Andric // affected by any lowering for kernels that do.
107e3b55780SDimitry Andric // - Kernels that don't make indirect function calls are not affected by those
108e3b55780SDimitry Andric // that do.
109e3b55780SDimitry Andric // - Variables which are used by lots of kernels, e.g. those injected by a
110e3b55780SDimitry Andric // language runtime in most kernels, are expected to have no overhead
111e3b55780SDimitry Andric // - Implementations that instantiate templates per-kernel where those templates
112e3b55780SDimitry Andric // use LDS are expected to hit the "Kernel" lowering strategy
113e3b55780SDimitry Andric // - The runtime properties impose a cost in compiler implementation complexity
114344a3780SDimitry Andric //
1157fa27ce4SDimitry Andric // Dynamic LDS implementation
1167fa27ce4SDimitry Andric // Dynamic LDS is lowered similarly to the "table" strategy above and uses the
1177fa27ce4SDimitry Andric // same intrinsic to identify which kernel is at the root of the dynamic call
1187fa27ce4SDimitry Andric // graph. This relies on the specified behaviour that all dynamic LDS variables
1197fa27ce4SDimitry Andric // alias one another, i.e. are at the same address, with respect to a given
1207fa27ce4SDimitry Andric // kernel. Therefore this pass creates new dynamic LDS variables for each kernel
1217fa27ce4SDimitry Andric // that allocates any dynamic LDS and builds a table of addresses out of those.
1227fa27ce4SDimitry Andric // The AMDGPUPromoteAlloca pass skips kernels that use dynamic LDS.
1237fa27ce4SDimitry Andric // The corresponding optimisation for "kernel" lowering where the table lookup
1247fa27ce4SDimitry Andric // is elided is not implemented.
1257fa27ce4SDimitry Andric //
1267fa27ce4SDimitry Andric //
1277fa27ce4SDimitry Andric // Implementation notes / limitations
1287fa27ce4SDimitry Andric // A single LDS global variable represents an instance per kernel that can reach
1297fa27ce4SDimitry Andric // said variables. This pass essentially specialises said variables per kernel.
1307fa27ce4SDimitry Andric // Handling ConstantExpr during the pass complicated this significantly so now
1317fa27ce4SDimitry Andric // all ConstantExpr uses of LDS variables are expanded to instructions. This
1327fa27ce4SDimitry Andric // may need amending when implementing non-undef initialisers.
1337fa27ce4SDimitry Andric //
1347fa27ce4SDimitry Andric // Lowering is split between this IR pass and the back end. This pass chooses
1357fa27ce4SDimitry Andric // where given variables should be allocated and marks them with metadata,
1367fa27ce4SDimitry Andric // MD_absolute_symbol. The backend places the variables in coincidentally the
1377fa27ce4SDimitry Andric // same location and raises a fatal error if something has gone awry. This works
1387fa27ce4SDimitry Andric // in practice because the only pass between this one and the backend that
1397fa27ce4SDimitry Andric // changes LDS is PromoteAlloca and the changes it makes do not conflict.
1407fa27ce4SDimitry Andric //
1417fa27ce4SDimitry Andric // Addresses are written to constant global arrays based on the same metadata.
1427fa27ce4SDimitry Andric //
1437fa27ce4SDimitry Andric // The backend lowers LDS variables in the order of traversal of the function.
1447fa27ce4SDimitry Andric // This is at odds with the deterministic layout required. The workaround is to
1457fa27ce4SDimitry Andric // allocate the fixed-address variables immediately upon starting the function
1467fa27ce4SDimitry Andric // where they can be placed as intended. This requires a means of mapping from
1477fa27ce4SDimitry Andric // the function to the variables that it allocates. For the module scope lds,
1487fa27ce4SDimitry Andric // this is via metadata indicating whether the variable is not required. If a
1497fa27ce4SDimitry Andric // pass deletes that metadata, a fatal error on disagreement with the absolute
1507fa27ce4SDimitry Andric // symbol metadata will occur. For kernel scope and dynamic, this is by _name_
1517fa27ce4SDimitry Andric // correspondence between the function and the variable. It requires the
1527fa27ce4SDimitry Andric // kernel to have a name (which is only a limitation for tests in practice) and
1537fa27ce4SDimitry Andric // for nothing to rename the corresponding symbols. This is a hazard if the pass
1547fa27ce4SDimitry Andric // is run multiple times during debugging. Alternative schemes considered all
1557fa27ce4SDimitry Andric // involve bespoke metadata.
1567fa27ce4SDimitry Andric //
1577fa27ce4SDimitry Andric // If the name correspondence can be replaced, multiple distinct kernels that
1587fa27ce4SDimitry Andric // have the same memory layout can map to the same kernel id (as the address
1597fa27ce4SDimitry Andric // itself is handled by the absolute symbol metadata) and that will allow more
1607fa27ce4SDimitry Andric // uses of the "kernel" style faster lowering and reduce the size of the lookup
1617fa27ce4SDimitry Andric // tables.
1627fa27ce4SDimitry Andric //
1637fa27ce4SDimitry Andric // There is a test that checks this does not fire for a graphics shader. This
1647fa27ce4SDimitry Andric // lowering is expected to work for graphics if the isKernel test is changed.
1657fa27ce4SDimitry Andric //
1667fa27ce4SDimitry Andric // The current markUsedByKernel is sufficient for PromoteAlloca but is elided
1677fa27ce4SDimitry Andric // before codegen. Replacing this with an equivalent intrinsic which lasts until
1687fa27ce4SDimitry Andric // shortly after the machine function lowering of LDS would help break the name
1697fa27ce4SDimitry Andric // mapping. The other part needed is probably to amend PromoteAlloca to embed
1707fa27ce4SDimitry Andric // the LDS variables it creates in the same struct created here. That avoids the
1717fa27ce4SDimitry Andric // current hazard where a PromoteAlloca LDS variable might be allocated before
1727fa27ce4SDimitry Andric // the kernel scope (and thus error on the address check). Given a new invariant
1737fa27ce4SDimitry Andric // that no LDS variables exist outside of the structs managed here, and an
1747fa27ce4SDimitry Andric // intrinsic that lasts until after the LDS frame lowering, it should be
1757fa27ce4SDimitry Andric // possible to drop the name mapping and fold equivalent memory layouts.
1767fa27ce4SDimitry Andric //
177344a3780SDimitry Andric //===----------------------------------------------------------------------===//
178344a3780SDimitry Andric
179344a3780SDimitry Andric #include "AMDGPU.h"
180b1c73532SDimitry Andric #include "AMDGPUTargetMachine.h"
181344a3780SDimitry Andric #include "Utils/AMDGPUBaseInfo.h"
182145449b1SDimitry Andric #include "Utils/AMDGPUMemoryUtils.h"
18308e8dd7bSDimitry Andric #include "llvm/ADT/BitVector.h"
18408e8dd7bSDimitry Andric #include "llvm/ADT/DenseMap.h"
185e3b55780SDimitry Andric #include "llvm/ADT/DenseSet.h"
186344a3780SDimitry Andric #include "llvm/ADT/STLExtras.h"
187e3b55780SDimitry Andric #include "llvm/ADT/SetOperations.h"
188145449b1SDimitry Andric #include "llvm/Analysis/CallGraph.h"
189b1c73532SDimitry Andric #include "llvm/CodeGen/TargetPassConfig.h"
190344a3780SDimitry Andric #include "llvm/IR/Constants.h"
191344a3780SDimitry Andric #include "llvm/IR/DerivedTypes.h"
192344a3780SDimitry Andric #include "llvm/IR/IRBuilder.h"
193344a3780SDimitry Andric #include "llvm/IR/InlineAsm.h"
194344a3780SDimitry Andric #include "llvm/IR/Instructions.h"
195e3b55780SDimitry Andric #include "llvm/IR/IntrinsicsAMDGPU.h"
196c0981da4SDimitry Andric #include "llvm/IR/MDBuilder.h"
1977fa27ce4SDimitry Andric #include "llvm/IR/ReplaceConstant.h"
198344a3780SDimitry Andric #include "llvm/InitializePasses.h"
199344a3780SDimitry Andric #include "llvm/Pass.h"
200344a3780SDimitry Andric #include "llvm/Support/CommandLine.h"
201344a3780SDimitry Andric #include "llvm/Support/Debug.h"
2027fa27ce4SDimitry Andric #include "llvm/Support/Format.h"
203344a3780SDimitry Andric #include "llvm/Support/OptimizedStructLayout.h"
2047fa27ce4SDimitry Andric #include "llvm/Support/raw_ostream.h"
205e3b55780SDimitry Andric #include "llvm/Transforms/Utils/BasicBlockUtils.h"
206344a3780SDimitry Andric #include "llvm/Transforms/Utils/ModuleUtils.h"
207e3b55780SDimitry Andric
208344a3780SDimitry Andric #include <vector>
209344a3780SDimitry Andric
210e3b55780SDimitry Andric #include <cstdio>
211e3b55780SDimitry Andric
212344a3780SDimitry Andric #define DEBUG_TYPE "amdgpu-lower-module-lds"
213344a3780SDimitry Andric
214344a3780SDimitry Andric using namespace llvm;
215ac9a064cSDimitry Andric using namespace AMDGPU;
216344a3780SDimitry Andric
217e3b55780SDimitry Andric namespace {
218e3b55780SDimitry Andric
// Command line override for LDS variable alignment. When enabled (the
// default) the pass raises the alignment of LDS globals that are not already
// on a natural alignment boundary.
cl::opt<bool> SuperAlignLDSGlobals(
    "amdgpu-super-align-lds-globals",
    cl::desc("Increase alignment of LDS if it is not on align boundary"),
    cl::init(true), cl::Hidden);

// The lowering strategies described in the file header comment: group
// function-used variables in one module struct, look addresses up in a
// constant table, allocate per-kernel, or mix the three.
enum class LoweringKind { module, table, kernel, hybrid };

// Strategy selection for LDS accessed from non-kernel functions. Hybrid is
// the default as it combines the other strategies without heuristics.
cl::opt<LoweringKind> LoweringKindLoc(
    "amdgpu-lower-module-lds-strategy",
    cl::desc("Specify lowering strategy for function LDS access:"), cl::Hidden,
    cl::init(LoweringKind::hybrid),
    cl::values(
        clEnumValN(LoweringKind::table, "table", "Lower via table lookup"),
        clEnumValN(LoweringKind::module, "module", "Lower via module struct"),
        clEnumValN(
            LoweringKind::kernel, "kernel",
            "Lower variables reachable from one kernel, otherwise abort"),
        clEnumValN(LoweringKind::hybrid, "hybrid",
                   "Lower via mixture of above strategies")));
237e3b55780SDimitry Andric
sortByName(std::vector<T> && V)2387fa27ce4SDimitry Andric template <typename T> std::vector<T> sortByName(std::vector<T> &&V) {
2397fa27ce4SDimitry Andric llvm::sort(V.begin(), V.end(), [](const auto *L, const auto *R) {
2407fa27ce4SDimitry Andric return L->getName() < R->getName();
2417fa27ce4SDimitry Andric });
2427fa27ce4SDimitry Andric return {std::move(V)};
2437fa27ce4SDimitry Andric }
2447fa27ce4SDimitry Andric
245b1c73532SDimitry Andric class AMDGPULowerModuleLDS {
246b1c73532SDimitry Andric const AMDGPUTargetMachine &TM;
247344a3780SDimitry Andric
248344a3780SDimitry Andric static void
removeLocalVarsFromUsedLists(Module & M,const DenseSet<GlobalVariable * > & LocalVars)249e3b55780SDimitry Andric removeLocalVarsFromUsedLists(Module &M,
250e3b55780SDimitry Andric const DenseSet<GlobalVariable *> &LocalVars) {
25108e8dd7bSDimitry Andric // The verifier rejects used lists containing an inttoptr of a constant
25208e8dd7bSDimitry Andric // so remove the variables from these lists before replaceAllUsesWith
253e3b55780SDimitry Andric SmallPtrSet<Constant *, 8> LocalVarsSet;
25477fc4c14SDimitry Andric for (GlobalVariable *LocalVar : LocalVars)
255e3b55780SDimitry Andric LocalVarsSet.insert(cast<Constant>(LocalVar->stripPointerCasts()));
256e3b55780SDimitry Andric
257e3b55780SDimitry Andric removeFromUsedLists(
258e3b55780SDimitry Andric M, [&LocalVarsSet](Constant *C) { return LocalVarsSet.count(C); });
259e3b55780SDimitry Andric
260e3b55780SDimitry Andric for (GlobalVariable *LocalVar : LocalVars)
261e3b55780SDimitry Andric LocalVar->removeDeadConstantUsers();
262344a3780SDimitry Andric }
263344a3780SDimitry Andric
markUsedByKernel(Function * Func,GlobalVariable * SGV)2647fa27ce4SDimitry Andric static void markUsedByKernel(Function *Func, GlobalVariable *SGV) {
265344a3780SDimitry Andric // The llvm.amdgcn.module.lds instance is implicitly used by all kernels
266344a3780SDimitry Andric // that might call a function which accesses a field within it. This is
267344a3780SDimitry Andric // presently approximated to 'all kernels' if there are any such functions
268c0981da4SDimitry Andric // in the module. This implicit use is redefined as an explicit use here so
269344a3780SDimitry Andric // that later passes, specifically PromoteAlloca, account for the required
270344a3780SDimitry Andric // memory without any knowledge of this transform.
271344a3780SDimitry Andric
272344a3780SDimitry Andric // An operand bundle on llvm.donothing works because the call instruction
273344a3780SDimitry Andric // survives until after the last pass that needs to account for LDS. It is
274344a3780SDimitry Andric // better than inline asm as the latter survives until the end of codegen. A
275344a3780SDimitry Andric // totally robust solution would be a function with the same semantics as
276344a3780SDimitry Andric // llvm.donothing that takes a pointer to the instance and is lowered to a
277344a3780SDimitry Andric // no-op after LDS is allocated, but that is not presently necessary.
278344a3780SDimitry Andric
2797fa27ce4SDimitry Andric // This intrinsic is eliminated shortly before instruction selection. It
2807fa27ce4SDimitry Andric // does not suffice to indicate to ISel that a given global which is not
2817fa27ce4SDimitry Andric // immediately used by the kernel must still be allocated by it. An
2827fa27ce4SDimitry Andric // equivalent target specific intrinsic which lasts until immediately after
2837fa27ce4SDimitry Andric // codegen would suffice for that, but one would still need to ensure that
284ac9a064cSDimitry Andric // the variables are allocated in the anticipated order.
285b1c73532SDimitry Andric BasicBlock *Entry = &Func->getEntryBlock();
286b1c73532SDimitry Andric IRBuilder<> Builder(Entry, Entry->getFirstNonPHIIt());
287344a3780SDimitry Andric
288344a3780SDimitry Andric Function *Decl =
289344a3780SDimitry Andric Intrinsic::getDeclaration(Func->getParent(), Intrinsic::donothing, {});
290344a3780SDimitry Andric
2917fa27ce4SDimitry Andric Value *UseInstance[1] = {
2927fa27ce4SDimitry Andric Builder.CreateConstInBoundsGEP1_32(SGV->getValueType(), SGV, 0)};
293344a3780SDimitry Andric
2947fa27ce4SDimitry Andric Builder.CreateCall(
2957fa27ce4SDimitry Andric Decl, {}, {OperandBundleDefT<Value *>("ExplicitUse", UseInstance)});
296344a3780SDimitry Andric }
297344a3780SDimitry Andric
298344a3780SDimitry Andric public:
  // Construct the pass implementation against a specific target machine,
  // kept for subtarget/data layout queries during lowering.
  AMDGPULowerModuleLDS(const AMDGPUTargetMachine &TM_) : TM(TM_) {}

  // Result of moving a set of LDS variables into a single struct instance:
  // SGV is the replacement struct global and LDSVarsToConstantGEP maps each
  // original variable to the constantexpr GEP addressing its field in SGV.
  struct LDSVariableReplacement {
    GlobalVariable *SGV = nullptr;
    DenseMap<GlobalVariable *, Constant *> LDSVarsToConstantGEP;
  };
305e3b55780SDimitry Andric
306e3b55780SDimitry Andric // remap from lds global to a constantexpr gep to where it has been moved to
307e3b55780SDimitry Andric // for each kernel
308e3b55780SDimitry Andric // an array with an element for each kernel containing where the corresponding
309e3b55780SDimitry Andric // variable was remapped to
310e3b55780SDimitry Andric
getAddressesOfVariablesInKernel(LLVMContext & Ctx,ArrayRef<GlobalVariable * > Variables,const DenseMap<GlobalVariable *,Constant * > & LDSVarsToConstantGEP)311e3b55780SDimitry Andric static Constant *getAddressesOfVariablesInKernel(
312e3b55780SDimitry Andric LLVMContext &Ctx, ArrayRef<GlobalVariable *> Variables,
3137fa27ce4SDimitry Andric const DenseMap<GlobalVariable *, Constant *> &LDSVarsToConstantGEP) {
314e3b55780SDimitry Andric // Create a ConstantArray containing the address of each Variable within the
315e3b55780SDimitry Andric // kernel corresponding to LDSVarsToConstantGEP, or poison if that kernel
316e3b55780SDimitry Andric // does not allocate it
317e3b55780SDimitry Andric // TODO: Drop the ptrtoint conversion
318e3b55780SDimitry Andric
319e3b55780SDimitry Andric Type *I32 = Type::getInt32Ty(Ctx);
320e3b55780SDimitry Andric
321e3b55780SDimitry Andric ArrayType *KernelOffsetsType = ArrayType::get(I32, Variables.size());
322e3b55780SDimitry Andric
323e3b55780SDimitry Andric SmallVector<Constant *> Elements;
324ac9a064cSDimitry Andric for (GlobalVariable *GV : Variables) {
3257fa27ce4SDimitry Andric auto ConstantGepIt = LDSVarsToConstantGEP.find(GV);
3267fa27ce4SDimitry Andric if (ConstantGepIt != LDSVarsToConstantGEP.end()) {
3277fa27ce4SDimitry Andric auto elt = ConstantExpr::getPtrToInt(ConstantGepIt->second, I32);
328e3b55780SDimitry Andric Elements.push_back(elt);
329e3b55780SDimitry Andric } else {
330e3b55780SDimitry Andric Elements.push_back(PoisonValue::get(I32));
331e3b55780SDimitry Andric }
332e3b55780SDimitry Andric }
333e3b55780SDimitry Andric return ConstantArray::get(KernelOffsetsType, Elements);
334e3b55780SDimitry Andric }
335e3b55780SDimitry Andric
buildLookupTable(Module & M,ArrayRef<GlobalVariable * > Variables,ArrayRef<Function * > kernels,DenseMap<Function *,LDSVariableReplacement> & KernelToReplacement)336e3b55780SDimitry Andric static GlobalVariable *buildLookupTable(
337e3b55780SDimitry Andric Module &M, ArrayRef<GlobalVariable *> Variables,
338e3b55780SDimitry Andric ArrayRef<Function *> kernels,
339e3b55780SDimitry Andric DenseMap<Function *, LDSVariableReplacement> &KernelToReplacement) {
340e3b55780SDimitry Andric if (Variables.empty()) {
341e3b55780SDimitry Andric return nullptr;
342e3b55780SDimitry Andric }
343e3b55780SDimitry Andric LLVMContext &Ctx = M.getContext();
344e3b55780SDimitry Andric
345e3b55780SDimitry Andric const size_t NumberVariables = Variables.size();
346e3b55780SDimitry Andric const size_t NumberKernels = kernels.size();
347e3b55780SDimitry Andric
348e3b55780SDimitry Andric ArrayType *KernelOffsetsType =
349e3b55780SDimitry Andric ArrayType::get(Type::getInt32Ty(Ctx), NumberVariables);
350e3b55780SDimitry Andric
351e3b55780SDimitry Andric ArrayType *AllKernelsOffsetsType =
352e3b55780SDimitry Andric ArrayType::get(KernelOffsetsType, NumberKernels);
353e3b55780SDimitry Andric
3547fa27ce4SDimitry Andric Constant *Missing = PoisonValue::get(KernelOffsetsType);
355e3b55780SDimitry Andric std::vector<Constant *> overallConstantExprElts(NumberKernels);
356e3b55780SDimitry Andric for (size_t i = 0; i < NumberKernels; i++) {
3577fa27ce4SDimitry Andric auto Replacement = KernelToReplacement.find(kernels[i]);
3587fa27ce4SDimitry Andric overallConstantExprElts[i] =
3597fa27ce4SDimitry Andric (Replacement == KernelToReplacement.end())
3607fa27ce4SDimitry Andric ? Missing
3617fa27ce4SDimitry Andric : getAddressesOfVariablesInKernel(
3627fa27ce4SDimitry Andric Ctx, Variables, Replacement->second.LDSVarsToConstantGEP);
363e3b55780SDimitry Andric }
364e3b55780SDimitry Andric
365e3b55780SDimitry Andric Constant *init =
366e3b55780SDimitry Andric ConstantArray::get(AllKernelsOffsetsType, overallConstantExprElts);
367e3b55780SDimitry Andric
368e3b55780SDimitry Andric return new GlobalVariable(
369e3b55780SDimitry Andric M, AllKernelsOffsetsType, true, GlobalValue::InternalLinkage, init,
370e3b55780SDimitry Andric "llvm.amdgcn.lds.offset.table", nullptr, GlobalValue::NotThreadLocal,
371e3b55780SDimitry Andric AMDGPUAS::CONSTANT_ADDRESS);
372e3b55780SDimitry Andric }
373e3b55780SDimitry Andric
replaceUseWithTableLookup(Module & M,IRBuilder<> & Builder,GlobalVariable * LookupTable,GlobalVariable * GV,Use & U,Value * OptionalIndex)3747fa27ce4SDimitry Andric void replaceUseWithTableLookup(Module &M, IRBuilder<> &Builder,
3757fa27ce4SDimitry Andric GlobalVariable *LookupTable,
3767fa27ce4SDimitry Andric GlobalVariable *GV, Use &U,
3777fa27ce4SDimitry Andric Value *OptionalIndex) {
3787fa27ce4SDimitry Andric // Table is a constant array of the same length as OrderedKernels
379e3b55780SDimitry Andric LLVMContext &Ctx = M.getContext();
380e3b55780SDimitry Andric Type *I32 = Type::getInt32Ty(Ctx);
3817fa27ce4SDimitry Andric auto *I = cast<Instruction>(U.getUser());
382e3b55780SDimitry Andric
3837fa27ce4SDimitry Andric Value *tableKernelIndex = getTableLookupKernelIndex(M, I->getFunction());
384e3b55780SDimitry Andric
385e3b55780SDimitry Andric if (auto *Phi = dyn_cast<PHINode>(I)) {
386e3b55780SDimitry Andric BasicBlock *BB = Phi->getIncomingBlock(U);
387e3b55780SDimitry Andric Builder.SetInsertPoint(&(*(BB->getFirstInsertionPt())));
388e3b55780SDimitry Andric } else {
389e3b55780SDimitry Andric Builder.SetInsertPoint(I);
390e3b55780SDimitry Andric }
391e3b55780SDimitry Andric
3927fa27ce4SDimitry Andric SmallVector<Value *, 3> GEPIdx = {
393e3b55780SDimitry Andric ConstantInt::get(I32, 0),
394e3b55780SDimitry Andric tableKernelIndex,
395e3b55780SDimitry Andric };
3967fa27ce4SDimitry Andric if (OptionalIndex)
3977fa27ce4SDimitry Andric GEPIdx.push_back(OptionalIndex);
398e3b55780SDimitry Andric
399e3b55780SDimitry Andric Value *Address = Builder.CreateInBoundsGEP(
400e3b55780SDimitry Andric LookupTable->getValueType(), LookupTable, GEPIdx, GV->getName());
401e3b55780SDimitry Andric
402e3b55780SDimitry Andric Value *loaded = Builder.CreateLoad(I32, Address);
403e3b55780SDimitry Andric
404e3b55780SDimitry Andric Value *replacement =
405e3b55780SDimitry Andric Builder.CreateIntToPtr(loaded, GV->getType(), GV->getName());
406e3b55780SDimitry Andric
407e3b55780SDimitry Andric U.set(replacement);
408e3b55780SDimitry Andric }
4097fa27ce4SDimitry Andric
replaceUsesInInstructionsWithTableLookup(Module & M,ArrayRef<GlobalVariable * > ModuleScopeVariables,GlobalVariable * LookupTable)4107fa27ce4SDimitry Andric void replaceUsesInInstructionsWithTableLookup(
4117fa27ce4SDimitry Andric Module &M, ArrayRef<GlobalVariable *> ModuleScopeVariables,
4127fa27ce4SDimitry Andric GlobalVariable *LookupTable) {
4137fa27ce4SDimitry Andric
4147fa27ce4SDimitry Andric LLVMContext &Ctx = M.getContext();
4157fa27ce4SDimitry Andric IRBuilder<> Builder(Ctx);
4167fa27ce4SDimitry Andric Type *I32 = Type::getInt32Ty(Ctx);
4177fa27ce4SDimitry Andric
4187fa27ce4SDimitry Andric for (size_t Index = 0; Index < ModuleScopeVariables.size(); Index++) {
4197fa27ce4SDimitry Andric auto *GV = ModuleScopeVariables[Index];
4207fa27ce4SDimitry Andric
4217fa27ce4SDimitry Andric for (Use &U : make_early_inc_range(GV->uses())) {
4227fa27ce4SDimitry Andric auto *I = dyn_cast<Instruction>(U.getUser());
4237fa27ce4SDimitry Andric if (!I)
4247fa27ce4SDimitry Andric continue;
4257fa27ce4SDimitry Andric
4267fa27ce4SDimitry Andric replaceUseWithTableLookup(M, Builder, LookupTable, GV, U,
4277fa27ce4SDimitry Andric ConstantInt::get(I32, Index));
4287fa27ce4SDimitry Andric }
429e3b55780SDimitry Andric }
430e3b55780SDimitry Andric }
431e3b55780SDimitry Andric
kernelsThatIndirectlyAccessAnyOfPassedVariables(Module & M,LDSUsesInfoTy & LDSUsesInfo,DenseSet<GlobalVariable * > const & VariableSet)432e3b55780SDimitry Andric static DenseSet<Function *> kernelsThatIndirectlyAccessAnyOfPassedVariables(
433e3b55780SDimitry Andric Module &M, LDSUsesInfoTy &LDSUsesInfo,
434e3b55780SDimitry Andric DenseSet<GlobalVariable *> const &VariableSet) {
435e3b55780SDimitry Andric
436e3b55780SDimitry Andric DenseSet<Function *> KernelSet;
437e3b55780SDimitry Andric
4387fa27ce4SDimitry Andric if (VariableSet.empty())
4397fa27ce4SDimitry Andric return KernelSet;
440e3b55780SDimitry Andric
441e3b55780SDimitry Andric for (Function &Func : M.functions()) {
442e3b55780SDimitry Andric if (Func.isDeclaration() || !isKernelLDS(&Func))
443e3b55780SDimitry Andric continue;
444e3b55780SDimitry Andric for (GlobalVariable *GV : LDSUsesInfo.indirect_access[&Func]) {
445e3b55780SDimitry Andric if (VariableSet.contains(GV)) {
446e3b55780SDimitry Andric KernelSet.insert(&Func);
447e3b55780SDimitry Andric break;
448e3b55780SDimitry Andric }
449e3b55780SDimitry Andric }
450e3b55780SDimitry Andric }
451e3b55780SDimitry Andric
452e3b55780SDimitry Andric return KernelSet;
453e3b55780SDimitry Andric }
454e3b55780SDimitry Andric
455e3b55780SDimitry Andric static GlobalVariable *
chooseBestVariableForModuleStrategy(const DataLayout & DL,VariableFunctionMap & LDSVars)456e3b55780SDimitry Andric chooseBestVariableForModuleStrategy(const DataLayout &DL,
457e3b55780SDimitry Andric VariableFunctionMap &LDSVars) {
458e3b55780SDimitry Andric // Find the global variable with the most indirect uses from kernels
459e3b55780SDimitry Andric
460e3b55780SDimitry Andric struct CandidateTy {
461e3b55780SDimitry Andric GlobalVariable *GV = nullptr;
462e3b55780SDimitry Andric size_t UserCount = 0;
463e3b55780SDimitry Andric size_t Size = 0;
464e3b55780SDimitry Andric
465e3b55780SDimitry Andric CandidateTy() = default;
466e3b55780SDimitry Andric
467e3b55780SDimitry Andric CandidateTy(GlobalVariable *GV, uint64_t UserCount, uint64_t AllocSize)
468e3b55780SDimitry Andric : GV(GV), UserCount(UserCount), Size(AllocSize) {}
469e3b55780SDimitry Andric
470e3b55780SDimitry Andric bool operator<(const CandidateTy &Other) const {
471e3b55780SDimitry Andric // Fewer users makes module scope variable less attractive
472e3b55780SDimitry Andric if (UserCount < Other.UserCount) {
473e3b55780SDimitry Andric return true;
474e3b55780SDimitry Andric }
475e3b55780SDimitry Andric if (UserCount > Other.UserCount) {
476e3b55780SDimitry Andric return false;
477e3b55780SDimitry Andric }
478e3b55780SDimitry Andric
479e3b55780SDimitry Andric // Bigger makes module scope variable less attractive
480e3b55780SDimitry Andric if (Size < Other.Size) {
481e3b55780SDimitry Andric return false;
482e3b55780SDimitry Andric }
483e3b55780SDimitry Andric
484e3b55780SDimitry Andric if (Size > Other.Size) {
485e3b55780SDimitry Andric return true;
486e3b55780SDimitry Andric }
487e3b55780SDimitry Andric
488e3b55780SDimitry Andric // Arbitrary but consistent
489e3b55780SDimitry Andric return GV->getName() < Other.GV->getName();
490e3b55780SDimitry Andric }
491e3b55780SDimitry Andric };
492e3b55780SDimitry Andric
493e3b55780SDimitry Andric CandidateTy MostUsed;
494e3b55780SDimitry Andric
495e3b55780SDimitry Andric for (auto &K : LDSVars) {
496e3b55780SDimitry Andric GlobalVariable *GV = K.first;
497e3b55780SDimitry Andric if (K.second.size() <= 1) {
498e3b55780SDimitry Andric // A variable reachable by only one kernel is best lowered with kernel
499e3b55780SDimitry Andric // strategy
500e3b55780SDimitry Andric continue;
501e3b55780SDimitry Andric }
5027fa27ce4SDimitry Andric CandidateTy Candidate(
5037fa27ce4SDimitry Andric GV, K.second.size(),
504e3b55780SDimitry Andric DL.getTypeAllocSize(GV->getValueType()).getFixedValue());
505e3b55780SDimitry Andric if (MostUsed < Candidate)
506e3b55780SDimitry Andric MostUsed = Candidate;
507e3b55780SDimitry Andric }
508e3b55780SDimitry Andric
509e3b55780SDimitry Andric return MostUsed.GV;
510e3b55780SDimitry Andric }
511e3b55780SDimitry Andric
recordLDSAbsoluteAddress(Module * M,GlobalVariable * GV,uint32_t Address)5127fa27ce4SDimitry Andric static void recordLDSAbsoluteAddress(Module *M, GlobalVariable *GV,
5137fa27ce4SDimitry Andric uint32_t Address) {
5147fa27ce4SDimitry Andric // Write the specified address into metadata where it can be retrieved by
5157fa27ce4SDimitry Andric // the assembler. Format is a half open range, [Address Address+1)
5167fa27ce4SDimitry Andric LLVMContext &Ctx = M->getContext();
5177fa27ce4SDimitry Andric auto *IntTy =
5187fa27ce4SDimitry Andric M->getDataLayout().getIntPtrType(Ctx, AMDGPUAS::LOCAL_ADDRESS);
5197fa27ce4SDimitry Andric auto *MinC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address));
5207fa27ce4SDimitry Andric auto *MaxC = ConstantAsMetadata::get(ConstantInt::get(IntTy, Address + 1));
5217fa27ce4SDimitry Andric GV->setMetadata(LLVMContext::MD_absolute_symbol,
5227fa27ce4SDimitry Andric MDNode::get(Ctx, {MinC, MaxC}));
5237fa27ce4SDimitry Andric }
52408e8dd7bSDimitry Andric
5257fa27ce4SDimitry Andric DenseMap<Function *, Value *> tableKernelIndexCache;
getTableLookupKernelIndex(Module & M,Function * F)5267fa27ce4SDimitry Andric Value *getTableLookupKernelIndex(Module &M, Function *F) {
5277fa27ce4SDimitry Andric // Accesses from a function use the amdgcn_lds_kernel_id intrinsic which
5287fa27ce4SDimitry Andric // lowers to a read from a live in register. Emit it once in the entry
5297fa27ce4SDimitry Andric // block to spare deduplicating it later.
5307fa27ce4SDimitry Andric auto [It, Inserted] = tableKernelIndexCache.try_emplace(F);
5317fa27ce4SDimitry Andric if (Inserted) {
5327fa27ce4SDimitry Andric Function *Decl =
5337fa27ce4SDimitry Andric Intrinsic::getDeclaration(&M, Intrinsic::amdgcn_lds_kernel_id, {});
534344a3780SDimitry Andric
5357fa27ce4SDimitry Andric auto InsertAt = F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca();
5367fa27ce4SDimitry Andric IRBuilder<> Builder(&*InsertAt);
53708e8dd7bSDimitry Andric
5387fa27ce4SDimitry Andric It->second = Builder.CreateCall(Decl, {});
5397fa27ce4SDimitry Andric }
54008e8dd7bSDimitry Andric
5417fa27ce4SDimitry Andric return It->second;
5427fa27ce4SDimitry Andric }
5437fa27ce4SDimitry Andric
assignLDSKernelIDToEachKernel(Module * M,DenseSet<Function * > const & KernelsThatAllocateTableLDS,DenseSet<Function * > const & KernelsThatIndirectlyAllocateDynamicLDS)5447fa27ce4SDimitry Andric static std::vector<Function *> assignLDSKernelIDToEachKernel(
5457fa27ce4SDimitry Andric Module *M, DenseSet<Function *> const &KernelsThatAllocateTableLDS,
5467fa27ce4SDimitry Andric DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS) {
547ac9a064cSDimitry Andric // Associate kernels in the set with an arbitrary but reproducible order and
5487fa27ce4SDimitry Andric // annotate them with that order in metadata. This metadata is recognised by
5497fa27ce4SDimitry Andric // the backend and lowered to a SGPR which can be read from using
5507fa27ce4SDimitry Andric // amdgcn_lds_kernel_id.
5517fa27ce4SDimitry Andric
5527fa27ce4SDimitry Andric std::vector<Function *> OrderedKernels;
5537fa27ce4SDimitry Andric if (!KernelsThatAllocateTableLDS.empty() ||
5547fa27ce4SDimitry Andric !KernelsThatIndirectlyAllocateDynamicLDS.empty()) {
5557fa27ce4SDimitry Andric
5567fa27ce4SDimitry Andric for (Function &Func : M->functions()) {
5577fa27ce4SDimitry Andric if (Func.isDeclaration())
5587fa27ce4SDimitry Andric continue;
5597fa27ce4SDimitry Andric if (!isKernelLDS(&Func))
5607fa27ce4SDimitry Andric continue;
5617fa27ce4SDimitry Andric
5627fa27ce4SDimitry Andric if (KernelsThatAllocateTableLDS.contains(&Func) ||
5637fa27ce4SDimitry Andric KernelsThatIndirectlyAllocateDynamicLDS.contains(&Func)) {
5647fa27ce4SDimitry Andric assert(Func.hasName()); // else fatal error earlier
5657fa27ce4SDimitry Andric OrderedKernels.push_back(&Func);
566e3b55780SDimitry Andric }
567e3b55780SDimitry Andric }
56808e8dd7bSDimitry Andric
5697fa27ce4SDimitry Andric // Put them in an arbitrary but reproducible order
5707fa27ce4SDimitry Andric OrderedKernels = sortByName(std::move(OrderedKernels));
57108e8dd7bSDimitry Andric
5727fa27ce4SDimitry Andric // Annotate the kernels with their order in this vector
5737fa27ce4SDimitry Andric LLVMContext &Ctx = M->getContext();
5747fa27ce4SDimitry Andric IRBuilder<> Builder(Ctx);
5757fa27ce4SDimitry Andric
5767fa27ce4SDimitry Andric if (OrderedKernels.size() > UINT32_MAX) {
5777fa27ce4SDimitry Andric // 32 bit keeps it in one SGPR. > 2**32 kernels won't fit on the GPU
5787fa27ce4SDimitry Andric report_fatal_error("Unimplemented LDS lowering for > 2**32 kernels");
5797fa27ce4SDimitry Andric }
5807fa27ce4SDimitry Andric
5817fa27ce4SDimitry Andric for (size_t i = 0; i < OrderedKernels.size(); i++) {
5827fa27ce4SDimitry Andric Metadata *AttrMDArgs[1] = {
5837fa27ce4SDimitry Andric ConstantAsMetadata::get(Builder.getInt32(i)),
5847fa27ce4SDimitry Andric };
5857fa27ce4SDimitry Andric OrderedKernels[i]->setMetadata("llvm.amdgcn.lds.kernel.id",
5867fa27ce4SDimitry Andric MDNode::get(Ctx, AttrMDArgs));
5877fa27ce4SDimitry Andric }
5887fa27ce4SDimitry Andric }
5897fa27ce4SDimitry Andric return OrderedKernels;
5907fa27ce4SDimitry Andric }
5917fa27ce4SDimitry Andric
// Assign every indirectly-accessed LDS variable (the keys of
// LDSToKernelsThatNeedToAccessItIndirectly) to exactly one lowering strategy,
// selected by the LoweringKindLoc option:
//  - DynamicVariables:      no-initialiser ("dynamic") LDS, lowered separately
//  - ModuleScopeVariables:  placed in a single module-wide struct
//  - TableLookupVariables:  address found at runtime via a per-kernel table
//  - KernelAccessVariables: reachable from exactly one kernel, lowered direct
// The four output sets are disjoint and together cover all the map's keys
// (checked by the final assert).
static void partitionVariablesIntoIndirectStrategies(
    Module &M, LDSUsesInfoTy const &LDSUsesInfo,
    VariableFunctionMap &LDSToKernelsThatNeedToAccessItIndirectly,
    DenseSet<GlobalVariable *> &ModuleScopeVariables,
    DenseSet<GlobalVariable *> &TableLookupVariables,
    DenseSet<GlobalVariable *> &KernelAccessVariables,
    DenseSet<GlobalVariable *> &DynamicVariables) {

  // In hybrid mode one variable (the "root") is chosen to live at module
  // scope; in every other mode there is no root.
  GlobalVariable *HybridModuleRoot =
      LoweringKindLoc != LoweringKind::hybrid
          ? nullptr
          : chooseBestVariableForModuleStrategy(
                M.getDataLayout(), LDSToKernelsThatNeedToAccessItIndirectly);

  // Kernels that need to reach the hybrid root indirectly; empty when not in
  // hybrid mode or no root was selected.
  DenseSet<Function *> const EmptySet;
  DenseSet<Function *> const &HybridModuleRootKernels =
      HybridModuleRoot
          ? LDSToKernelsThatNeedToAccessItIndirectly[HybridModuleRoot]
          : EmptySet;

  for (auto &K : LDSToKernelsThatNeedToAccessItIndirectly) {
    // Each iteration of this loop assigns exactly one global variable to
    // exactly one of the implementation strategies.

    GlobalVariable *GV = K.first;
    assert(AMDGPU::isLDSVariableToLower(*GV));
    assert(K.second.size() != 0);

    // Dynamic LDS always uses its own lowering, regardless of LoweringKind.
    if (AMDGPU::isDynamicLDS(*GV)) {
      DynamicVariables.insert(GV);
      continue;
    }

    switch (LoweringKindLoc) {
    case LoweringKind::module:
      ModuleScopeVariables.insert(GV);
      break;

    case LoweringKind::table:
      TableLookupVariables.insert(GV);
      break;

    case LoweringKind::kernel:
      // Direct kernel lowering is only sound when a single kernel can reach
      // the variable; otherwise the allocations would conflict.
      if (K.second.size() == 1) {
        KernelAccessVariables.insert(GV);
      } else {
        report_fatal_error(
            "cannot lower LDS '" + GV->getName() +
            "' to kernel access as it is reachable from multiple kernels");
      }
      break;

    case LoweringKind::hybrid: {
      if (GV == HybridModuleRoot) {
        // The chosen root always goes in the module struct; by construction
        // (see chooseBestVariableForModuleStrategy) it has multiple users.
        assert(K.second.size() != 1);
        ModuleScopeVariables.insert(GV);
      } else if (K.second.size() == 1) {
        KernelAccessVariables.insert(GV);
      } else if (set_is_subset(K.second, HybridModuleRootKernels)) {
        // Every kernel that needs this variable allocates the module struct
        // anyway, so placing it there costs no additional LDS.
        ModuleScopeVariables.insert(GV);
      } else {
        TableLookupVariables.insert(GV);
      }
      break;
    }
    }
  }

  // All LDS variables accessed indirectly have now been partitioned into
  // the distinct lowering strategies.
  assert(ModuleScopeVariables.size() + TableLookupVariables.size() +
             KernelAccessVariables.size() + DynamicVariables.size() ==
         LDSToKernelsThatNeedToAccessItIndirectly.size());
}
666e3b55780SDimitry Andric
// Lower the ModuleScopeVariables into a single struct instance,
// llvm.amdgcn.module.lds, recorded as allocated at LDS address zero, and
// rewrite uses of the member variables to address into that struct. Returns
// the new struct global, or nullptr when there are no module scope variables.
static GlobalVariable *lowerModuleScopeStructVariables(
    Module &M, DenseSet<GlobalVariable *> const &ModuleScopeVariables,
    DenseSet<Function *> const &KernelsThatAllocateModuleLDS) {
  // Create a struct to hold the ModuleScopeVariables
  // Replace all uses of those variables from non-kernel functions with the
  // new struct instance Replace only the uses from kernel functions that will
  // allocate this instance. That is a space optimisation - kernels that use a
  // subset of the module scope struct and do not need to allocate it for
  // indirect calls will only allocate the subset they use (they do so as part
  // of the per-kernel lowering).
  if (ModuleScopeVariables.empty()) {
    return nullptr;
  }

  LLVMContext &Ctx = M.getContext();

  LDSVariableReplacement ModuleScopeReplacement =
      createLDSVariableReplacement(M, "llvm.amdgcn.module.lds",
                                   ModuleScopeVariables);

  // Keep the struct alive through optimisation by listing it in
  // llvm.compiler.used.
  appendToCompilerUsed(M, {static_cast<GlobalValue *>(
                          ConstantExpr::getPointerBitCastOrAddrSpaceCast(
                              cast<Constant>(ModuleScopeReplacement.SGV),
                              PointerType::getUnqual(Ctx)))});

  // module.lds will be allocated at zero in any kernel that allocates it
  recordLDSAbsoluteAddress(&M, ModuleScopeReplacement.SGV, 0);

  // historic
  removeLocalVarsFromUsedLists(M, ModuleScopeVariables);

  // Replace all uses of module scope variable from non-kernel functions
  replaceLDSVariablesWithStruct(
      M, ModuleScopeVariables, ModuleScopeReplacement, [&](Use &U) {
        Instruction *I = dyn_cast<Instruction>(U.getUser());
        if (!I) {
          return false;
        }
        Function *F = I->getFunction();
        return !isKernelLDS(F);
      });

  // Replace uses of module scope variable from kernel functions that
  // allocate the module scope variable, otherwise leave them unchanged
  // Record on each kernel whether the module scope global is used by it

  for (Function &Func : M.functions()) {
    if (Func.isDeclaration() || !isKernelLDS(&Func))
      continue;

    if (KernelsThatAllocateModuleLDS.contains(&Func)) {
      // This kernel allocates the whole module struct: redirect its uses of
      // the member variables into the struct.
      replaceLDSVariablesWithStruct(
          M, ModuleScopeVariables, ModuleScopeReplacement, [&](Use &U) {
            Instruction *I = dyn_cast<Instruction>(U.getUser());
            if (!I) {
              return false;
            }
            Function *F = I->getFunction();
            return F == &Func;
          });

      // Ensure the kernel actually allocates the struct even if it only
      // reaches it through calls.
      markUsedByKernel(&Func, ModuleScopeReplacement.SGV);
    }
  }

  return ModuleScopeReplacement.SGV;
}
7347fa27ce4SDimitry Andric
// For each kernel, gather the non-dynamic LDS variables it uses (directly or
// through callees) that are not already covered by the module scope struct,
// pack them into a per-kernel struct named llvm.amdgcn.kernel.<name>.lds, and
// rewrite the kernel's uses to address into that struct. Returns the map from
// kernel to the replacement it was given.
// NOTE(review): MaybeModuleScopeStruct is not referenced in this body —
// confirm whether the parameter is still needed by callers.
static DenseMap<Function *, LDSVariableReplacement>
lowerKernelScopeStructVariables(
    Module &M, LDSUsesInfoTy &LDSUsesInfo,
    DenseSet<GlobalVariable *> const &ModuleScopeVariables,
    DenseSet<Function *> const &KernelsThatAllocateModuleLDS,
    GlobalVariable *MaybeModuleScopeStruct) {

  // Create a struct for each kernel for the non-module-scope variables.

  DenseMap<Function *, LDSVariableReplacement> KernelToReplacement;
  for (Function &Func : M.functions()) {
    if (Func.isDeclaration() || !isKernelLDS(&Func))
      continue;

    DenseSet<GlobalVariable *> KernelUsedVariables;
    // Allocating variables that are used directly in this struct to get
    // alignment aware allocation and predictable frame size.
    for (auto &v : LDSUsesInfo.direct_access[&Func]) {
      if (!AMDGPU::isDynamicLDS(*v)) {
        KernelUsedVariables.insert(v);
      }
    }

    // Allocating variables that are accessed indirectly so that a lookup of
    // this struct instance can find them from nested functions.
    for (auto &v : LDSUsesInfo.indirect_access[&Func]) {
      if (!AMDGPU::isDynamicLDS(*v)) {
        KernelUsedVariables.insert(v);
      }
    }

    // Variables allocated in module lds must all resolve to that struct,
    // not to the per-kernel instance.
    if (KernelsThatAllocateModuleLDS.contains(&Func)) {
      for (GlobalVariable *v : ModuleScopeVariables) {
        KernelUsedVariables.erase(v);
      }
    }

    if (KernelUsedVariables.empty()) {
      // Either used no LDS, or the LDS it used was all in the module struct
      // or dynamically sized
      continue;
    }

    // The association between kernel function and LDS struct is done by
    // symbol name, which only works if the function in question has a
    // name This is not expected to be a problem in practice as kernels
    // are called by name making anonymous ones (which are named by the
    // backend) difficult to use. This does mean that llvm test cases need
    // to name the kernels.
    if (!Func.hasName()) {
      report_fatal_error("Anonymous kernels cannot use LDS variables");
    }

    std::string VarName =
        (Twine("llvm.amdgcn.kernel.") + Func.getName() + ".lds").str();

    auto Replacement =
        createLDSVariableReplacement(M, VarName, KernelUsedVariables);

    // If any indirect uses, create a direct use to ensure allocation
    // TODO: Simpler to unconditionally mark used but that regresses
    // codegen in test/CodeGen/AMDGPU/noclobber-barrier.ll
    auto Accesses = LDSUsesInfo.indirect_access.find(&Func);
    if ((Accesses != LDSUsesInfo.indirect_access.end()) &&
        !Accesses->second.empty())
      markUsedByKernel(&Func, Replacement.SGV);

    // remove preserves existing codegen
    removeLocalVarsFromUsedLists(M, KernelUsedVariables);
    KernelToReplacement[&Func] = Replacement;

    // Rewrite uses within kernel to the new struct
    replaceLDSVariablesWithStruct(
        M, KernelUsedVariables, Replacement, [&Func](Use &U) {
          Instruction *I = dyn_cast<Instruction>(U.getUser());
          return I && I->getFunction() == &Func;
        });
  }
  return KernelToReplacement;
}
8177fa27ce4SDimitry Andric
8187fa27ce4SDimitry Andric static GlobalVariable *
buildRepresentativeDynamicLDSInstance(Module & M,LDSUsesInfoTy & LDSUsesInfo,Function * func)8197fa27ce4SDimitry Andric buildRepresentativeDynamicLDSInstance(Module &M, LDSUsesInfoTy &LDSUsesInfo,
8207fa27ce4SDimitry Andric Function *func) {
8217fa27ce4SDimitry Andric // Create a dynamic lds variable with a name associated with the passed
8227fa27ce4SDimitry Andric // function that has the maximum alignment of any dynamic lds variable
8237fa27ce4SDimitry Andric // reachable from this kernel. Dynamic LDS is allocated after the static LDS
8247fa27ce4SDimitry Andric // allocation, possibly after alignment padding. The representative variable
8257fa27ce4SDimitry Andric // created here has the maximum alignment of any other dynamic variable
8267fa27ce4SDimitry Andric // reachable by that kernel. All dynamic LDS variables are allocated at the
8277fa27ce4SDimitry Andric // same address in each kernel in order to provide the documented aliasing
8287fa27ce4SDimitry Andric // semantics. Setting the alignment here allows this IR pass to accurately
8297fa27ce4SDimitry Andric // predict the exact constant at which it will be allocated.
8307fa27ce4SDimitry Andric
8317fa27ce4SDimitry Andric assert(isKernelLDS(func));
8327fa27ce4SDimitry Andric
8337fa27ce4SDimitry Andric LLVMContext &Ctx = M.getContext();
8347fa27ce4SDimitry Andric const DataLayout &DL = M.getDataLayout();
8357fa27ce4SDimitry Andric Align MaxDynamicAlignment(1);
8367fa27ce4SDimitry Andric
8377fa27ce4SDimitry Andric auto UpdateMaxAlignment = [&MaxDynamicAlignment, &DL](GlobalVariable *GV) {
8387fa27ce4SDimitry Andric if (AMDGPU::isDynamicLDS(*GV)) {
8397fa27ce4SDimitry Andric MaxDynamicAlignment =
8407fa27ce4SDimitry Andric std::max(MaxDynamicAlignment, AMDGPU::getAlign(DL, GV));
8417fa27ce4SDimitry Andric }
8427fa27ce4SDimitry Andric };
8437fa27ce4SDimitry Andric
8447fa27ce4SDimitry Andric for (GlobalVariable *GV : LDSUsesInfo.indirect_access[func]) {
8457fa27ce4SDimitry Andric UpdateMaxAlignment(GV);
8467fa27ce4SDimitry Andric }
8477fa27ce4SDimitry Andric
8487fa27ce4SDimitry Andric for (GlobalVariable *GV : LDSUsesInfo.direct_access[func]) {
8497fa27ce4SDimitry Andric UpdateMaxAlignment(GV);
8507fa27ce4SDimitry Andric }
8517fa27ce4SDimitry Andric
8527fa27ce4SDimitry Andric assert(func->hasName()); // Checked by caller
8537fa27ce4SDimitry Andric auto emptyCharArray = ArrayType::get(Type::getInt8Ty(Ctx), 0);
8547fa27ce4SDimitry Andric GlobalVariable *N = new GlobalVariable(
8557fa27ce4SDimitry Andric M, emptyCharArray, false, GlobalValue::ExternalLinkage, nullptr,
8567fa27ce4SDimitry Andric Twine("llvm.amdgcn." + func->getName() + ".dynlds"), nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
8577fa27ce4SDimitry Andric false);
8587fa27ce4SDimitry Andric N->setAlignment(MaxDynamicAlignment);
8597fa27ce4SDimitry Andric
8607fa27ce4SDimitry Andric assert(AMDGPU::isDynamicLDS(*N));
8617fa27ce4SDimitry Andric return N;
8627fa27ce4SDimitry Andric }
8637fa27ce4SDimitry Andric
lowerDynamicLDSVariables(Module & M,LDSUsesInfoTy & LDSUsesInfo,DenseSet<Function * > const & KernelsThatIndirectlyAllocateDynamicLDS,DenseSet<GlobalVariable * > const & DynamicVariables,std::vector<Function * > const & OrderedKernels)8647fa27ce4SDimitry Andric DenseMap<Function *, GlobalVariable *> lowerDynamicLDSVariables(
8657fa27ce4SDimitry Andric Module &M, LDSUsesInfoTy &LDSUsesInfo,
8667fa27ce4SDimitry Andric DenseSet<Function *> const &KernelsThatIndirectlyAllocateDynamicLDS,
8677fa27ce4SDimitry Andric DenseSet<GlobalVariable *> const &DynamicVariables,
8687fa27ce4SDimitry Andric std::vector<Function *> const &OrderedKernels) {
8697fa27ce4SDimitry Andric DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS;
8707fa27ce4SDimitry Andric if (!KernelsThatIndirectlyAllocateDynamicLDS.empty()) {
8717fa27ce4SDimitry Andric LLVMContext &Ctx = M.getContext();
8727fa27ce4SDimitry Andric IRBuilder<> Builder(Ctx);
8737fa27ce4SDimitry Andric Type *I32 = Type::getInt32Ty(Ctx);
8747fa27ce4SDimitry Andric
8757fa27ce4SDimitry Andric std::vector<Constant *> newDynamicLDS;
8767fa27ce4SDimitry Andric
8777fa27ce4SDimitry Andric // Table is built in the same order as OrderedKernels
8787fa27ce4SDimitry Andric for (auto &func : OrderedKernels) {
8797fa27ce4SDimitry Andric
8807fa27ce4SDimitry Andric if (KernelsThatIndirectlyAllocateDynamicLDS.contains(func)) {
8817fa27ce4SDimitry Andric assert(isKernelLDS(func));
8827fa27ce4SDimitry Andric if (!func->hasName()) {
8837fa27ce4SDimitry Andric report_fatal_error("Anonymous kernels cannot use LDS variables");
8847fa27ce4SDimitry Andric }
8857fa27ce4SDimitry Andric
8867fa27ce4SDimitry Andric GlobalVariable *N =
8877fa27ce4SDimitry Andric buildRepresentativeDynamicLDSInstance(M, LDSUsesInfo, func);
8887fa27ce4SDimitry Andric
8897fa27ce4SDimitry Andric KernelToCreatedDynamicLDS[func] = N;
8907fa27ce4SDimitry Andric
8917fa27ce4SDimitry Andric markUsedByKernel(func, N);
8927fa27ce4SDimitry Andric
8937fa27ce4SDimitry Andric auto emptyCharArray = ArrayType::get(Type::getInt8Ty(Ctx), 0);
8947fa27ce4SDimitry Andric auto GEP = ConstantExpr::getGetElementPtr(
8957fa27ce4SDimitry Andric emptyCharArray, N, ConstantInt::get(I32, 0), true);
8967fa27ce4SDimitry Andric newDynamicLDS.push_back(ConstantExpr::getPtrToInt(GEP, I32));
8977fa27ce4SDimitry Andric } else {
8987fa27ce4SDimitry Andric newDynamicLDS.push_back(PoisonValue::get(I32));
8997fa27ce4SDimitry Andric }
9007fa27ce4SDimitry Andric }
9017fa27ce4SDimitry Andric assert(OrderedKernels.size() == newDynamicLDS.size());
9027fa27ce4SDimitry Andric
9037fa27ce4SDimitry Andric ArrayType *t = ArrayType::get(I32, newDynamicLDS.size());
9047fa27ce4SDimitry Andric Constant *init = ConstantArray::get(t, newDynamicLDS);
9057fa27ce4SDimitry Andric GlobalVariable *table = new GlobalVariable(
9067fa27ce4SDimitry Andric M, t, true, GlobalValue::InternalLinkage, init,
9077fa27ce4SDimitry Andric "llvm.amdgcn.dynlds.offset.table", nullptr,
9087fa27ce4SDimitry Andric GlobalValue::NotThreadLocal, AMDGPUAS::CONSTANT_ADDRESS);
9097fa27ce4SDimitry Andric
9107fa27ce4SDimitry Andric for (GlobalVariable *GV : DynamicVariables) {
9117fa27ce4SDimitry Andric for (Use &U : make_early_inc_range(GV->uses())) {
9127fa27ce4SDimitry Andric auto *I = dyn_cast<Instruction>(U.getUser());
9137fa27ce4SDimitry Andric if (!I)
9147fa27ce4SDimitry Andric continue;
9157fa27ce4SDimitry Andric if (isKernelLDS(I->getFunction()))
9167fa27ce4SDimitry Andric continue;
9177fa27ce4SDimitry Andric
9187fa27ce4SDimitry Andric replaceUseWithTableLookup(M, Builder, table, GV, U, nullptr);
9197fa27ce4SDimitry Andric }
9207fa27ce4SDimitry Andric }
9217fa27ce4SDimitry Andric }
9227fa27ce4SDimitry Andric return KernelToCreatedDynamicLDS;
9237fa27ce4SDimitry Andric }
9247fa27ce4SDimitry Andric
runOnModule(Module & M)925b1c73532SDimitry Andric bool runOnModule(Module &M) {
9267fa27ce4SDimitry Andric CallGraph CG = CallGraph(M);
9277fa27ce4SDimitry Andric bool Changed = superAlignLDSGlobals(M);
9287fa27ce4SDimitry Andric
9297fa27ce4SDimitry Andric Changed |= eliminateConstantExprUsesOfLDSFromAllInstructions(M);
9307fa27ce4SDimitry Andric
9317fa27ce4SDimitry Andric Changed = true; // todo: narrow this down
9327fa27ce4SDimitry Andric
9337fa27ce4SDimitry Andric // For each kernel, what variables does it access directly or through
9347fa27ce4SDimitry Andric // callees
9357fa27ce4SDimitry Andric LDSUsesInfoTy LDSUsesInfo = getTransitiveUsesOfLDS(CG, M);
9367fa27ce4SDimitry Andric
9377fa27ce4SDimitry Andric // For each variable accessed through callees, which kernels access it
9387fa27ce4SDimitry Andric VariableFunctionMap LDSToKernelsThatNeedToAccessItIndirectly;
9397fa27ce4SDimitry Andric for (auto &K : LDSUsesInfo.indirect_access) {
9407fa27ce4SDimitry Andric Function *F = K.first;
9417fa27ce4SDimitry Andric assert(isKernelLDS(F));
9427fa27ce4SDimitry Andric for (GlobalVariable *GV : K.second) {
9437fa27ce4SDimitry Andric LDSToKernelsThatNeedToAccessItIndirectly[GV].insert(F);
9447fa27ce4SDimitry Andric }
9457fa27ce4SDimitry Andric }
9467fa27ce4SDimitry Andric
9477fa27ce4SDimitry Andric // Partition variables accessed indirectly into the different strategies
9487fa27ce4SDimitry Andric DenseSet<GlobalVariable *> ModuleScopeVariables;
9497fa27ce4SDimitry Andric DenseSet<GlobalVariable *> TableLookupVariables;
9507fa27ce4SDimitry Andric DenseSet<GlobalVariable *> KernelAccessVariables;
9517fa27ce4SDimitry Andric DenseSet<GlobalVariable *> DynamicVariables;
9527fa27ce4SDimitry Andric partitionVariablesIntoIndirectStrategies(
9537fa27ce4SDimitry Andric M, LDSUsesInfo, LDSToKernelsThatNeedToAccessItIndirectly,
9547fa27ce4SDimitry Andric ModuleScopeVariables, TableLookupVariables, KernelAccessVariables,
9557fa27ce4SDimitry Andric DynamicVariables);
9567fa27ce4SDimitry Andric
9577fa27ce4SDimitry Andric // If the kernel accesses a variable that is going to be stored in the
9587fa27ce4SDimitry Andric // module instance through a call then that kernel needs to allocate the
9597fa27ce4SDimitry Andric // module instance
9607fa27ce4SDimitry Andric const DenseSet<Function *> KernelsThatAllocateModuleLDS =
9617fa27ce4SDimitry Andric kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
9627fa27ce4SDimitry Andric ModuleScopeVariables);
9637fa27ce4SDimitry Andric const DenseSet<Function *> KernelsThatAllocateTableLDS =
9647fa27ce4SDimitry Andric kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
9657fa27ce4SDimitry Andric TableLookupVariables);
9667fa27ce4SDimitry Andric
9677fa27ce4SDimitry Andric const DenseSet<Function *> KernelsThatIndirectlyAllocateDynamicLDS =
9687fa27ce4SDimitry Andric kernelsThatIndirectlyAccessAnyOfPassedVariables(M, LDSUsesInfo,
9697fa27ce4SDimitry Andric DynamicVariables);
9707fa27ce4SDimitry Andric
9717fa27ce4SDimitry Andric GlobalVariable *MaybeModuleScopeStruct = lowerModuleScopeStructVariables(
9727fa27ce4SDimitry Andric M, ModuleScopeVariables, KernelsThatAllocateModuleLDS);
9737fa27ce4SDimitry Andric
9747fa27ce4SDimitry Andric DenseMap<Function *, LDSVariableReplacement> KernelToReplacement =
9757fa27ce4SDimitry Andric lowerKernelScopeStructVariables(M, LDSUsesInfo, ModuleScopeVariables,
9767fa27ce4SDimitry Andric KernelsThatAllocateModuleLDS,
9777fa27ce4SDimitry Andric MaybeModuleScopeStruct);
978e3b55780SDimitry Andric
979e3b55780SDimitry Andric // Lower zero cost accesses to the kernel instances just created
980e3b55780SDimitry Andric for (auto &GV : KernelAccessVariables) {
981e3b55780SDimitry Andric auto &funcs = LDSToKernelsThatNeedToAccessItIndirectly[GV];
982e3b55780SDimitry Andric assert(funcs.size() == 1); // Only one kernel can access it
983e3b55780SDimitry Andric LDSVariableReplacement Replacement =
984e3b55780SDimitry Andric KernelToReplacement[*(funcs.begin())];
985e3b55780SDimitry Andric
986e3b55780SDimitry Andric DenseSet<GlobalVariable *> Vec;
987e3b55780SDimitry Andric Vec.insert(GV);
988e3b55780SDimitry Andric
989e3b55780SDimitry Andric replaceLDSVariablesWithStruct(M, Vec, Replacement, [](Use &U) {
990e3b55780SDimitry Andric return isa<Instruction>(U.getUser());
991e3b55780SDimitry Andric });
992e3b55780SDimitry Andric }
993e3b55780SDimitry Andric
9947fa27ce4SDimitry Andric // The ith element of this vector is kernel id i
9957fa27ce4SDimitry Andric std::vector<Function *> OrderedKernels =
9967fa27ce4SDimitry Andric assignLDSKernelIDToEachKernel(&M, KernelsThatAllocateTableLDS,
9977fa27ce4SDimitry Andric KernelsThatIndirectlyAllocateDynamicLDS);
9987fa27ce4SDimitry Andric
999e3b55780SDimitry Andric if (!KernelsThatAllocateTableLDS.empty()) {
1000e3b55780SDimitry Andric LLVMContext &Ctx = M.getContext();
1001e3b55780SDimitry Andric IRBuilder<> Builder(Ctx);
1002e3b55780SDimitry Andric
1003e3b55780SDimitry Andric // The order must be consistent between lookup table and accesses to
1004e3b55780SDimitry Andric // lookup table
10057fa27ce4SDimitry Andric auto TableLookupVariablesOrdered =
10067fa27ce4SDimitry Andric sortByName(std::vector<GlobalVariable *>(TableLookupVariables.begin(),
10077fa27ce4SDimitry Andric TableLookupVariables.end()));
1008e3b55780SDimitry Andric
1009e3b55780SDimitry Andric GlobalVariable *LookupTable = buildLookupTable(
1010e3b55780SDimitry Andric M, TableLookupVariablesOrdered, OrderedKernels, KernelToReplacement);
1011e3b55780SDimitry Andric replaceUsesInInstructionsWithTableLookup(M, TableLookupVariablesOrdered,
1012e3b55780SDimitry Andric LookupTable);
1013950076cdSDimitry Andric
1014950076cdSDimitry Andric // Strip amdgpu-no-lds-kernel-id from all functions reachable from the
1015950076cdSDimitry Andric // kernel. We may have inferred this wasn't used prior to the pass.
1016950076cdSDimitry Andric //
1017950076cdSDimitry Andric // TODO: We could filter out subgraphs that do not access LDS globals.
1018950076cdSDimitry Andric for (Function *F : KernelsThatAllocateTableLDS)
1019ac9a064cSDimitry Andric removeFnAttrFromReachable(CG, F, {"amdgpu-no-lds-kernel-id"});
1020e3b55780SDimitry Andric }
1021e3b55780SDimitry Andric
10227fa27ce4SDimitry Andric DenseMap<Function *, GlobalVariable *> KernelToCreatedDynamicLDS =
10237fa27ce4SDimitry Andric lowerDynamicLDSVariables(M, LDSUsesInfo,
10247fa27ce4SDimitry Andric KernelsThatIndirectlyAllocateDynamicLDS,
10257fa27ce4SDimitry Andric DynamicVariables, OrderedKernels);
10267fa27ce4SDimitry Andric
10277fa27ce4SDimitry Andric // All kernel frames have been allocated. Calculate and record the
10287fa27ce4SDimitry Andric // addresses.
10297fa27ce4SDimitry Andric {
10307fa27ce4SDimitry Andric const DataLayout &DL = M.getDataLayout();
10317fa27ce4SDimitry Andric
10327fa27ce4SDimitry Andric for (Function &Func : M.functions()) {
10337fa27ce4SDimitry Andric if (Func.isDeclaration() || !isKernelLDS(&Func))
10347fa27ce4SDimitry Andric continue;
10357fa27ce4SDimitry Andric
10367fa27ce4SDimitry Andric // All three of these are optional. The first variable is allocated at
10377fa27ce4SDimitry Andric // zero. They are allocated by AMDGPUMachineFunction as one block.
10387fa27ce4SDimitry Andric // Layout:
10397fa27ce4SDimitry Andric //{
10407fa27ce4SDimitry Andric // module.lds
10417fa27ce4SDimitry Andric // alignment padding
10427fa27ce4SDimitry Andric // kernel instance
10437fa27ce4SDimitry Andric // alignment padding
10447fa27ce4SDimitry Andric // dynamic lds variables
10457fa27ce4SDimitry Andric //}
10467fa27ce4SDimitry Andric
10477fa27ce4SDimitry Andric const bool AllocateModuleScopeStruct =
10487fa27ce4SDimitry Andric MaybeModuleScopeStruct &&
10497fa27ce4SDimitry Andric KernelsThatAllocateModuleLDS.contains(&Func);
10507fa27ce4SDimitry Andric
10517fa27ce4SDimitry Andric auto Replacement = KernelToReplacement.find(&Func);
10527fa27ce4SDimitry Andric const bool AllocateKernelScopeStruct =
10537fa27ce4SDimitry Andric Replacement != KernelToReplacement.end();
10547fa27ce4SDimitry Andric
10557fa27ce4SDimitry Andric const bool AllocateDynamicVariable =
10567fa27ce4SDimitry Andric KernelToCreatedDynamicLDS.contains(&Func);
10577fa27ce4SDimitry Andric
10587fa27ce4SDimitry Andric uint32_t Offset = 0;
10597fa27ce4SDimitry Andric
10607fa27ce4SDimitry Andric if (AllocateModuleScopeStruct) {
10617fa27ce4SDimitry Andric // Allocated at zero, recorded once on construction, not once per
10627fa27ce4SDimitry Andric // kernel
10637fa27ce4SDimitry Andric Offset += DL.getTypeAllocSize(MaybeModuleScopeStruct->getValueType());
10647fa27ce4SDimitry Andric }
10657fa27ce4SDimitry Andric
10667fa27ce4SDimitry Andric if (AllocateKernelScopeStruct) {
10677fa27ce4SDimitry Andric GlobalVariable *KernelStruct = Replacement->second.SGV;
10687fa27ce4SDimitry Andric Offset = alignTo(Offset, AMDGPU::getAlign(DL, KernelStruct));
10697fa27ce4SDimitry Andric recordLDSAbsoluteAddress(&M, KernelStruct, Offset);
10707fa27ce4SDimitry Andric Offset += DL.getTypeAllocSize(KernelStruct->getValueType());
10717fa27ce4SDimitry Andric }
10727fa27ce4SDimitry Andric
10737fa27ce4SDimitry Andric // If there is dynamic allocation, the alignment needed is included in
10747fa27ce4SDimitry Andric // the static frame size. There may be no reference to the dynamic
10757fa27ce4SDimitry Andric // variable in the kernel itself, so without including it here, that
10767fa27ce4SDimitry Andric // alignment padding could be missed.
10777fa27ce4SDimitry Andric if (AllocateDynamicVariable) {
10787fa27ce4SDimitry Andric GlobalVariable *DynamicVariable = KernelToCreatedDynamicLDS[&Func];
10797fa27ce4SDimitry Andric Offset = alignTo(Offset, AMDGPU::getAlign(DL, DynamicVariable));
10807fa27ce4SDimitry Andric recordLDSAbsoluteAddress(&M, DynamicVariable, Offset);
10817fa27ce4SDimitry Andric }
10827fa27ce4SDimitry Andric
10837fa27ce4SDimitry Andric if (Offset != 0) {
1084b1c73532SDimitry Andric (void)TM; // TODO: Account for target maximum LDS
10857fa27ce4SDimitry Andric std::string Buffer;
10867fa27ce4SDimitry Andric raw_string_ostream SS{Buffer};
10877fa27ce4SDimitry Andric SS << format("%u", Offset);
10887fa27ce4SDimitry Andric
1089ac9a064cSDimitry Andric // Instead of explicitly marking kernels that access dynamic variables
10907fa27ce4SDimitry Andric // using special case metadata, annotate with min-lds == max-lds, i.e.
10917fa27ce4SDimitry Andric // that there is no more space available for allocating more static
10927fa27ce4SDimitry Andric // LDS variables. That is the right condition to prevent allocating
10937fa27ce4SDimitry Andric // more variables which would collide with the addresses assigned to
10947fa27ce4SDimitry Andric // dynamic variables.
10957fa27ce4SDimitry Andric if (AllocateDynamicVariable)
10967fa27ce4SDimitry Andric SS << format(",%u", Offset);
10977fa27ce4SDimitry Andric
10987fa27ce4SDimitry Andric Func.addFnAttr("amdgpu-lds-size", Buffer);
10997fa27ce4SDimitry Andric }
11007fa27ce4SDimitry Andric }
11017fa27ce4SDimitry Andric }
11027fa27ce4SDimitry Andric
1103e3b55780SDimitry Andric for (auto &GV : make_early_inc_range(M.globals()))
1104e3b55780SDimitry Andric if (AMDGPU::isLDSVariableToLower(GV)) {
1105e3b55780SDimitry Andric // probably want to remove from used lists
1106e3b55780SDimitry Andric GV.removeDeadConstantUsers();
1107e3b55780SDimitry Andric if (GV.use_empty())
1108e3b55780SDimitry Andric GV.eraseFromParent();
1109344a3780SDimitry Andric }
1110344a3780SDimitry Andric
1111344a3780SDimitry Andric return Changed;
1112344a3780SDimitry Andric }
1113344a3780SDimitry Andric
1114344a3780SDimitry Andric private:
1115344a3780SDimitry Andric // Increase the alignment of LDS globals if necessary to maximise the chance
1116344a3780SDimitry Andric // that we can use aligned LDS instructions to access them.
superAlignLDSGlobals(Module & M)111777fc4c14SDimitry Andric static bool superAlignLDSGlobals(Module &M) {
111877fc4c14SDimitry Andric const DataLayout &DL = M.getDataLayout();
111977fc4c14SDimitry Andric bool Changed = false;
112077fc4c14SDimitry Andric if (!SuperAlignLDSGlobals) {
112177fc4c14SDimitry Andric return Changed;
112277fc4c14SDimitry Andric }
112377fc4c14SDimitry Andric
112477fc4c14SDimitry Andric for (auto &GV : M.globals()) {
112577fc4c14SDimitry Andric if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) {
112677fc4c14SDimitry Andric // Only changing alignment of LDS variables
112777fc4c14SDimitry Andric continue;
112877fc4c14SDimitry Andric }
112977fc4c14SDimitry Andric if (!GV.hasInitializer()) {
113077fc4c14SDimitry Andric // cuda/hip extern __shared__ variable, leave alignment alone
113177fc4c14SDimitry Andric continue;
113277fc4c14SDimitry Andric }
113377fc4c14SDimitry Andric
113477fc4c14SDimitry Andric Align Alignment = AMDGPU::getAlign(DL, &GV);
113577fc4c14SDimitry Andric TypeSize GVSize = DL.getTypeAllocSize(GV.getValueType());
1136344a3780SDimitry Andric
1137344a3780SDimitry Andric if (GVSize > 8) {
1138344a3780SDimitry Andric // We might want to use a b96 or b128 load/store
1139344a3780SDimitry Andric Alignment = std::max(Alignment, Align(16));
1140344a3780SDimitry Andric } else if (GVSize > 4) {
1141344a3780SDimitry Andric // We might want to use a b64 load/store
1142344a3780SDimitry Andric Alignment = std::max(Alignment, Align(8));
1143344a3780SDimitry Andric } else if (GVSize > 2) {
1144344a3780SDimitry Andric // We might want to use a b32 load/store
1145344a3780SDimitry Andric Alignment = std::max(Alignment, Align(4));
1146344a3780SDimitry Andric } else if (GVSize > 1) {
1147344a3780SDimitry Andric // We might want to use a b16 load/store
1148344a3780SDimitry Andric Alignment = std::max(Alignment, Align(2));
1149344a3780SDimitry Andric }
1150344a3780SDimitry Andric
115177fc4c14SDimitry Andric if (Alignment != AMDGPU::getAlign(DL, &GV)) {
115277fc4c14SDimitry Andric Changed = true;
115377fc4c14SDimitry Andric GV.setAlignment(Alignment);
1154344a3780SDimitry Andric }
1155344a3780SDimitry Andric }
115677fc4c14SDimitry Andric return Changed;
115777fc4c14SDimitry Andric }
115877fc4c14SDimitry Andric
  // Build a single struct-typed LDS global named VarName whose fields are the
  // variables in LDSVarsToTransform (plus any byte-array padding needed for
  // alignment), laid out by performOptimizedStructLayout. Returns the new
  // struct variable together with a map from each original variable to the
  // constantexpr GEP addressing its field within the struct.
  static LDSVariableReplacement createLDSVariableReplacement(
      Module &M, std::string VarName,
      DenseSet<GlobalVariable *> const &LDSVarsToTransform) {
    // Create a struct instance containing LDSVarsToTransform and map from those
    // variables to ConstantExprGEP
    // Variables may be introduced to meet alignment requirements. No aliasing
    // metadata is useful for these as they have no uses. Erased before return.

    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();
    assert(!LDSVarsToTransform.empty());

    SmallVector<OptimizedStructLayoutField, 8> LayoutFields;
    LayoutFields.reserve(LDSVarsToTransform.size());
    {
      // The order of fields in this struct depends on the order of
      // variables in the argument which varies when changing how they
      // are identified, leading to spurious test breakage.
      auto Sorted = sortByName(std::vector<GlobalVariable *>(
          LDSVarsToTransform.begin(), LDSVarsToTransform.end()));

      for (GlobalVariable *GV : Sorted) {
        OptimizedStructLayoutField F(GV,
                                     DL.getTypeAllocSize(GV->getValueType()),
                                     AMDGPU::getAlign(DL, GV));
        LayoutFields.emplace_back(F);
      }
    }

    performOptimizedStructLayout(LayoutFields);

    // Walk the computed layout, materialising a padding global wherever the
    // running offset does not meet a field's alignment. IsPaddingField runs
    // parallel to LocalVars so the padding entries can be recognised (and
    // erased) after the struct type is built.
    std::vector<GlobalVariable *> LocalVars;
    BitVector IsPaddingField;
    LocalVars.reserve(LDSVarsToTransform.size()); // will be at least this large
    IsPaddingField.reserve(LDSVarsToTransform.size());
    {
      uint64_t CurrentOffset = 0;
      for (auto &F : LayoutFields) {
        // F.Id was stashed as an opaque pointer by the layout code; recover
        // the GlobalVariable it identifies.
        GlobalVariable *FGV =
            static_cast<GlobalVariable *>(const_cast<void *>(F.Id));
        Align DataAlign = F.Alignment;

        uint64_t DataAlignV = DataAlign.value();
        if (uint64_t Rem = CurrentOffset % DataAlignV) {
          uint64_t Padding = DataAlignV - Rem;

          // Append an array of padding bytes to meet alignment requested
          // Note (o + (a - (o % a)) ) % a == 0
          //      (offset + Padding ) % align == 0

          Type *ATy = ArrayType::get(Type::getInt8Ty(Ctx), Padding);
          LocalVars.push_back(new GlobalVariable(
              M, ATy, false, GlobalValue::InternalLinkage,
              PoisonValue::get(ATy), "", nullptr, GlobalValue::NotThreadLocal,
              AMDGPUAS::LOCAL_ADDRESS, false));
          IsPaddingField.push_back(true);
          CurrentOffset += Padding;
        }

        LocalVars.push_back(FGV);
        IsPaddingField.push_back(false);
        CurrentOffset += F.Size;
      }
    }

    std::vector<Type *> LocalVarTypes;
    LocalVarTypes.reserve(LocalVars.size());
    std::transform(
        LocalVars.cbegin(), LocalVars.cend(), std::back_inserter(LocalVarTypes),
        [](const GlobalVariable *V) -> Type * { return V->getValueType(); });

    StructType *LDSTy = StructType::create(Ctx, LocalVarTypes, VarName + ".t");

    // The first field is allocated at offset zero, so its alignment is the
    // alignment of the whole struct.
    Align StructAlign = AMDGPU::getAlign(DL, LocalVars[0]);

    GlobalVariable *SGV = new GlobalVariable(
        M, LDSTy, false, GlobalValue::InternalLinkage, PoisonValue::get(LDSTy),
        VarName, nullptr, GlobalValue::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS,
        false);
    SGV->setAlignment(StructAlign);

    // Map each original variable to an inbounds GEP of its field; erase the
    // padding globals, which by construction have no uses.
    DenseMap<GlobalVariable *, Constant *> Map;
    Type *I32 = Type::getInt32Ty(Ctx);
    for (size_t I = 0; I < LocalVars.size(); I++) {
      GlobalVariable *GV = LocalVars[I];
      Constant *GEPIdx[] = {ConstantInt::get(I32, 0), ConstantInt::get(I32, I)};
      Constant *GEP = ConstantExpr::getGetElementPtr(LDSTy, SGV, GEPIdx, true);
      if (IsPaddingField[I]) {
        assert(GV->use_empty());
        GV->eraseFromParent();
      } else {
        Map[GV] = GEP;
      }
    }
    assert(Map.size() == LDSVarsToTransform.size());
    return {SGV, std::move(Map)};
  }
1256344a3780SDimitry Andric
  // Replace uses (filtered by Predicate) of each variable in
  // LDSVarsToTransformArg with the constantexpr GEP into Replacement.SGV
  // recorded in Replacement.LDSVarsToConstantGEP, then refine alignment and
  // attach alias.scope/noalias metadata on the rewritten accesses.
  template <typename PredicateTy>
  static void replaceLDSVariablesWithStruct(
      Module &M, DenseSet<GlobalVariable *> const &LDSVarsToTransformArg,
      const LDSVariableReplacement &Replacement, PredicateTy Predicate) {
    LLVMContext &Ctx = M.getContext();
    const DataLayout &DL = M.getDataLayout();

    // A hack... we need to insert the aliasing info in a predictable order for
    // lit tests. Would like to have them in a stable order already, ideally the
    // same order they get allocated, which might mean an ordered set container
    auto LDSVarsToTransform = sortByName(std::vector<GlobalVariable *>(
        LDSVarsToTransformArg.begin(), LDSVarsToTransformArg.end()));

    // Create alias.scope and their lists. Each field in the new structure
    // does not alias with all other fields.
    // With a single variable there is nothing to disambiguate against, so
    // both lists stay empty and no metadata is emitted below.
    SmallVector<MDNode *> AliasScopes;
    SmallVector<Metadata *> NoAliasList;
    const size_t NumberVars = LDSVarsToTransform.size();
    if (NumberVars > 1) {
      MDBuilder MDB(Ctx);
      AliasScopes.reserve(NumberVars);
      MDNode *Domain = MDB.createAnonymousAliasScopeDomain();
      for (size_t I = 0; I < NumberVars; I++) {
        MDNode *Scope = MDB.createAnonymousAliasScope(Domain);
        AliasScopes.push_back(Scope);
      }
      // Seed NoAliasList with every scope except scope 0, matching the first
      // loop iteration below; each iteration then patches one slot so the
      // list always holds all scopes except the current variable's.
      NoAliasList.append(&AliasScopes[1], AliasScopes.end());
    }

    // Replace uses of ith variable with a constantexpr to the corresponding
    // field of the instance that will be allocated by AMDGPUMachineFunction
    for (size_t I = 0; I < NumberVars; I++) {
      GlobalVariable *GV = LDSVarsToTransform[I];
      Constant *GEP = Replacement.LDSVarsToConstantGEP.at(GV);

      GV->replaceUsesWithIf(GEP, Predicate);

      // Derive the field's byte offset from the GEP so the access alignment
      // can be computed relative to the struct's alignment.
      APInt APOff(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
      GEP->stripAndAccumulateInBoundsConstantOffsets(DL, APOff);
      uint64_t Offset = APOff.getZExtValue();

      Align A =
          commonAlignment(Replacement.SGV->getAlign().valueOrOne(), Offset);

      // Restore the previous variable's scope into the slot it occupies; the
      // list now contains every scope except AliasScopes[I].
      if (I)
        NoAliasList[I - 1] = AliasScopes[I - 1];
      MDNode *NoAlias =
          NoAliasList.empty() ? nullptr : MDNode::get(Ctx, NoAliasList);
      MDNode *AliasScope =
          AliasScopes.empty() ? nullptr : MDNode::get(Ctx, {AliasScopes[I]});

      refineUsesAlignmentAndAA(GEP, A, DL, AliasScope, NoAlias);
    }
  }
1311344a3780SDimitry Andric
  // Walk the users of Ptr (to a bounded depth), raising the alignment of
  // loads/stores/atomics that access through it to at least A, and attaching
  // the given alias.scope/noalias metadata to memory instructions. Recurses
  // through GEPs (with the alignment refined by the constant offset) and
  // through bitcast/addrspacecast instructions.
  static void refineUsesAlignmentAndAA(Value *Ptr, Align A,
                                       const DataLayout &DL, MDNode *AliasScope,
                                       MDNode *NoAlias, unsigned MaxDepth = 5) {
    // Nothing useful to propagate: depth exhausted, or alignment is trivial
    // and there is no AA metadata to attach.
    if (!MaxDepth || (A == 1 && !AliasScope))
      return;

    for (User *U : Ptr->users()) {
      // Merge AA metadata onto any instruction that may touch memory,
      // combining with metadata that is already present rather than
      // overwriting it.
      if (auto *I = dyn_cast<Instruction>(U)) {
        if (AliasScope && I->mayReadOrWriteMemory()) {
          MDNode *AS = I->getMetadata(LLVMContext::MD_alias_scope);
          AS = (AS ? MDNode::getMostGenericAliasScope(AS, AliasScope)
                   : AliasScope);
          I->setMetadata(LLVMContext::MD_alias_scope, AS);

          MDNode *NA = I->getMetadata(LLVMContext::MD_noalias);
          NA = (NA ? MDNode::intersect(NA, NoAlias) : NoAlias);
          I->setMetadata(LLVMContext::MD_noalias, NA);
        }
      }

      if (auto *LI = dyn_cast<LoadInst>(U)) {
        LI->setAlignment(std::max(A, LI->getAlign()));
        continue;
      }
      if (auto *SI = dyn_cast<StoreInst>(U)) {
        // Only refine if Ptr is the address operand, not the stored value.
        if (SI->getPointerOperand() == Ptr)
          SI->setAlignment(std::max(A, SI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicRMWInst>(U)) {
        // None of atomicrmw operations can work on pointers, but let's
        // check it anyway in case it will or we will process ConstantExpr.
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *AI = dyn_cast<AtomicCmpXchgInst>(U)) {
        if (AI->getPointerOperand() == Ptr)
          AI->setAlignment(std::max(A, AI->getAlign()));
        continue;
      }
      if (auto *GEP = dyn_cast<GetElementPtrInst>(U)) {
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt Off(BitWidth, 0);
        if (GEP->getPointerOperand() == Ptr) {
          // GA stays default (1) when the offset is not a compile-time
          // constant; the recursion then only propagates AA metadata.
          Align GA;
          if (GEP->accumulateConstantOffset(DL, Off))
            GA = commonAlignment(A, Off.getLimitedValue());
          refineUsesAlignmentAndAA(GEP, GA, DL, AliasScope, NoAlias,
                                   MaxDepth - 1);
        }
        continue;
      }
      if (auto *I = dyn_cast<Instruction>(U)) {
        // Casts preserve the address, so the same alignment and metadata
        // apply to their users.
        if (I->getOpcode() == Instruction::BitCast ||
            I->getOpcode() == Instruction::AddrSpaceCast)
          refineUsesAlignmentAndAA(I, A, DL, AliasScope, NoAlias, MaxDepth - 1);
      }
    }
  }
1372344a3780SDimitry Andric };
1373344a3780SDimitry Andric
1374b1c73532SDimitry Andric class AMDGPULowerModuleLDSLegacy : public ModulePass {
1375b1c73532SDimitry Andric public:
1376b1c73532SDimitry Andric const AMDGPUTargetMachine *TM;
1377b1c73532SDimitry Andric static char ID;
1378b1c73532SDimitry Andric
AMDGPULowerModuleLDSLegacy(const AMDGPUTargetMachine * TM_=nullptr)1379b1c73532SDimitry Andric AMDGPULowerModuleLDSLegacy(const AMDGPUTargetMachine *TM_ = nullptr)
1380b1c73532SDimitry Andric : ModulePass(ID), TM(TM_) {
1381b1c73532SDimitry Andric initializeAMDGPULowerModuleLDSLegacyPass(*PassRegistry::getPassRegistry());
1382b1c73532SDimitry Andric }
1383b1c73532SDimitry Andric
getAnalysisUsage(AnalysisUsage & AU) const1384b1c73532SDimitry Andric void getAnalysisUsage(AnalysisUsage &AU) const override {
1385b1c73532SDimitry Andric if (!TM)
1386b1c73532SDimitry Andric AU.addRequired<TargetPassConfig>();
1387b1c73532SDimitry Andric }
1388b1c73532SDimitry Andric
runOnModule(Module & M)1389b1c73532SDimitry Andric bool runOnModule(Module &M) override {
1390b1c73532SDimitry Andric if (!TM) {
1391b1c73532SDimitry Andric auto &TPC = getAnalysis<TargetPassConfig>();
1392b1c73532SDimitry Andric TM = &TPC.getTM<AMDGPUTargetMachine>();
1393b1c73532SDimitry Andric }
1394b1c73532SDimitry Andric
1395b1c73532SDimitry Andric return AMDGPULowerModuleLDS(*TM).runOnModule(M);
1396b1c73532SDimitry Andric }
1397b1c73532SDimitry Andric };
1398b1c73532SDimitry Andric
} // namespace

char AMDGPULowerModuleLDSLegacy::ID = 0;

// Externally visible alias for the legacy pass ID so the pass can be
// referenced by ID without constructing it.
char &llvm::AMDGPULowerModuleLDSLegacyPassID = AMDGPULowerModuleLDSLegacy::ID;

// Register the legacy pass. TargetPassConfig is listed as a dependency
// because runOnModule uses it to recover the target machine when the pass
// was constructed without one.
INITIALIZE_PASS_BEGIN(AMDGPULowerModuleLDSLegacy, DEBUG_TYPE,
                      "Lower uses of LDS variables from non-kernel functions",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_END(AMDGPULowerModuleLDSLegacy, DEBUG_TYPE,
                    "Lower uses of LDS variables from non-kernel functions",
                    false, false)
1411344a3780SDimitry Andric
// Factory for the legacy pass; TM may be null, in which case the pass will
// obtain the target machine via TargetPassConfig when run.
ModulePass *
llvm::createAMDGPULowerModuleLDSLegacyPass(const AMDGPUTargetMachine *TM) {
  return new AMDGPULowerModuleLDSLegacy(TM);
}
1416344a3780SDimitry Andric
run(Module & M,ModuleAnalysisManager &)1417344a3780SDimitry Andric PreservedAnalyses AMDGPULowerModuleLDSPass::run(Module &M,
1418344a3780SDimitry Andric ModuleAnalysisManager &) {
1419b1c73532SDimitry Andric return AMDGPULowerModuleLDS(TM).runOnModule(M) ? PreservedAnalyses::none()
1420344a3780SDimitry Andric : PreservedAnalyses::all();
1421344a3780SDimitry Andric }
1422