From 29912c918ff881d6b12ed2c54ab4c6c66e8ac911 Mon Sep 17 00:00:00 2001
From: James Arram
Date: Wed, 27 Jan 2016 15:27:40 +0000
Subject: [PATCH] added multi-channel LMem test design

---
 test/Tests/LMemMultiChannel/Readme.md        |  12 ++
 .../src/LMemMultiChannelCpuCode.cpp          |  45 +++++++
 .../src/LMemMultiChannelKernel.maxj          |  20 ++++
 .../src/LMemMultiChannelManager.maxj         | 112 ++++++++++++++++++
 4 files changed, 189 insertions(+)
 create mode 100644 test/Tests/LMemMultiChannel/Readme.md
 create mode 100644 test/Tests/LMemMultiChannel/src/LMemMultiChannelCpuCode.cpp
 create mode 100644 test/Tests/LMemMultiChannel/src/LMemMultiChannelKernel.maxj
 create mode 100644 test/Tests/LMemMultiChannel/src/LMemMultiChannelManager.maxj

diff --git a/test/Tests/LMemMultiChannel/Readme.md b/test/Tests/LMemMultiChannel/Readme.md
new file mode 100644
index 0000000..063d6b4
--- /dev/null
+++ b/test/Tests/LMemMultiChannel/Readme.md
@@ -0,0 +1,12 @@
+# Multi-Channel LMem design
+
+This is a simple pass-through kernel built to test the multiple memory
+channel feature in MaxCompiler 2015.2.
+
+## Notes
+1. You can vary the number of kernels and memory channels by setting
+the **n_kernel** parameter in the manager. This value must be in the
+range [1, 6].
+2. This design works on Maia, but not on Max3 (possibly a bug in the API).
+3. For any questions, please email me (jma11_at_ic.ac.uk).
+
diff --git a/test/Tests/LMemMultiChannel/src/LMemMultiChannelCpuCode.cpp b/test/Tests/LMemMultiChannel/src/LMemMultiChannelCpuCode.cpp
new file mode 100644
index 0000000..2287ec0
--- /dev/null
+++ b/test/Tests/LMemMultiChannel/src/LMemMultiChannelCpuCode.cpp
@@ -0,0 +1,45 @@
+/***
+ This is a simple demo project that you can copy to get started.
+ Comment blocks starting with '***' and subsequent non-empty lines
+ are automatically added to this project's wiki page.
+*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <vector>
+#include <iostream>
+#include "Maxfiles.h"
+#include "MaxSLiCInterface.h"
+
+int main(void)
+{
+
+    const uint32_t inSize = 384;
+
+    std::vector<uint32_t> in(inSize), out0(inSize, 0), out1(inSize, 0);
+
+    for (uint32_t i = 0; i < inSize; ++i) {
+        in[i] = i;
+    }
+
+    std::cout << "Writing data to LMem." << std::endl;
+    LMemMultiChannel_Write(inSize, &in[0]);
+
+    std::cout << "Running DFE." << std::endl;
+    LMemMultiChannel_Exec(inSize, &out0[0], &out1[0]);
+
+    /***
+     Note that you should always test the output of your DFE
+     design against a CPU version to ensure correctness.
+    */
+    for (uint32_t i = 0; i < inSize; i++)
+        if (in[i] != out0[i] || in[i] != out1[i]) {
+            printf("Output from DFE did not match CPU\n");
+            return 1;
+        }
+
+    std::cout << "Test passed!" << std::endl;
+    return 0;
+}

diff --git a/test/Tests/LMemMultiChannel/src/LMemMultiChannelKernel.maxj b/test/Tests/LMemMultiChannel/src/LMemMultiChannelKernel.maxj
new file mode 100644
index 0000000..7bafb3f
--- /dev/null
+++ b/test/Tests/LMemMultiChannel/src/LMemMultiChannelKernel.maxj
@@ -0,0 +1,20 @@
+/***
+ A simple pass-through Kernel
+
+ A single stream of data is read from LMem and output directly to the host CPU
+*/
+
+import com.maxeler.maxcompiler.v2.kernelcompiler.Kernel;
+import com.maxeler.maxcompiler.v2.kernelcompiler.KernelParameters;
+import com.maxeler.maxcompiler.v2.kernelcompiler.types.base.DFEVar;
+
+class LMemMultiChannelKernel extends Kernel {
+
+    protected LMemMultiChannelKernel(KernelParameters parameters) {
+        super(parameters);
+
+        DFEVar a = io.input("k_in", dfeInt(32));
+        io.output("k_out", a, dfeInt(32));
+    }
+
+}

diff --git a/test/Tests/LMemMultiChannel/src/LMemMultiChannelManager.maxj b/test/Tests/LMemMultiChannel/src/LMemMultiChannelManager.maxj
new file mode 100644
index 0000000..434edb6
--- /dev/null
+++ b/test/Tests/LMemMultiChannel/src/LMemMultiChannelManager.maxj
@@ -0,0 +1,112 @@
+/***
+ n_kernel pass-through Kernels are instantiated on the DFE
+ Each Kernel has its own independent LMem channel
+ A fanout is used to send the same input data to each active LMem module
+*/
+
+import com.maxeler.maxcompiler.v2.build.EngineParameters;
+import com.maxeler.maxcompiler.v2.managers.custom.CustomManager;
+import com.maxeler.maxcompiler.v2.managers.custom.DFELink;
+import com.maxeler.maxcompiler.v2.managers.custom.blocks.Fanout;
+import com.maxeler.maxcompiler.v2.managers.custom.blocks.KernelBlock;
+import com.maxeler.maxcompiler.v2.managers.custom.stdlib.LMemCommandGroup;
+import com.maxeler.maxcompiler.v2.managers.custom.stdlib.LMemInterface;
+import com.maxeler.maxcompiler.v2.managers.engine_interfaces.CPUTypes;
+import com.maxeler.maxcompiler.v2.managers.engine_interfaces.EngineInterface;
+import com.maxeler.maxcompiler.v2.managers.engine_interfaces.EngineInterface.Direction;
+import com.maxeler.maxcompiler.v2.managers.engine_interfaces.InterfaceParam;
+
+public class LMemMultiChannelManager extends CustomManager {
+
+    private static final String s_kernelName = "LMemMultiChannelKernel";
+
+    private static final int n_kernel = 2;
+
+    LMemMultiChannelManager(EngineParameters ep)
+    {
+        super(ep);
+
+        // instantiate memory channels
+        LMemInterface[] iface = new LMemInterface[n_kernel];
+        for (int i = 0; i < n_kernel; i++) {
+            iface[i] = addLMemInterface("ctrl" + i, 1);
+        }
+
+        // instantiate kernels
+        for (int i = 0; i < n_kernel; i++) {
+            KernelBlock k = addKernel(new LMemMultiChannelKernel(makeKernelParameters(s_kernelName + i)));
+
+            DFELink lmemToKernel = iface[i].addStreamFromLMem("k_in" + i, LMemCommandGroup.MemoryAccessPattern.LINEAR_1D);
+            k.getInput("k_in") <== lmemToKernel;
+
+            DFELink kernelToHost = addStreamToCPU("k_out" + i);
+            kernelToHost <== k.getOutput("k_out");
+        }
+
+        // data to LMem
+        DFELink hostToMger = addStreamFromCPU("hostToMger");
+        Fanout fan = fanout("fan");
+        fan.getInput() <== hostToMger;
+        for (int i = 0; i < n_kernel; i++) {
+            DFELink fanOut = fan.addOutput("fanOut" + i);
+            DFELink mgerToLMem = iface[i].addStreamToLMem("mgerToLMem" + i, LMemCommandGroup.MemoryAccessPattern.LINEAR_1D);
+            mgerToLMem <== fanOut;
+        }
+    }
+
+    // write data to LMem
+    private static EngineInterface interfaceWrite(String name) {
+        EngineInterface engine_interface = new EngineInterface(name);
+        CPUTypes type = CPUTypes.UINT32;
+        int size = type.sizeInBytes();
+
+        String routeString = "";
+        InterfaceParam N = engine_interface.addParam("N", type);
+        InterfaceParam sizeBytes = N * size;
+        InterfaceParam zero = engine_interface.addConstant(0L);
+
+        engine_interface.setStream("hostToMger", type, sizeBytes);
+        for (int i = 0; i < n_kernel; i++) {
+            engine_interface.setLMemLinear("ctrl" + i, "mgerToLMem" + i, zero, sizeBytes);
+            String tmp;
+            if (i < n_kernel - 1)
+                tmp = String.format("fan -> fanOut%d, ", i);
+            else
+                tmp = String.format("fan -> fanOut%d", i);
+            routeString += tmp;
+        }
+
+        engine_interface.route(routeString);
+        engine_interface.ignoreAll(Direction.IN_OUT);
+
+        return engine_interface;
+    }
+
+    // run the pass-through kernels
+    private static EngineInterface interfaceExec(String name) {
+        EngineInterface engine_interface = new EngineInterface(name);
+        CPUTypes type = CPUTypes.UINT32;
+        int size = type.sizeInBytes();
+
+        InterfaceParam N = engine_interface.addParam("N", CPUTypes.INT);
+        InterfaceParam sizeBytes = N * size;
+        InterfaceParam zero = engine_interface.addConstant(0L);
+
+        for (int i = 0; i < n_kernel; i++) {
+            engine_interface.setTicks(s_kernelName + i, N);
+            engine_interface.setStream("k_out" + i, type, sizeBytes);
+            engine_interface.setLMemLinear("ctrl" + i, "k_in" + i, zero, sizeBytes);
+        }
+        engine_interface.ignoreAll(Direction.IN_OUT);
+        return engine_interface;
+    }
+
+
+    public static void main(String[] args) {
+        LMemMultiChannelManager manager = new LMemMultiChannelManager(new EngineParameters(args));
+        manager.createSLiCinterface(interfaceWrite("Write"));
+        manager.createSLiCinterface(interfaceExec("Exec"));
+        manager.suppressDefaultInterface();
+        manager.build();
+    }
+}
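
Side note: the combined comparison in LMemMultiChannelCpuCode.cpp only reports that some channel diverged, not which one. When testing with larger n_kernel values, a per-channel check makes failures easier to localise. Below is a minimal sketch under that assumption; the helper name checkChannel is illustrative only and is not part of the generated SLiC API.

    #include <cstdio>
    #include <cstdint>
    #include <vector>

    // Compare one DFE output channel against the CPU reference data.
    // Prints the first mismatching element; returns true when all match.
    static bool checkChannel(const std::vector<uint32_t> &ref,
                             const std::vector<uint32_t> &out, int channel)
    {
        for (size_t i = 0; i < ref.size(); ++i) {
            if (ref[i] != out[i]) {
                std::printf("Channel %d: mismatch at element %zu "
                            "(expected %u, got %u)\n",
                            channel, i, (unsigned)ref[i], (unsigned)out[i]);
                return false;
            }
        }
        return true;
    }

After LMemMultiChannel_Exec the test body would then read, for the two-channel build above, bool ok = checkChannel(in, out0, 0) & checkChannel(in, out1, 1); (the non-short-circuiting & ensures every failing channel is reported) followed by return ok ? 0 : 1;.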