
Commit 5027c2e: Merge branch 'main' into main

Quleaf authored Jul 13, 2024
2 parents: 88a369e + 0f7a56b
Showing 62 changed files with 2,230 additions and 281 deletions.
33 changes: 9 additions & 24 deletions docker/release/cudaq.nvqc.Dockerfile
@@ -16,29 +16,14 @@
ARG base_image=nvcr.io/nvidia/nightly/cuda-quantum:latest
FROM $base_image as nvcf_image

ADD tools/cudaq-qpud/nvqc_proxy.py /
ADD tools/cudaq-qpud/json_request_runner.py /
# Run the tar command and then uncomment ADD cudaq.tar.gz ... in order to
# override the installation.
# tar czvf /workspaces/cuda-quantum/cudaq.tar.gz -C /usr/local/cudaq .
# ADD cudaq.tar.gz /opt/nvidia/cudaq

# Launch script: launch cudaq-qpud (nvcf mode) with MPI ranks == Number of NVIDIA GPUs
# IMPORTANT:
# (1) NVCF function must set container environment variable `NUM_GPUS`
# equal to the number of GPUs on the target platform. This will allow clients to query
# the function capability (number of GPUs) by looking at function info. The below
# entry point script helps prevent mis-configuration by checking that functions are
# created and deployed appropriately.
# (2) NVCF function must set container environment variable `NVQC_REST_PAYLOAD_VERSION` equal
# to the RestRequest payload version with which `cudaq-qpud` in the deployment Docker image was compiled.
# Failure to do so will result in early exits of the entry point command, thus deployment failure.
RUN echo 'cat /opt/nvidia/cudaq/build_info.txt;' \
'EXPECTED_REST_PAYLOAD_VERSION="$(cudaq-qpud --type nvcf --schema-version | grep -o "CUDA-Q REST API version: \S*" | cut -d ":" -f 2 | tr -d " ")" ;' \
'if [[ "$NVQC_REST_PAYLOAD_VERSION" != "$EXPECTED_REST_PAYLOAD_VERSION" ]]; ' \
' then echo "Invalid Deployment: NVQC_REST_PAYLOAD_VERSION environment variable ($NVQC_REST_PAYLOAD_VERSION) does not match cudaq-qpud (expected $EXPECTED_REST_PAYLOAD_VERSION)." && exit 1; fi;' \
'python3 /nvqc_proxy.py & ' \
'if [[ "$NUM_GPUS" == "$(nvidia-smi --list-gpus | wc -l)" ]]; then ' \
'while true; do ' \
'mpiexec -np $(nvidia-smi --list-gpus | wc -l) cudaq-qpud --type nvcf --port 3031;' \
'done; ' \
'else echo "Invalid Deployment: Number of GPUs does not match the hardware" && exit 1; fi' > launch.sh
RUN sudo mkdir /nvqc_scripts
ADD tools/cudaq-qpud/nvqc_proxy.py /nvqc_scripts
ADD tools/cudaq-qpud/json_request_runner.py /nvqc_scripts
ADD scripts/nvqc_launch.sh /nvqc_scripts

# Start the cudaq-qpud service
ENTRYPOINT ["bash", "-l", "launch.sh"]
ENTRYPOINT ["bash", "-l", "/nvqc_scripts/nvqc_launch.sh"]
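
The inline launch script above now lives in `scripts/nvqc_launch.sh` (copied into `/nvqc_scripts`); its contents are not shown in this diff. Purely to illustrate the two deployment checks described in the removed comments, here is a minimal Python sketch of the same validation logic. It assumes the `cudaq-qpud` and `nvidia-smi` binaries are on the PATH and is not the actual launch script.

```python
import os
import re
import subprocess
import sys

def validate_nvqc_deployment() -> None:
    # Hypothetical sketch of the checks the launch script performs before
    # starting cudaq-qpud; the real script is scripts/nvqc_launch.sh (bash).

    # Check 1: NVQC_REST_PAYLOAD_VERSION must match the REST payload version
    # that the cudaq-qpud binary in this image was compiled with.
    out = subprocess.run(["cudaq-qpud", "--type", "nvcf", "--schema-version"],
                         capture_output=True, text=True).stdout
    match = re.search(r"CUDA-Q REST API version:\s*(\S+)", out)
    expected = match.group(1) if match else None
    if os.environ.get("NVQC_REST_PAYLOAD_VERSION") != expected:
        sys.exit(f"Invalid Deployment: NVQC_REST_PAYLOAD_VERSION does not "
                 f"match cudaq-qpud (expected {expected}).")

    # Check 2: NUM_GPUS must equal the number of GPUs visible on the node,
    # so clients can trust the GPU count advertised by the NVCF function.
    gpus = subprocess.run(["nvidia-smi", "--list-gpus"],
                          capture_output=True, text=True).stdout.strip().splitlines()
    if os.environ.get("NUM_GPUS") != str(len(gpus)):
        sys.exit("Invalid Deployment: Number of GPUs does not match the hardware.")
```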
32 changes: 23 additions & 9 deletions docs/sphinx/examples/python/tutorials/hybrid_qnns.ipynb
@@ -28,16 +28,26 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Import the relevant packages.\n",
"# Install the relevant packages.\n",
"\n",
"!pip install matplotlib==3.8.4\n",
"!pip install torch==2.2.2\n",
"!pip install torchvision==0.17.0\n",
"!pip install scikit-learn==1.4.2\n",
"!pip install scikit-learn==1.4.2"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"\n",
"# Import the relevant libraries \n",
"\n",
"import cudaq\n",
"from cudaq import spin\n",
@@ -70,8 +80,8 @@
"device = torch.device('cpu')\n",
"cudaq.set_target(\"qpp-cpu\")\n",
"\n",
"# cudaq.set_target(\"nvidia\")\n",
"# device = torch.device(\"cuda:0\")"
"#cudaq.set_target(\"nvidia\")\n",
"#device = torch.device(\"cuda:0\")"
]
},
{
@@ -177,8 +187,12 @@
],
"source": [
"# Plot some images from the training set to visualise.\n",
"\n",
"grid_img = torchvision.utils.make_grid(x_train[:10],\n",
"if device != 'cpu':\n",
" sample_to_plot = x_train[:10].to(torch.device('cpu'))\n",
"else:\n",
" sample_to_plot = x_train[:10]\n",
" \n",
"grid_img = torchvision.utils.make_grid(sample_to_plot,\n",
" nrow=5,\n",
" padding=3,\n",
" normalize=True)\n",
@@ -344,7 +358,7 @@
},
{
"cell_type": "code",
"execution_count": 11,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
@@ -459,7 +473,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]"
"version": "3.10.12"
},
"orig_nbformat": 4,
"vscode": {
23 changes: 14 additions & 9 deletions include/cudaq/Frontend/nvqpp/ASTBridge.h
@@ -144,18 +144,19 @@ class QuakeBridgeVisitor
using Base = clang::RecursiveASTVisitor<QuakeBridgeVisitor>;

public:
explicit QuakeBridgeVisitor(clang::ASTContext *astCtx,
mlir::MLIRContext *mlirCtx, mlir::OpBuilder &bldr,
mlir::ModuleOp module, SymbolTable &symTab,
EmittedFunctionsCollection &funcsToEmit,
llvm::ArrayRef<clang::Decl *> reachableFuncs,
MangledKernelNamesMap &namesMap,
clang::CompilerInstance &ci,
clang::ItaniumMangleContext *mangler)
explicit QuakeBridgeVisitor(
clang::ASTContext *astCtx, mlir::MLIRContext *mlirCtx,
mlir::OpBuilder &bldr, mlir::ModuleOp module, SymbolTable &symTab,
EmittedFunctionsCollection &funcsToEmit,
llvm::ArrayRef<clang::Decl *> reachableFuncs,
MangledKernelNamesMap &namesMap, clang::CompilerInstance &ci,
clang::ItaniumMangleContext *mangler,
std::unordered_map<std::string, std::string> &customOperations)
: astContext(astCtx), mlirContext(mlirCtx), builder(bldr), module(module),
symbolTable(symTab), functionsToEmit(funcsToEmit),
reachableFunctions(reachableFuncs), namesMap(namesMap),
compilerInstance(ci), mangler(mangler) {}
compilerInstance(ci), mangler(mangler),
customOperationNames(customOperations) {}

/// `nvq++` renames quantum kernels to differentiate them from classical C++
/// code. This renaming is done on function names. \p tag makes it easier
@@ -597,6 +598,7 @@ class QuakeBridgeVisitor
clang::ItaniumMangleContext *mangler;
std::string loweredFuncName;
llvm::SmallVector<mlir::Value> negations;
std::unordered_map<std::string, std::string> &customOperationNames;

//===--------------------------------------------------------------------===//
// Type traversals
@@ -683,6 +685,9 @@ class ASTBridgeAction : public clang::ASTFrontendAction {
// The mangler is constructed and owned by `this`.
clang::ItaniumMangleContext *mangler;

// Keep track of user custom operation names.
std::unordered_map<std::string, std::string> customOperationNames;

/// Add a placeholder definition to the module in \p visitor for the
/// function, \p funcDecl. This is used for adding the host-side function
/// corresponding to the kernel. The code for this function will be
1 change: 1 addition & 0 deletions include/cudaq/Optimizer/CodeGen/Pipelines.h
@@ -47,4 +47,5 @@ void addLowerToCCPipeline(mlir::OpPassManager &pm);

void addPipelineTranslateToOpenQASM(mlir::PassManager &pm);
void addPipelineTranslateToIQMJson(mlir::PassManager &pm);

} // namespace cudaq::opt
2 changes: 2 additions & 0 deletions include/cudaq/Optimizer/CodeGen/QIRFunctionNames.h
@@ -27,6 +27,8 @@ static constexpr const char QIRCphase[] = "__quantum__qis__cphase";
static constexpr const char QIRReadResultBody[] =
"__quantum__qis__read_result__body";

static constexpr const char QIRCustomOp[] = "__quantum__qis__custom_unitary";

static constexpr const char NVQIRInvokeWithControlBits[] =
"invokeWithControlQubits";
static constexpr const char NVQIRInvokeRotationWithControlBits[] =
2 changes: 2 additions & 0 deletions include/cudaq/Optimizer/Dialect/CC/CCOps.td
@@ -90,6 +90,8 @@ def cc_ScopeOp : CCOp<"scope",
let extraClassDeclaration = [{
using BodyBuilderFn =
llvm::function_ref<void(mlir::OpBuilder &, mlir::Location)>;

bool hasAllocation(bool quantumAllocs = true);
}];
}

76 changes: 73 additions & 3 deletions include/cudaq/Optimizer/Dialect/Quake/QuakeOps.td
@@ -829,18 +829,19 @@ def quake_DiscriminateOp : QuakeOp<"discriminate", [Pure]> {
// Quantum gates
//===----------------------------------------------------------------------===//

class QuakeOperator<string mnemonic, list<Trait> traits = []>
class QuakeOperator<string mnemonic, list<Trait> traits = [],
dag extraArgs = (ins)>
: QuakeOp<mnemonic,
!listconcat([QuantumGate, AttrSizedOperandSegments, OperatorInterface,
DeclareOpInterfaceMethods<MemoryEffectsOpInterface>], traits)> {

let arguments = (ins
let arguments = !con(extraArgs, (ins
UnitAttr:$is_adj,
Variadic<AnyFloat>:$parameters,
Variadic<AnyQType>:$controls,
Variadic<AnyQTargetType>:$targets,
OptionalAttr<DenseBoolArrayAttr>:$negated_qubit_controls
);
));
let results = (outs
Variadic<WireType>:$wires
);
@@ -1249,4 +1250,73 @@ def ZOp : OneTargetOp<"z", [Hermitian]> {
}];
}

def CustomUnitarySymbolOp : QuakeOperator<"custom_op", [], (ins SymbolRefAttr:$generator)> {
let summary = "Custom unitary operation.";
let description = [{
Custom unitary operation leveraging a `SymbolRefAttr` describing the unitary
data generator function.
}];

let builders = [
OpBuilder<(ins "mlir::SymbolRefAttr":$generator,
"mlir::UnitAttr":$is_adj,
"mlir::ValueRange":$parameters,
"mlir::ValueRange":$controls,
"mlir::ValueRange":$targets,
"mlir::DenseBoolArrayAttr":$negates), [{
return build($_builder, $_state, mlir::TypeRange{}, generator, is_adj,
parameters, controls, targets, negates);
}]>,
OpBuilder<(ins "mlir::SymbolRefAttr":$generator,
"bool":$is_adj,
"mlir::ValueRange":$parameters,
"mlir::ValueRange":$controls,
"mlir::ValueRange":$targets,
"mlir::DenseBoolArrayAttr":$negates), [{
return build($_builder, $_state, mlir::TypeRange{}, generator, is_adj,
parameters, controls, targets, negates);
}]>,
OpBuilder<(ins "mlir::SymbolRefAttr":$generator,
"bool":$is_adj,
"mlir::ValueRange":$parameters,
"mlir::ValueRange":$controls,
"mlir::ValueRange":$targets), [{
return build($_builder, $_state, generator, is_adj, parameters,
controls, targets, {});
}]>,
OpBuilder<(ins "mlir::SymbolRefAttr":$generator,
"mlir::ValueRange":$parameters,
"mlir::ValueRange":$controls,
"mlir::ValueRange":$targets), [{
return build($_builder, $_state, generator, /*is_adj=*/false,
parameters, controls, targets);
}]>,
OpBuilder<(ins "mlir::SymbolRefAttr":$generator,
"bool":$is_adj,
"mlir::ValueRange":$controls,
"mlir::ValueRange":$targets), [{
return build($_builder, $_state, generator, is_adj,
mlir::ValueRange{}, controls, targets);
}]>,
OpBuilder<(ins "mlir::SymbolRefAttr":$generator,
"mlir::ValueRange":$controls,
"mlir::ValueRange":$targets), [{
return build($_builder, $_state, generator, /*is_adj=*/false, controls, targets);
}]>,
OpBuilder<(ins "mlir::SymbolRefAttr":$generator, "bool":$is_adj,
"mlir::ValueRange":$targets), [{
return build($_builder, $_state, generator, is_adj, mlir::ValueRange{}, targets);
}]>,
OpBuilder<(ins "mlir::SymbolRefAttr":$generator, "mlir::ValueRange":$targets), [{
return build($_builder, $_state, generator, /*is_adj=*/false, targets);
}]>
];

let assemblyFormat = [{ $generator
( `<` `adj` $is_adj^ `>` )? ( `(` $parameters^ `)` )?
(`[` $controls^ (`neg` $negated_qubit_controls^ )? `]`)?
$targets `:` functional-type(operands, results) attr-dict
}];
}

#endif // CUDAQ_OPTIMIZER_DIALECT_QUAKE_OPS
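
The `CustomUnitarySymbolOp` added above, together with the `customOperationNames` bookkeeping in `ASTBridge.h` and the `__quantum__qis__custom_unitary` QIR symbol, provides the IR plumbing for user-defined unitaries. As a rough end-to-end illustration only, the sketch below assumes the Python-side `cudaq.register_operation` API that accompanies this feature; see the CUDA-Q documentation for the authoritative interface.

```python
import numpy as np
import cudaq

# Register a custom single-qubit unitary from its row-major 2x2 matrix.
# (Assumes cudaq.register_operation is available in this build.)
cudaq.register_operation("custom_h", (1.0 / np.sqrt(2.0)) * np.array([1, 1, 1, -1]))

@cudaq.kernel
def bell():
    qubits = cudaq.qvector(2)
    custom_h(qubits[0])            # lowered through quake.custom_op
    x.ctrl(qubits[0], qubits[1])
    mz(qubits)

print(cudaq.sample(bell))
```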
65 changes: 54 additions & 11 deletions include/cudaq/Optimizer/Transforms/Passes.td
@@ -47,17 +47,6 @@ def ApplySpecialization : Pass<"apply-op-specialization", "mlir::ModuleOp"> {
];
}

def PySynthCallableBlockArgs : Pass<"py-synth-callable-block-args", "mlir::func::FuncOp"> {
let summary = "Synthesize / Inline cc.callable_func on function block arguments.";
let description = [{
This pass is leveraged by the Python bindings to synthesize any
cc.callable block arguments. By synthesis we mean replace all uses of the
callable block argument with a specific in-Module function call (func.call)
retrieved at runtime (the name of the function passed to the kernel at the
cc.callable block argument index).
}];
}

def BasisConversionPass: Pass<"basis-conversion", "mlir::ModuleOp"> {
let summary = "Converts kernels to a set of basis operations.";
let description = [{
@@ -254,6 +243,48 @@ def GenerateKernelExecution : Pass<"kernel-execution", "mlir::ModuleOp"> {
];
}

def GetConcreteMatrix : Pass<"get-concrete-matrix", "mlir::func::FuncOp"> {
let summary = "Replace the unitary matrix generator function with concrete matrix.";
let description = [{
Given a custom operation whose generator attribute refers to another function
within the module, and assuming the `LiftArrayAlloc` pass has run, there will
be a global constant within the module which holds the concrete matrix
representation for the custom operation. This pass finds that global
constant and updates the custom operation to point to it directly.

Example:
```mlir
module {
func.func @__nvqpp__mlirgen__function_foo_generator_1.bar(%arg0: !cc.stdvec<f64>) -> !cc.stdvec<complex<f64>> {
...
%0 = cc.address_of @__nvqpp__mlirgen__function_foo_generator_1.bar.rodata_0 : !cc.ptr<!cc.array<complex<f64> x 4>>
...
return %3 : !cc.stdvec<complex<f64>>
}

func.func @__nvqpp__mlirgen__function_kernel_1._Z8kernel_1v() {
%0 = quake.alloca !quake.ref
quake.custom_op @__nvqpp__mlirgen__function_foo_generator_1.bar %0 : (!quake.ref) -> ()
return
}

cc.global constant @__nvqpp__mlirgen__function_foo_generator_1.bar.rodata_0 ((dense<[(0.000000e+00,0.000000e+00), (1.000000e+00,0.000000e+00), (1.000000e+00,0.000000e+00), (0.000000e+00,0.000000e+00)]> : tensor<4xcomplex<f64>>) : !cc.array<complex<f64> x 4>
}
```

The `quake.custom_op` call would be converted to

```mlir
func.func @__nvqpp__mlirgen__function_kernel_1._Z8kernel_1v() {
%0 = quake.alloca !quake.ref
quake.custom_op @__nvqpp__mlirgen__function_foo_generator_1.bar.rodata_0 %0 : (!quake.ref) -> ()
return
}
```

}];
}

// LambdaLifting is a module pass because it may modify the ModuleOp and add
// new FuncOps.
def LambdaLifting : Pass<"lambda-lifting", "mlir::ModuleOp"> {
@@ -572,6 +603,18 @@ def PruneCtrlRelations : Pass<"pruned-ctrl-form", "mlir::func::FuncOp"> {
}];
}

def PySynthCallableBlockArgs :
Pass<"py-synth-callable-block-args", "mlir::func::FuncOp"> {
let summary = "Synthesize / Inline cc.callable_func on function block arguments.";
let description = [{
This pass is leveraged by the Python bindings to synthesize any
cc.callable block arguments. By synthesis we mean replace all uses of the
callable block argument with a specific in-Module function call (func.call)
retrieved at runtime (the name of the function passed to the kernel at the
cc.callable block argument index).
}];
}
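
For context, `PySynthCallableBlockArgs` is unchanged; it has only been relocated, apparently to keep the pass definitions in alphabetical order. A minimal sketch of the kind of Python program this pass supports is below, assuming CUDA-Q's kernel-as-callable-argument API; the callable argument `op` is resolved to a direct `func.call` once the concrete kernel is supplied at launch.

```python
from typing import Callable

import cudaq

@cudaq.kernel
def apply_x(q: cudaq.qubit):
    x(q)

@cudaq.kernel
def caller(op: Callable[[cudaq.qubit], None]):
    q = cudaq.qubit()
    op(q)   # callable block argument, synthesized to a direct call at launch
    mz(q)

# The concrete kernel is passed at launch time.
print(cudaq.sample(caller, apply_x))
```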

def QuakeSynthesize : Pass<"quake-synth", "mlir::ModuleOp"> {
let summary =
"Synthesize concrete quantum program from Quake code plus runtime values.";
(Remaining changed files not shown.)
