Using Quantum Hardware Providers
CUDA-Q includes support for a set of hardware providers (IonQ, IQM, OQC, and Quantinuum). For more information about executing quantum kernels on the different hardware backends, please refer to the hardware backends documentation.
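Before targeting a specific provider, it can be helpful to confirm that the corresponding target is available in your CUDA-Q installation. The short sketch below assumes the `cudaq.has_target` helper is available in your CUDA-Q version; it simply reports which of the provider targets discussed here are known to the installation.
import cudaq
# Remote provider targets discussed in this section.
for name in ["ionq", "iqm", "oqc", "quantinuum"]:
    # `has_target` reports whether the named target is known to this installation.
    print(f"Target '{name}' available: {cudaq.has_target(name)}")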
The following code illustrates how to run kernels on IonQ’s backends.
import cudaq
# You only have to set the target once! No need to redefine it
# for every execution call on your kernel.
# To use different targets in the same file, you must update
# it via another call to `cudaq.set_target()`
cudaq.set_target("ionq")
# Create the kernel we'd like to execute on IonQ.
@cudaq.kernel
def kernel():
    qvector = cudaq.qvector(2)
    h(qvector[0])
    x.ctrl(qvector[0], qvector[1])
# Note: All qubits will be measured at the end upon performing
# the sampling. You may encounter a pre-flight error on IonQ
# backends if you include explicit measurements.
# Execute on IonQ and print out the results.
# Option A:
# By using the asynchronous `cudaq.sample_async`, the remaining
# classical code will be executed while the job is being handled
# by IonQ. This is ideal when submitting via a queue over
# the cloud.
async_results = cudaq.sample_async(kernel)
# ... more classical code to run ...
# We can either retrieve the results later in the program with
# ```
# async_counts = async_results.get()
# ```
# or we can also write the job reference (`async_results`) to
# a file and load it later or from a different process.
file = open("future.txt", "w")
file.write(str(async_results))
file.close()
# We can later read the file content and retrieve the job
# information and results.
same_file = open("future.txt", "r")
retrieved_async_results = cudaq.AsyncSampleResult(str(same_file.read()))
counts = retrieved_async_results.get()
print(counts)
# Option B:
# By using the synchronous `cudaq.sample`, the execution of
# any remaining classical code in the file will occur only
# after the job has been returned from IonQ.
counts = cudaq.sample(kernel)
print(counts)
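Because the job reference written to `future.txt` is plain text, the results can also be retrieved from a completely separate script or process. The following is a minimal sketch, assuming the retrieving process sets the same target (`ionq` in this case) before deserializing the saved reference.
import cudaq
# The retrieving process should target the same backend the job was submitted to.
cudaq.set_target("ionq")
# Rebuild the asynchronous result handle from the saved job reference.
with open("future.txt", "r") as f:
    retrieved = cudaq.AsyncSampleResult(f.read())
# `get()` blocks until the job has completed on the provider's side.
print(retrieved.get())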
// Compile and run with:
// ```
// nvq++ --target ionq ionq.cpp -o out.x && ./out.x
// ```
// This will submit the job to the IonQ ideal simulator target (default).
// Alternatively, we can enable hardware noise model simulation by specifying
// the `--ionq-noise-model` flag, e.g.,
// ```
// nvq++ --target ionq --ionq-machine simulator --ionq-noise-model aria-1
// ionq.cpp -o out.x && ./out.x
// ```
// where we set the noise model to mimic the 'aria-1' hardware device.
// Please refer to your IonQ Cloud dashboard for the list of simulator noise
// models.
// Note: `--ionq-machine simulator` is optional since 'simulator' is the
// default configuration if not provided. Assumes a valid set of credentials
// has been stored.
#include <cudaq.h>
#include <fstream>
// Define a simple quantum kernel to execute on IonQ.
struct ghz {
  // Maximally entangled state between 5 qubits.
  auto operator()() __qpu__ {
    cudaq::qvector q(5);
    h(q[0]);
    for (int i = 0; i < 4; i++) {
      x<cudaq::ctrl>(q[i], q[i + 1]);
    }
    auto result = mz(q);
  }
};

int main() {
  // Submit to IonQ asynchronously (e.g., continue executing
  // code in the file until the job has been returned).
  auto future = cudaq::sample_async(ghz{});

  // ... classical code to execute in the meantime ...

  // Can write the future to file:
  {
    std::ofstream out("saveMe.json");
    out << future;
  }

  // Then come back and read it in later.
  cudaq::async_result<cudaq::sample_result> readIn;
  std::ifstream in("saveMe.json");
  in >> readIn;

  // Get the results of the read in future.
  auto async_counts = readIn.get();
  async_counts.dump();

  // OR: Submit to IonQ synchronously (e.g., wait for the job
  // result to be returned before proceeding).
  auto counts = cudaq::sample(ghz{});
  counts.dump();
}
The following code illustrates how to run kernels on IQM’s backends.
import cudaq
# You only have to set the target once! No need to redefine it
# for every execution call on your kernel.
# To use different targets in the same file, you must update
# it via another call to `cudaq.set_target()`
cudaq.set_target("iqm",
url="http://localhost/cocos",
**{"qpu-architecture": "Adonis"})
# Adonis QPU architecture:
#       QB1
#        |
# QB2 - QB3 - QB4
#        |
#       QB5
# Create the kernel we'd like to execute on IQM.
@cudaq.kernel
def kernel():
    qvector = cudaq.qvector(5)
    h(qvector[2])  # QB3
    x.ctrl(qvector[2], qvector[0])
    mz(qvector)
# Execute on IQM Server and print out the results.
# Option A:
# By using the asynchronous `cudaq.sample_async`, the remaining
# classical code will be executed while the job is being handled
# by IQM Server. This is ideal when submitting via a queue over
# the cloud.
async_results = cudaq.sample_async(kernel)
# ... more classical code to run ...
# We can either retrieve the results later in the program with
# ```
# async_counts = async_results.get()
# ```
# or we can also write the job reference (`async_results`) to
# a file and load it later or from a different process.
file = open("future.txt", "w")
file.write(str(async_results))
file.close()
# We can later read the file content and retrieve the job
# information and results.
same_file = open("future.txt", "r")
retrieved_async_results = cudaq.AsyncSampleResult(str(same_file.read()))
counts = retrieved_async_results.get()
print(counts)
# Option B:
# By using the synchronous `cudaq.sample`, the execution of
# any remaining classical code in the file will occur only
# after the job has been returned from IQM Server.
counts = cudaq.sample(kernel)
print(counts)
// Compile and run with:
// ```
// nvq++ --target iqm iqm.cpp --iqm-machine Adonis -o out.x && ./out.x
// ```
// Assumes a valid set of credentials has been stored.
#include <cudaq.h>
#include <fstream>
// Define a simple quantum kernel to execute on IQM Server.
struct adonis_ghz {
  // Maximally entangled state between 5 qubits on Adonis QPU.
  //       QB1
  //        |
  // QB2 - QB3 - QB4
  //        |
  //       QB5
  void operator()() __qpu__ {
    cudaq::qvector q(5);
    h(q[0]);
    // Note that the CUDA-Q compiler will automatically generate the
    // necessary instructions to swap qubits to satisfy the required
    // connectivity constraints for the Adonis QPU. In this program, that
    // means that despite QB1 not being physically connected to QB2, the user
    // can still perform joint operations on q[0] and q[1], because the
    // compiler will automatically (and transparently) inject the necessary
    // swap instructions to execute the user's program without the user
    // having to worry about the physical constraints.
    for (int i = 0; i < 4; i++) {
      x<cudaq::ctrl>(q[i], q[i + 1]);
    }
    auto result = mz(q);
  }
};

int main() {
  // Submit to IQM Server asynchronously (e.g., continue executing
  // code in the file until the job has been returned).
  auto future = cudaq::sample_async(adonis_ghz{});

  // ... classical code to execute in the meantime ...

  // Can write the future to file:
  {
    std::ofstream out("saveMe.json");
    out << future;
  }

  // Then come back and read it in later.
  cudaq::async_result<cudaq::sample_result> readIn;
  std::ifstream in("saveMe.json");
  in >> readIn;

  // Get the results of the read in future.
  auto async_counts = readIn.get();
  async_counts.dump();

  // OR: Submit to IQM Server synchronously (e.g., wait for the job
  // result to be returned before proceeding).
  auto counts = cudaq::sample(adonis_ghz{});
  counts.dump();
}
The following code illustrates how to run kernels on OQC’s backends.
import cudaq
import os
# You only have to set the target once! No need to redefine it
# for every execution call on your kernel.
# To use different targets in the same file, you must update
# it via another call to `cudaq.set_target()`
# To use the OQC target, you will need to set the following environment
# variables:
#   OQC_URL
#   OQC_EMAIL
#   OQC_PASSWORD
# To set up an account, contact oqc_qcaas_support@oxfordquantumcircuits.com
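# For example, the credentials can be supplied from Python before the target
# is set (placeholder values shown below; substitute your own account details):
# os.environ["OQC_URL"] = "<your OQC endpoint>"
# os.environ["OQC_EMAIL"] = "<your OQC account email>"
# os.environ["OQC_PASSWORD"] = "<your OQC password>"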
cudaq.set_target("oqc")
# Create the kernel we'd like to execute on OQC.
@cudaq.kernel
def kernel():
    qvector = cudaq.qvector(2)
    h(qvector[0])
    x.ctrl(qvector[0], qvector[1])
    mz(qvector)
# Option A:
# By using the asynchronous `cudaq.sample_async`, the remaining
# classical code will be executed while the job is being handled
# by OQC. This is ideal when submitting via a queue over
# the cloud.
async_results = cudaq.sample_async(kernel)
# ... more classical code to run ...
# We can either retrieve the results later in the program with
# ```
# async_counts = async_results.get()
# ```
# or we can also write the job reference (`async_results`) to
# a file and load it later or from a different process.
file = open("future.txt", "w")
file.write(str(async_results))
file.close()
# We can later read the file content and retrieve the job
# information and results.
same_file = open("future.txt", "r")
retrieved_async_results = cudaq.AsyncSampleResult(str(same_file.read()))
counts = retrieved_async_results.get()
print(counts)
# Option B:
# By using the synchronous `cudaq.sample`, the execution of
# any remaining classical code in the file will occur only
# after the job has been returned from OQC.
counts = cudaq.sample(kernel)
print(counts)
The following code illustrates how to run kernels on Quantinuum’s backends.
import cudaq
# You only have to set the target once! No need to redefine it
# for every execution call on your kernel.
# By default, we will submit to the Quantinuum syntax checker.
cudaq.set_target("quantinuum")
# Create the kernel we'd like to execute on Quantinuum.
@cudaq.kernel
def kernel():
    qvector = cudaq.qvector(2)
    h(qvector[0])
    x.ctrl(qvector[0], qvector[1])
    mz(qvector[0])
    mz(qvector[1])
# Submit to Quantinuum's endpoint and confirm the program is valid.
# Option A:
# By using the synchronous `cudaq.sample`, the execution of
# any remaining classical code in the file will occur only
# after the job has been executed by the Quantinuum service.
# We will use the synchronous call to submit to the syntax
# checker to confirm the validity of the program.
syntax_check = cudaq.sample(kernel)
if syntax_check:
    print("Syntax check passed! Kernel is ready for submission.")
# Now we can update the target to the Quantinuum emulator and
# execute our program.
cudaq.set_target("quantinuum", machine="H1-2E")
# Option B:
# By using the asynchronous `cudaq.sample_async`, the remaining
# classical code will be executed while the job is being handled
# by Quantinuum. This is ideal when submitting via a queue over
# the cloud.
async_results = cudaq.sample_async(kernel)
# ... more classical code to run ...
# We can either retrieve the results later in the program with
# ```
# async_counts = async_results.get()
# ```
# or we can also write the job reference (`async_results`) to
# a file and load it later or from a different process.
file = open("future.txt", "w")
file.write(str(async_results))
file.close()
# We can later read the file content and retrieve the job
# information and results.
same_file = open("future.txt", "r")
retrieved_async_results = cudaq.AsyncSampleResult(str(same_file.read()))
counts = retrieved_async_results.get()
print(counts)
// Compile and run with:
// ```
// nvq++ --target quantinuum --quantinuum-machine H1-2E quantinuum.cpp -o out.x
// ./out.x
// ```
// Assumes a valid set of credentials has been stored.
// To first confirm the correctness of the program locally, add the
// `--emulate` flag to the `nvq++` command above.
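// For example (sketch only; same source file as above):
// ```
// nvq++ --emulate --target quantinuum quantinuum.cpp -o out.x && ./out.x
// ```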
#include <cudaq.h>
#include <fstream>
// Define a simple quantum kernel to execute on Quantinuum.
struct ghz {
  // Maximally entangled state between 5 qubits.
  auto operator()() __qpu__ {
    cudaq::qvector q(5);
    h(q[0]);
    for (int i = 0; i < 4; i++) {
      x<cudaq::ctrl>(q[i], q[i + 1]);
    }
    mz(q);
  }
};

int main() {
  // Submit to Quantinuum asynchronously (e.g., continue executing
  // code in the file until the job has been returned).
  auto future = cudaq::sample_async(ghz{});

  // ... classical code to execute in the meantime ...

  // Can write the future to file:
  {
    std::ofstream out("saveMe.json");
    out << future;
  }

  // Then come back and read it in later.
  cudaq::async_result<cudaq::sample_result> readIn;
  std::ifstream in("saveMe.json");
  in >> readIn;

  // Get the results of the read in future.
  auto async_counts = readIn.get();
  async_counts.dump();

  // OR: Submit to Quantinuum synchronously (e.g., wait for the job
  // result to be returned before proceeding).
  auto counts = cudaq::sample(ghz{});
  counts.dump();
}