cuda::experimental::stf::exec_place#

class exec_place#

Indicates where a computation takes place (CPU, dev0, dev1, …)

Currently, data and computation are colocated (devid == int(data_place)).

Subclassed by cuda::experimental::stf::exec_place_cuda_stream, cuda::experimental::stf::exec_place_green_ctx, cuda::experimental::stf::exec_place_grid, cuda::experimental::stf::exec_place_host

Public Functions

exec_place() = default#
inline exec_place(const data_place &affine)#
inline bool operator==(const exec_place &rhs) const#
inline bool operator!=(const exec_place &rhs) const#
inline bool operator<(const exec_place &rhs) const#
inline iterator begin()#
inline iterator end()#
inline ::std::string to_string() const#

Returns a string representation of the execution place object.

Returns:

std::string

inline const data_place affine_data_place() const#

Returns the data_place naturally associated with this execution place.

inline void set_affine_data_place(data_place place)#
inline stream_pool &get_stream_pool(
async_resources_handle &async_resources,
bool for_computation
) const#
inline decorated_stream getStream(
async_resources_handle &async_resources,
bool for_computation
) const#

Get a decorated stream from the stream pool associated to this execution place.

This method can be used to obtain CUDA streams from execution places without requiring a CUDASTF context. This is useful when you want to use CUDASTF’s place abstractions (devices, green contexts) for stream management without the full task-based model.

Note

If you are using a CUDASTF context, use ctx.async_resources() to ensure the same stream pools are shared between your code and the context’s internal operations.

Parameters:
  • async_resources – Handle managing the stream pools. Create a standalone async_resources_handle for context-free usage, or use ctx.async_resources() when working alongside a CUDASTF context.

  • for_computation – Hint for selecting which pool to use. When true, returns a stream from the computation pool; when false, returns a stream from the data transfer pool. Using separate pools for computation and transfers can improve overlapping. This is a performance hint and does not affect correctness.

Returns:

A decorated_stream containing the CUDA stream and metadata (device ID, pool index)

inline cudaStream_t pick_stream(
async_resources_handle &async_resources,
bool for_computation = true
) const#

Get a CUDA stream from the stream pool associated to this execution place.

This method can be used to obtain CUDA streams from execution places without requiring a CUDASTF context. This is useful when you want to use CUDASTF’s place abstractions (devices, green contexts) for stream management without the full task-based model.

Example usage without a context:

async_resources_handle resources;
exec_place place = exec_place::device(0);
cudaStream_t stream = place.pick_stream(resources);
myKernel<<<grid, block, 0, stream>>>(...);

Example usage with a context (sharing resources):

stream_ctx ctx;
exec_place place = exec_place::device(0);
cudaStream_t stream = place.pick_stream(ctx.async_resources());
// Stream comes from the same pool used by ctx internally

Note

If you are using a CUDASTF context, use ctx.async_resources() to ensure the same stream pools are shared between your code and the context’s internal operations.

Parameters:
  • async_resources – Handle managing the stream pools. Create a standalone async_resources_handle for context-free usage, or use ctx.async_resources() when working alongside a CUDASTF context.

  • for_computation – Hint for selecting which pool to use. When true, returns a stream from the computation pool; when false, returns a stream from the data transfer pool. Using separate pools for computation and transfers can improve overlapping. This is a performance hint and does not affect correctness. Defaults to true.

Returns:

A CUDA stream associated with this execution place

inline size_t stream_pool_size(
async_resources_handle &async_resources,
bool for_computation = true
) const#

Get the number of streams available in the pool for this execution place.

Parameters:
  • async_resources – Handle managing the stream pools

  • for_computation – Hint for selecting which pool to query (computation or transfer pool)

Returns:

The number of stream slots in the pool

inline ::std::vector<cudaStream_t> pick_all_streams(
async_resources_handle &async_resources,
bool for_computation = true
) const#

Get all streams from the pool associated to this execution place.

This method returns a vector containing all CUDA streams in the pool. Streams are created lazily, so calling this method will create any streams that haven’t been created yet.

Parameters:
  • async_resources – Handle managing the stream pools

  • for_computation – Hint for selecting which pool to use (computation or transfer pool)

Returns:

A vector of CUDA streams from the pool

inline const ::std::shared_ptr<impl> &get_impl() const#
inline exec_place activate() const#

Set computation to run on this place.

Returns:

exec_place — the previous execution place. See deactivate below.

inline void deactivate(const exec_place &p) const#

Undoes the effect of activate.

Call with the previous exec_place object returned by activate.

Warning

Undefined behavior if you don’t pass the result of activate.

inline bool is_host() const#
inline bool is_device() const#
inline bool is_grid() const#
inline size_t size() const#
inline exec_place_grid as_grid() const#
inline size_t grid_dim(int axis_id) const#
inline dim4 grid_dims() const#
template<typename Fun>
inline auto operator->*(Fun &&fun) const#

Execute lambda on this place.

This method accepts a functor, saves the current CUDA device, changes it to the current execution place, invokes the lambda, and finally sets the current device back to the previous one. The last step is taken even if the lambda throws an exception.

Template Parameters:

Fun – A callable entity type

Parameters:

fun – Input functor that will be forwarded and executed

Returns:

auto — the result of the executed functor.

inline exec_place(::std::shared_ptr<impl> pimpl)#

Public Static Functions

static inline exec_place_host host()#
static inline exec_place device_auto()#
static inline exec_place device(int devid)#
static inline exec_place green_ctx(const green_ctx_view &gc_view)#
static inline exec_place green_ctx(
const ::std::shared_ptr<green_ctx_view> &gc_view_ptr
)#
static inline exec_place_cuda_stream cuda_stream(cudaStream_t stream)#
static inline exec_place_cuda_stream cuda_stream(
const decorated_stream &dstream
)#
static inline exec_place current_device()#

Returns the currently active device.

Returns:

exec_place

static inline exec_place_grid all_devices()#
static inline exec_place_grid n_devices(size_t n, dim4 dims)#
static inline exec_place_grid n_devices(size_t n)#
static inline exec_place_grid repeat(const exec_place &e, size_t cnt)#

Creates a grid by replicating an execution place multiple times.

class impl#

Subclassed by cuda::experimental::stf::exec_place_cuda_stream::impl, cuda::experimental::stf::exec_place_green_ctx::impl, cuda::experimental::stf::exec_place_grid::impl, cuda::experimental::stf::exec_place_host::impl

Public Functions

impl() = default#
impl(const impl&) = delete#
impl &operator=(const impl&) = delete#
virtual ~impl() = default#
inline explicit impl(data_place place)#
inline virtual exec_place activate() const#
inline virtual void deactivate(const exec_place &prev) const#
inline virtual const data_place affine_data_place() const#
inline ::std::string to_string() const#
inline virtual bool is_host() const#
inline virtual bool is_device() const#
inline virtual bool is_grid() const#
inline virtual size_t size() const#
inline virtual void set_affine_data_place(data_place place)#
inline virtual bool operator==(const impl &rhs) const#
inline virtual stream_pool &get_stream_pool(
async_resources_handle &async_resources,
bool for_computation
) const#
inline decorated_stream getStream(
async_resources_handle &async_resources,
bool for_computation
) const#
class iterator#

An iterator class that iterates over all subplaces in an exec place.

This is a trivial singleton unless we have a grid of places.

Public Functions

inline iterator(::std::shared_ptr<impl> impl, size_t index)#
inline exec_place operator*()#

Implementation deferred because we need the definition of exec_place_grid.

inline iterator &operator++()#
inline bool operator==(const iterator &other) const#
inline bool operator!=(const iterator &other) const#