Struct rustacuda::context::CurrentContext
Type representing the top context in the thread-local stack.
Implementations
impl CurrentContext
pub fn get_cache_config() -> CudaResult<CacheConfig>
Returns the preferred cache configuration for the current context.
On devices where the L1 cache and shared memory use the same hardware resources, this function returns the preferred cache configuration for the current context. For devices where the size of the L1 cache and shared memory are fixed, this will always return CacheConfig::PreferNone.
Example
let context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?;
let cache_config = CurrentContext::get_cache_config()?;
pub fn get_device() -> CudaResult<Device>
Return the device ID for the current context.
Example
let context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?;
let device = CurrentContext::get_device()?;
pub fn get_flags() -> CudaResult<ContextFlags>
Return the context flags for the current context.
Example
let context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?;
let flags = CurrentContext::get_flags()?;
pub fn get_resource_limit(resource: ResourceLimit) -> CudaResult<usize>
Return resource limits for the current context.
Example
let context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?;
let stack_size = CurrentContext::get_resource_limit(ResourceLimit::StackSize)?;
pub fn get_shared_memory_config() -> CudaResult<SharedMemoryConfig>
Return the shared memory configuration for the current context.
Example
let context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?;
let shared_mem_config = CurrentContext::get_shared_memory_config()?;
pub fn get_stream_priority_range() -> CudaResult<StreamPriorityRange>
Return the least and greatest stream priorities.
If the program attempts to create a stream with a priority outside of this range, it will be automatically clamped to within the valid range. If the device does not support stream priorities, the returned range will contain zeroes.
Example
let context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?;
let priority_range = CurrentContext::get_stream_priority_range()?;
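As a further sketch, the returned range can be used when creating a stream. This assumes StreamPriorityRange exposes least and greatest fields and that Stream::new accepts an optional priority; verify both against the crate documentation:
// Create a stream at the highest priority reported by the device (assumed field and signature).
let stream = Stream::new(StreamFlags::NON_BLOCKING, Some(priority_range.greatest))?;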
pub fn set_cache_config(cfg: CacheConfig) -> CudaResult<()>
Sets the preferred cache configuration for the current context.
On devices where L1 cache and shared memory use the same hardware resources, this sets the preferred cache configuration for the current context. This is only a preference. The driver will use the requested configuration if possible, but is free to choose a different configuration if required to execute the function.
This setting does nothing on devices where the size of the L1 cache and shared memory are fixed.
Example
let context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?;
CurrentContext::set_cache_config(CacheConfig::PreferL1)?;
pub fn set_resource_limit(
resource: ResourceLimit,
limit: usize
) -> CudaResult<()>
Sets a requested resource limit for the current context.
Note that this is only a request; the driver is free to modify the requested value to meet hardware requirements. Each limit has some specific restrictions.
StackSize: Controls the stack size in bytes for each GPU thread.
PrintfFifoSize: Controls the size in bytes of the FIFO used by the printf() device system call. This cannot be changed after a kernel has been launched which uses the printf() function.
MallocHeapSize: Controls the size in bytes of the heap used by the malloc() and free() device system calls. This cannot be changed after a kernel has been launched which uses the malloc() and free() system calls.
DeviceRuntimeSyncDepth: Controls the maximum nesting depth of a grid at which a thread can safely call cudaDeviceSynchronize(). This cannot be changed after a kernel has been launched which uses the device runtime. When setting this limit, keep in mind that additional levels of sync depth require the driver to reserve large amounts of device memory which can no longer be used for device allocations.
DeviceRuntimePendingLaunchCount: Controls the maximum number of outstanding device runtime launches that can be made from the current context. A grid is outstanding from the point of the launch up until the grid is known to have completed. Keep in mind that increasing this limit will require the driver to reserve larger amounts of device memory which can no longer be used for device allocations.
MaxL2FetchGranularity: Controls the L2 fetch granularity. This is purely a performance hint and it can be ignored or clamped depending on the platform.
Example
let context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?;
CurrentContext::set_resource_limit(ResourceLimit::StackSize, 2048)?;
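Because the driver may adjust the requested value, reading the limit back shows what is actually in effect. A small sketch using the same calls as above; the specific byte count is illustrative only:
// Request a 1 MiB printf FIFO; the driver may round or clamp this value.
CurrentContext::set_resource_limit(ResourceLimit::PrintfFifoSize, 1024 * 1024)?;
let effective = CurrentContext::get_resource_limit(ResourceLimit::PrintfFifoSize)?;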
pub fn set_shared_memory_config(cfg: SharedMemoryConfig) -> CudaResult<()>
Sets the preferred shared memory configuration for the current context.
On devices with configurable shared memory banks, this function will set the context's shared memory bank size which is used for subsequent kernel launches.
Example
let context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?;
CurrentContext::set_shared_memory_config(SharedMemoryConfig::DefaultBankSize)?;
pub fn get_current() -> CudaResult<UnownedContext>
Returns a non-owning handle to the current context.
Example
let context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?;
let unowned = CurrentContext::get_current()?;
pub fn set_current<C: ContextHandle>(c: &C) -> CudaResult<()>
Set the given context as the current context for this thread.
If there is no context set for this thread, this pushes the given context onto the stack. If there is a context set for this thread, this replaces the top context on the stack with the given context.
Example
let context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?;
CurrentContext::set_current(&context)?;
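A sketch of making the same context current on a second thread through a non-owning handle. This assumes UnownedContext can be moved across threads, which should be checked against the crate documentation:
let unowned = CurrentContext::get_current()?;
std::thread::spawn(move || -> CudaResult<()> {
    // Make the shared context current for this new thread before doing any CUDA work on it.
    CurrentContext::set_current(&unowned)?;
    Ok(())
}).join().unwrap()?;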
pub fn synchronize() -> CudaResult<()>
Block to wait for a context's tasks to complete.
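Example
A minimal sketch in the style of the examples above; the work being waited on (kernel launches or asynchronous copies) is assumed to have been queued already:
let context = Context::create_and_push(ContextFlags::MAP_HOST | ContextFlags::SCHED_AUTO, device)?;
// ... launch kernels or enqueue asynchronous copies on streams owned by this context ...
CurrentContext::synchronize()?;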
Trait Implementations
Auto Trait Implementations
impl RefUnwindSafe for CurrentContext
impl Send for CurrentContext
impl Sync for CurrentContext
impl Unpin for CurrentContext
impl UnwindSafe for CurrentContext
Blanket Implementations
impl<T> Any for T where
T: 'static + ?Sized,
impl<T> Borrow<T> for T where
T: ?Sized,
impl<T> BorrowMut<T> for T where
T: ?Sized,
fn borrow_mut(&mut self) -> &mut T
impl<T> From<T> for T
impl<T, U> Into<U> for T where
U: From<T>,
impl<T, U> TryFrom<U> for T where
U: Into<T>,
type Error = Infallible
The type returned in the event of a conversion error.
fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>
impl<T, U> TryInto<U> for T where
U: TryFrom<T>,