diff --git a/include/stdexec/__detail/__associate.hpp b/include/stdexec/__detail/__associate.hpp new file mode 100644 index 000000000..b40e8be71 --- /dev/null +++ b/include/stdexec/__detail/__associate.hpp @@ -0,0 +1,252 @@ +/* + * Copyright (c) 2025 Ian Petersen + * Copyright (c) 2025 NVIDIA Corporation + * + * Licensed under the Apache License Version 2.0 with LLVM Exceptions + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * https://llvm.org/LICENSE.txt + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +#include "__execution_fwd.hpp" + +#include "__basic_sender.hpp" +#include "__completion_signatures.hpp" +#include "__concepts.hpp" +#include "__config.hpp" +#include "__operation_states.hpp" +#include "__receivers.hpp" +#include "__scope_concepts.hpp" +#include "__sender_introspection.hpp" +#include "__senders_core.hpp" +#include "__senders.hpp" +#include "__sender_adaptor_closure.hpp" +#include "__transform_completion_signatures.hpp" +#include "__type_traits.hpp" + +#include +#include +#include + +namespace stdexec { + ///////////////////////////////////////////////////////////////////////////// + // [exec.associate] + namespace __associate { + template + struct __associate_data { + using __wrap_result_t = decltype(__declval<_Token&>().wrap(__declval<_Sender>())); + using __wrap_sender_t = std::remove_cvref_t<__wrap_result_t>; + + using __assoc_t = decltype(__declval<_Token&>().try_associate()); + + // NOTE: the spec says the deleter should be a lambda like so: + // + // using __sender_ref = std::unique_ptr< + // __wrap_sender_t, + // // this decltype() breaks things + // 
decltype([](auto* p) noexcept { std::destroy_at(p); }) + // >; + // + // but the above code ICEs gcc 11 and 12 (and maybe MSVC) + // so we declare a named callable + struct __deleter { + constexpr void operator()(__wrap_sender_t* p) const noexcept { + std::destroy_at(p); + } + }; + + using __sender_ref = std::unique_ptr<__wrap_sender_t, __deleter>; + + // BUGBUG: should the spec require __token to be declared as a const _Token, or should this be + // changed to declare __token as a mutable _Token? + explicit __associate_data(const _Token __token, _Sender&& __sndr) noexcept( + __nothrow_constructible_from<__wrap_sender_t, __wrap_result_t> + && noexcept(__token.wrap(static_cast<_Sender&&>(__sndr))) + && noexcept(__token.try_associate())) + : __sndr_(__token.wrap(static_cast<_Sender&&>(__sndr))) + , __assoc_([&] { + __sender_ref guard{std::addressof(__sndr_)}; + + auto assoc = __token.try_associate(); + + if (assoc) { + (void) guard.release(); + } + + return assoc; + }()) { + } + + __associate_data(const __associate_data& __other) noexcept( + __nothrow_copy_constructible<__wrap_sender_t> && noexcept(__other.__assoc_.try_associate())) + requires copy_constructible<__wrap_sender_t> + : __assoc_(__other.__assoc_.try_associate()) { + if (__assoc_) { + std::construct_at(&__sndr_, __other.__sndr_); + } + } + + __associate_data(__associate_data&& __other) + noexcept(__nothrow_move_constructible<__wrap_sender_t>) + : __associate_data(std::move(__other).release()) { + } + + ~__associate_data() { + if (__assoc_) { + std::destroy_at(&__sndr_); + } + } + + std::pair<__assoc_t, __sender_ref> release() && noexcept { + __sender_ref u(__assoc_ ? 
std::addressof(__sndr_) : nullptr); + return {std::move(__assoc_), std::move(u)}; + } + + private: + __associate_data(std::pair<__assoc_t, __sender_ref> __parts) + : __assoc_(std::move(__parts.first)) { + if (__assoc_) { + std::construct_at(&__sndr_, std::move(*__parts.second)); + } + } + + union { + __wrap_sender_t __sndr_; + }; + __assoc_t __assoc_; + }; + + template + __associate_data(_Token, _Sender&&) -> __associate_data<_Token, _Sender>; + + //////////////////////////////////////////////////////////////////////////////////////////////// + struct associate_t { + template + auto operator()(_Sender&& __sndr, _Token&& __token) const + noexcept(__nothrow_constructible_from< + __associate_data, _Sender>, + _Token, + _Sender + >) -> __well_formed_sender auto { + return __make_sexpr( + __associate_data(static_cast<_Token&&>(__token), static_cast<_Sender&&>(__sndr))); + } + + template + STDEXEC_ATTRIBUTE(always_inline) + auto operator()(_Token&& __token) const noexcept { + return __closure(*this, static_cast<_Token&&>(__token)); + } + }; + + // NOTE: the spec declares this class template inside the get_state function + // but I couldn't get that to build with Clang 21 so I moved it out to + // this namespace-scoped template and __uglified all the symbols + template + struct __op_state { + using __associate_data_t = std::remove_cvref_t<__data_of<_Sender>>; + using __assoc_t = __associate_data_t::__assoc_t; + using __sender_ref_t = __associate_data_t::__sender_ref; + + using __op_t = connect_result_t; + + __assoc_t __assoc_; + union { + _Receiver* __rcvr_; + __op_t __op_; + }; + + explicit __op_state(std::pair<__assoc_t, __sender_ref_t> parts, _Receiver& r) + : __assoc_(std::move(parts.first)) { + if (__assoc_) { + ::new ((void*) std::addressof(__op_)) + __op_t(connect(std::move(*parts.second), std::move(r))); + } else { + __rcvr_ = std::addressof(r); + } + } + + explicit __op_state(__associate_data_t&& ad, _Receiver& r) + : __op_state(std::move(ad).release(), r) { + } 
+ + explicit __op_state(const __associate_data_t& ad, _Receiver& r) + requires copy_constructible<__associate_data_t> + : __op_state(__associate_data_t(ad).release(), r) { + } + + ~__op_state() { + if (__assoc_) { + std::destroy_at(&__op_); + } + } + + void __run() noexcept { + if (__assoc_) { + stdexec::start(__op_); + } else { + stdexec::set_stopped(std::move(*__rcvr_)); + } + } + }; + + struct __associate_impl : __sexpr_defaults { +#if 0 // TODO: I don't know how to implement this correctly + static constexpr auto get_attrs = [](__ignore, const _Child& __child) noexcept { + return __sync_attrs{__child}; + }; +#endif + + template + using __wrap_sender_of_t = + __copy_cvref_t<_Sender, typename __data_of>::__wrap_sender_t>; + + static constexpr auto get_completion_signatures = + [](_Sender&&, _Env&&...) noexcept + -> transform_completion_signatures< + __completion_signatures_of_t<__wrap_sender_of_t<_Sender>>, + completion_signatures + > { + static_assert(sender_expr_for<_Sender, associate_t>); + return {}; + }; + + static constexpr auto get_state = + [](_Self&& __self, _Receiver& __rcvr) noexcept( + (same_as<_Self, std::remove_cvref_t<_Self>> + || __nothrow_constructible_from, _Self>) && + __nothrow_callable< + connect_t, + typename std::remove_cvref_t<__data_of<_Self>>::__wrap_sender_t, + _Receiver + >) { + auto&& [_, data] = std::forward<_Self>(__self); + + using op_state_t = __op_state, _Receiver>; + return op_state_t{__forward_like<_Self>(data), __rcvr}; + }; + + static constexpr auto start = [](auto& __state, auto&) noexcept -> void { + __state.__run(); + }; + }; + } // namespace __associate + + using __associate::associate_t; + + /// @brief The associate sender adaptor, which associates a sender with the + /// async scope referred to by the given token + /// @hideinitializer + inline constexpr associate_t associate{}; + + template <> + struct __sexpr_impl : __associate::__associate_impl { }; +} // namespace stdexec diff --git 
a/include/stdexec/__detail/__concepts.hpp b/include/stdexec/__detail/__concepts.hpp index 5a02d6636..718731050 100644 --- a/include/stdexec/__detail/__concepts.hpp +++ b/include/stdexec/__detail/__concepts.hpp @@ -253,6 +253,12 @@ namespace stdexec { template concept __nothrow_copy_constructible = (__nothrow_constructible_from<_Ts, const _Ts&> && ...); + template + concept __nothrow_assignable_from = STDEXEC_IS_NOTHROW_ASSIGNABLE(_Ty, _A); + + template + concept __nothrow_move_assignable = (__nothrow_assignable_from<_Ts, _Ts> && ...); + template concept __decay_copyable = (constructible_from<__decay_t<_Ts>, _Ts> && ...); diff --git a/include/stdexec/__detail/__config.hpp b/include/stdexec/__detail/__config.hpp index 645c2ac51..871e48c4d 100644 --- a/include/stdexec/__detail/__config.hpp +++ b/include/stdexec/__detail/__config.hpp @@ -377,6 +377,12 @@ namespace __coro = std::experimental; # define STDEXEC_IS_TRIVIALLY_CONSTRUCTIBLE(...) std::is_trivially_constructible_v<__VA_ARGS__> #endif +#if STDEXEC_HAS_BUILTIN(__is_nothrow_assignable) || STDEXEC_MSVC() +# define STDEXEC_IS_NOTHROW_ASSIGNABLE(...) __is_nothrow_assignable(__VA_ARGS__) +#else +# define STDEXEC_IS_NOTHROW_ASSIGNABLE(...) std::is_nothrow_assignable_v<__VA_ARGS__> +#endif + #if STDEXEC_HAS_BUILTIN(__is_empty) || STDEXEC_MSVC() # define STDEXEC_IS_EMPTY(...) __is_empty(__VA_ARGS__) #else diff --git a/include/stdexec/__detail/__counting_scopes.hpp b/include/stdexec/__detail/__counting_scopes.hpp new file mode 100644 index 000000000..292db40a0 --- /dev/null +++ b/include/stdexec/__detail/__counting_scopes.hpp @@ -0,0 +1,611 @@ +/* + * Copyright (c) 2025 Ian Petersen + * Copyright (c) 2025 NVIDIA Corporation + * + * Licensed under the Apache License Version 2.0 with LLVM Exceptions + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * https://llvm.org/LICENSE.txt + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#pragma once + +#include "__execution_fwd.hpp" + +#include "__concepts.hpp" +#include "__env.hpp" +#include "__receivers.hpp" +#include "__schedulers.hpp" +#include "__senders_core.hpp" +#include "__sender_introspection.hpp" +#include "__type_traits.hpp" + +#include +#include +#include +#include +#include +#include +#include + +namespace stdexec { + class simple_counting_scope; + + namespace __counting_scopes { + struct __state_base { + __state_base* __next_; // intentionally uninitialized + + __state_base() = default; + __state_base(__state_base&&) = delete; + + virtual void __complete() noexcept = 0; + + protected: + ~__state_base() = default; + }; + + struct __scope_join_t { }; + + struct __scope_join_impl : __sexpr_defaults { + template + using __scheduler_of_t = __call_result_t; + + template + using __sched_sender_of_t = __call_result_t>; + + static constexpr auto get_completion_signatures = + [](const _Sender&, _Env&&) noexcept + -> __completion_signatures_of_t<__sched_sender_of_t<_Env>, _Env> { + return {}; + }; + + template + struct __state final : __state_base { + struct __rcvr_t { + using receiver_concept = receiver_t; + + _Rcvr& __rcvr_; + + void set_value() && noexcept { + stdexec::set_value(std::move(__rcvr_)); + } + + template + void set_error(E&& e) && noexcept { + stdexec::set_error(std::move(__rcvr_), static_cast(e)); + }; + + void set_stopped() && noexcept { + stdexec::set_stopped(std::move(__rcvr_)); + } + + decltype(auto) get_env() const noexcept { + return stdexec::get_env(__rcvr_); + } + }; + + using __sched_sender = __sched_sender_of_t>; 
+ + using __op_t = connect_result_t<__sched_sender, __rcvr_t>; + + _Scope* __scope_; + _Rcvr& __receiver_; + __op_t __op_; + + __state(_Scope* __scope, _Rcvr& __rcvr) + noexcept(__nothrow_callable) + : __scope_(__scope) + , __receiver_(__rcvr) + , __op_( + stdexec::connect( + schedule(get_scheduler(stdexec::get_env(__rcvr))), + __rcvr_t(__rcvr))) { + } + + void __complete() noexcept override { + stdexec::start(__op_); + } + + void __complete_inline() noexcept { + set_value(std::move(__receiver_)); + } + }; + + // nvc++ needs help with CTAD for this class template + template + __state(_Scope*, _Rcvr&) -> __state<_Scope, _Rcvr>; + + static constexpr auto get_state = + [](_Sndr&& __sender, _Rcvr& __receiver) noexcept( + __nothrow_constructible_from< + __state<__data_of>, _Rcvr>, + __data_of>, + _Rcvr& + >) { + auto [_, self] = __sender; + return __state(self, __receiver); + }; + + static constexpr auto start = [](auto& __s, auto&) noexcept { + if (__s.__scope_->__start_join_sender(__s)) { + __s.__complete_inline(); + } + }; + }; + + template + struct __association_t { + constexpr __association_t() = default; + + constexpr __association_t(__association_t&& __other) noexcept + : __scope_(std::exchange(__other.__scope_, nullptr)) { + } + + ~__association_t() { + if (__scope_ != nullptr) { + __scope_->__disassociate(); + } + } + + __association_t& operator=(__association_t __rhs) noexcept { + std::swap(__scope_, __rhs.__scope_); + return *this; + } + + constexpr explicit operator bool() const noexcept { + return __scope_ != nullptr; + } + + __association_t try_associate() const noexcept { + if (__scope_) { + return __scope_->__try_associate(); + } else { + return __association_t(); + } + } + + private: + friend simple_counting_scope; + + _Scope* __scope_{}; + + constexpr __association_t(_Scope& __scope) noexcept + : __scope_(std::addressof(__scope)) { + } + }; + } // namespace __counting_scopes + + template <> + struct __sexpr_impl<__counting_scopes::__scope_join_t> : 
__counting_scopes::__scope_join_impl { }; + + class simple_counting_scope { + public: + using __assoc_t = __counting_scopes::__association_t; + + struct token { + template + _Sender&& wrap(_Sender&& __snd) const noexcept { + return static_cast<_Sender&&>(__snd); + } + + __assoc_t try_associate() const noexcept { + return __scope_->__try_associate(); + } + + private: + friend class simple_counting_scope; + + simple_counting_scope* __scope_; + + explicit token(simple_counting_scope* __scope) noexcept + : __scope_(__scope) { + } + }; + + // we represent the (count, state) pair in a single std::size_t by allocating + // the lower three bits to state, leaving all the rest of the bits for count; + // the result is that we can count up to MAX_SIZE_T >> 3 outstanding ops. + static constexpr std::size_t max_associations = std::numeric_limits::max() >> 3; + + simple_counting_scope() = default; + + simple_counting_scope(simple_counting_scope&&) = delete; + + ~simple_counting_scope() { + // there are three cases to consider here: + // 1. we're about to terminate, in which case memory ordering is irrelevant; + // 2. the scope is unused, which means there were never any associated operations + // to synchronize with, which again means the ordering doesn't matter; or + // 3. the scope was used and has been joined. + // + // In the third case, any threads that completed a join-sender have synchronized + // with all the now-completed associated operations but the scope may be destroyed + // on yet another thread so we ought to execute a load-acquire to ensure the + // current thread has properly synchronized. 
+ auto bits = __bits_.load(std::memory_order_acquire); + if (!__destructible(bits)) { + std::terminate(); + } + } + + token get_token() noexcept { + return token{this}; + } + + void close() noexcept { + // we need store-release semantics to ensure this closure happens-before + // any subsequent calls to __try_associate that must fail as a result of + // this closure; we don't use acquire-release semantics because the caller + // is *sending* a signal, not receiving one + __bits_.fetch_or(__closed, std::memory_order_release); + } + + sender auto join() noexcept { + return __make_sexpr<__counting_scopes::__scope_join_t>(this); + } + + private: + friend __assoc_t; + + static constexpr std::size_t __closed{1ul}; + static constexpr std::size_t __join_needed{2ul}; + static constexpr std::size_t __join_running{4ul}; + + // Storage for the (count, state) pair referenced in the spec; we store + // the spec'd "state" in the lower three bits and the spec'd "count" in + // all the rest of the bits; the states named in the spec map like so: + // unused = all bits zero + // open = any count | __join_needed + // closed = any count | __join_needed | __closed + // open-and-joining = any count | __join_running | __join_needed + // closed-and-joining = any count | __join_running | __join_needed | __closed + // unused-and-closed = zero | __closed + // joined = zero | __closed + // + // INVARIANT: __bits_ = (count << 3) | (state & 7) + std::atomic __bits_{0ul}; + + // An intrusive singly-linked list of join-sender operation states; + // elements of the list have been "registered" to be completed when the + // last outstanding associated operation completes. The value can be + // the possibly-null pointer to the head of the list (where nullptr + // means the list is empty) or `this`, which is the sentinel value that + // indicates that the last associated operation has been disassociated + // and any previously-registered join operations have been (or are about + // to be) completed. 
+ std::atomic __registeredJoinOps_{nullptr}; + + __assoc_t __try_associate() noexcept { + constexpr auto makeNewBits = [](std::size_t bits) noexcept { + // [exec.simple.counting.mem] paragraph 5 and 5.3 say there is no + // effect if state is closed or if count is equal to max_associations + // so we should not be calculating a new (count, state) pair if either + // of those conditions hold. + assert(!__is_closed(bits) && __count(bits) < max_associations); + + // [exec.simple.counting.mem] paragraph 5 + // Effects: .... Otherwise, if state is + // (5.1) -- unused, then increments count and changes state to open; + // (5.2) -- open or open-and-joining, then increments count; + // + // NOTE: we represent "open" with __join_needed, and "open-and-joining" + // with (__join_needed | __join_running) so we can implement the + // update to state by simply ensuring the __join_needed bit is + // set; incrementing count is done in the obvious way. + return __make_bits(__count(bits) + 1ul, __state(bits) | __join_needed); + }; + + // we might be about to observe that the scope has been closed; we should + // establish that the closure happened-before this attempt to associate + // so this needs to be a load-acquire + auto oldBits = __bits_.load(std::memory_order_acquire); + std::size_t newBits; //intentionally uninitialized + + do { + // [exec.simple.counting.mem] paragraph 5 + // Effects: if count is equal to max_associations then no effects. + // Otherwise, if state is + // ... + // (5.3) -- otherwise, no effect. + // + // NOTE: Paragraph 5.3 applies when state is closed, closed-and-joining, + // or joined, all of which can be detected by checking the closed bit + if (__is_closed(oldBits) || __count(oldBits) == max_associations) { + // Paragraph 6 (the Returns clause) says we return assoc-t() "otherwise", + // which applies when count is not incremented, i.e. right here. 
+ return __assoc_t(); + } + + newBits = makeNewBits(oldBits); + } while (!__bits_.compare_exchange_weak( + oldBits, + newBits, + // on success we only need store-relaxed because we're "just" incrementing + // a reference count but on failure we need load-acquire to synchronize + // with the thread that closed the scope if we happen to observe that; it's + // UB for the on-failure ordering to be weaker than the on-success ordering + // so we have to use acquire for both. + std::memory_order_acquire)); + + // [exec.simple.counting.mem] paragraph 6 + // Returns: If count was incremented, an object of type assoc-t that is engaged + // and associated with *this, and assoc-t() otherwise. + // + // NOTE: we only break out of the while loop and execute this return statement + // if the CAS succeeded, the side effect of which is to increment count, + // so we must return "an object of type assoc-t that is engaged and + // associated with *this" here. + return __assoc_t{*this}; + } + + void __disassociate() noexcept { + // NOTE: The spec says, "Decrements count. If count is zero after + // decrementing and state is [joining] then changes state to + // joined...", which could be transliterated to code like so: + // + // auto oldCount = __count(__bits_.fetch_sub(1 << 3)); + // + // if (oldCount == 1) + // __bits_.store(__closed); + // + // but that would introduce a race condition: if the scope is + // open then another sender could be associated with the scope + // between the fetch_sub and the store, leading to an invalid + // state. Instead, we use a CAS loop to atomically update the + // count and state simultaneously. + + constexpr auto makeNewBits = [](std::size_t bits) noexcept { + // [exec.simple.counting.mem] paragraph 8 + // Effects: Decrements count. ... + const auto newCount = __count(bits) - 1ul; + // ... If count is zero after decrementing and state is open-and-joining + // or closed-and-joining, changes state to joined... 
+ // + // NOTE: We can check for both open-and-joining and closed-and-joining by + // checking the joining bit; it doesn't matter whether the scope is + // open or closed, only whether a join-sender is pending or not. + const auto newState = (newCount == 0ul && __is_joining(bits) ? __closed : __state(bits)); + + assert(newCount < __count(bits)); + + return __make_bits(newCount, newState); + }; + + // relaxed is sufficient here because the CAS loop we're about to run won't + // complete until we've synchronized with acquire-release semantics + auto oldBits = __bits_.load(std::memory_order_relaxed); + std::size_t newBits; // intentionally uninitialized + + // [exec.simple.counting.mem] paragraph 7 + // Preconditions: count > 0 + assert(__count(oldBits) > 0ul); + + do { + newBits = makeNewBits(oldBits); + } while (!__bits_.compare_exchange_weak( + oldBits, + newBits, + // on success, we need store-release semantics to publish the consequences + // of the just-finished operation to other scope users, and we also need + // load-acquire semantics in case we're the last associated operation to + // complete and thus initiate the tear-down of the scope + std::memory_order_acq_rel, + // on failure, we're going to immediately try to synchronize again so we + // can get away with relaxed semantics + std::memory_order_relaxed)); + + if (!__is_joined(newBits)) { + assert(!__is_joining(newBits) || __count(newBits) > 0ul); + + // we didn't update to the joined state so we're done with enacting + // the required effects + return; + } + + // [exec.simple.counting.mem] paragraph 8 continued... 
+ // [state has been updated to joined so] call complete() on all objects + // registered with *this + + constexpr auto dequeueWaitingJoinOps = [](auto* self) noexcept { + // leave __registeredJoinOps_ pointing at *this, which we use as a sentinel; + // any join operations that start after this exchange will observe the + // sentinel, conclude that the scope has already been joined, and thus + // complete inline + // + // we need acquire semantics to establish that this dequeue operation + // happens-after all the now-completed associated operations, and we + // need release semantics to ensure that any future join-senders that + // observe the sentinel value perform that observation with a happens-after + // relationship with the current update + void* waitingOps = self->__registeredJoinOps_.exchange(self, std::memory_order_acq_rel); + + // at this point, waitingOps had better be either nullptr or the address + // of a join operation waiting to be completed; otherwise, the upcoming + // static_cast is UB + assert(waitingOps != self); + + return static_cast<__counting_scopes::__state_base*>(waitingOps); + }; + + for (auto* ops = dequeueWaitingJoinOps(this); ops != nullptr; ops = ops->__next_) { + ops->__complete(); + } + } + + // returns true in the unused and unused-and-closed states; since the bit + // pattern for joined matches the unused-and-closed bit pattern, returns + // true in the joined state, too + constexpr static bool __is_unused(std::size_t __bits) noexcept { + return (__bits & __join_needed) == 0ul; + } + + // returns true in the unused, open, and open-and-joining states + constexpr static bool __is_open(std::size_t __bits) noexcept { + return (__bits & __closed) == 0ul; + } + + // returns true in the closed, closed-and-joining, unused-and-closed, and + // joined states + constexpr static bool __is_closed(std::size_t __bits) noexcept { + return !__is_open(__bits); + } + + // returns true in the joined state, which shares a representation with the + 
// unused-and-closed state; for the scope to be fully joined, the number of + // outstanding associated operations must be zero, so we check for exact + // equality with __closed rather than using it as a bit mask + constexpr static bool __is_joined(std::size_t __bits) noexcept { + return __bits == __closed; + } + + // returns true in the open-and-joining and closed-and-joining states + constexpr static bool __is_joining(std::size_t __bits) noexcept { + return (__bits & __join_running) != 0ul; + } + + // returns false in the unused, unused-and-closed, and joined states; + // return true in all other states + constexpr static bool __is_join_needed(std::size_t __bits) noexcept { + return !__is_unused(__bits); + } + + // returns true if the destructor is safe to run + constexpr static bool __destructible(std::size_t __bits) noexcept { + // acceptable terminal states are __bits == 0 and __bits == __closed; we + // can check for both at once by expecting __bits with the __closed bit + // cleared to be equal to zero + return (__bits & ~__closed) == 0ul; + } + + // extracts from __bits what the spec calls count, which is the number of + // outstanding operations associated with this scope + constexpr static std::size_t __count(std::size_t __bits) noexcept { + return __bits >> 3; + } + + // extracts from __bits what the spec calls state, which determines where + // this scope is in its lifecycle and which operations are valid + constexpr static std::size_t __state(std::size_t __bits) noexcept { + return __bits & 7ul; + } + + // composes __newCount and __newState into the packed representation we store + // in __bits + constexpr static std::size_t + __make_bits(std::size_t __newCount, std::size_t __newState) noexcept { + // no high bits set + assert(__count(__newCount << 3) == __newCount); + + // no high bits set + assert(__state(__newState) == __newState); + + return (__newCount << 3) | __state(__newState); + } + + friend __counting_scopes::__scope_join_impl; + + bool 
__start_join_sender(__counting_scopes::__state_base& __joinOp) noexcept { + // relaxed is sufficient because the CAS loop below will continue until + // we've synchronized + auto oldBits = __bits_.load(std::memory_order_relaxed); + + do { + // [exec.simple.counting.mem] para (9.1) + // unused, unused-and-closed, or joined -> joined + // + // NOTE: there's a spec bug; we need to move to the joined + // state and return true when count is zero, regardless + // of state + if (__count(oldBits) == 0ul) { + const auto newBits = __make_bits(__count(oldBits), __closed); + + assert(__is_joined(newBits)); + + // try to make it joined + if (__bits_.compare_exchange_weak( + oldBits, + newBits, + // on success, we need to publish to future callers of try_associate + // that the scope is closed and consume from all the now-completed + // associated operations any updates they made so we need + // acquire-release semantics + std::memory_order_acq_rel, + // on failure, relaxed is fine because we'll loop back and try + // again to synchronize + std::memory_order_relaxed)) { + return true; + } + } + // [exec.simple.counting.mem] para (9.2) + // open or open-and-joining -> open-and-joining + // [exec.simple.counting.mem] para (9.3) + // closed or closed-and-joining -> closed-and-joining + else { + assert(__is_join_needed(oldBits)); + + // try to make it {open|closed}-and-joining + const auto newBits = oldBits | __join_running; + + assert(__is_joining(newBits)); + + if (__bits_.compare_exchange_weak( + oldBits, + newBits, + // on success, relaxed is sufficient because __register will further synchronize; + // it's fine for the joining thread to synchronize with associated operations on + // __registeredJoinOps and not on __bits because __disassociate decrements the + // outstanding operation count with acquire-release semantics and then the last + // decrementer dequeues the list of registered joiners with acquire-release + // semantics, establishing that all decrements strongly 
happen-before the + // completion of any join operation. + // + // on failure, relaxed is sufficient because we'll loop around and try again + std::memory_order_relaxed)) { + return !__register(__joinOp); + } + } + } while (true); + } + + bool __register(__counting_scopes::__state_base& __joinOp) noexcept { + // we need acquire semantics in case the join operation being started is about to + // observe that the last decrement has already happened; in that case, we need to + // establish that all of the now-completed associated operations happen-before the + // completion of this join operation + auto* ptr = __registeredJoinOps_.load(std::memory_order_acquire); + + do { + if (ptr == this) { + // __registeredJoinOps_ == this when the list has been cleared + return false; + } + + // make __joinOp's __next_ point to the current head; note that, on the first + // iteration of this loop, this assignment is the first write to __next_ that + // establishes a non-indeterminate value for the variable + __joinOp.__next_ = static_cast<__counting_scopes::__state_base*>(ptr); + } while ( + // try to make the head point to __joinOp + __registeredJoinOps_.compare_exchange_weak( + ptr, + &__joinOp, + // on success, we need at least release semantics to ensure that the final + // disassociation can see the full join operation when it dequeues the list + // of registered operations with acquire semantics; however, the on-success + // ordering has to be at least as strong as the on-failure ordering, for + // which we need acquire semantics, so on-success has to be acquire-release. 
+ std::memory_order_acq_rel, + // on failure, we need acquire semantics in case we're about to observe the + // sentinel value and return early without trying again + std::memory_order_acquire)); + + return true; + } + }; +} // namespace stdexec diff --git a/include/stdexec/__detail/__scope_concepts.hpp b/include/stdexec/__detail/__scope_concepts.hpp new file mode 100644 index 000000000..14bb1fcf4 --- /dev/null +++ b/include/stdexec/__detail/__scope_concepts.hpp @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2025 Ian Petersen + * Copyright (c) 2025 NVIDIA Corporation + * + * Licensed under the Apache License Version 2.0 with LLVM Exceptions + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * https://llvm.org/LICENSE.txt + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +#pragma once + +#include "__execution_fwd.hpp" + +#include "__completion_signatures.hpp" +#include "__concepts.hpp" +#include "__env.hpp" +#include "__operation_states.hpp" +#include "__receivers.hpp" +#include "__senders_core.hpp" + +#include + +namespace stdexec { + ///////////////////////////////////////////////////////////////////////////// + // [exec.scope.concepts] + template + concept scope_association = movable<_Assoc> && __nothrow_move_constructible<_Assoc> + && __nothrow_move_assignable<_Assoc> && default_initializable<_Assoc> + && requires(const _Assoc assoc) { + { static_cast(assoc) } noexcept; + { assoc.try_associate() } -> same_as<_Assoc>; + }; + + namespace __scope_concepts { + struct __test_sender { + using sender_concept = stdexec::sender_t; + + using completion_signatures = stdexec::completion_signatures< + stdexec::set_value_t(int), + stdexec::set_error_t(std::exception_ptr), + stdexec::set_stopped_t() + >; + + struct __op { + using operation_state_concept = stdexec::operation_state_t; + + __op() = default; + __op(__op&&) = delete; + + void start() & noexcept { + } + }; + + template + __op connect(_Receiver) { + return {}; + } + }; + } // namespace __scope_concepts + + template + concept scope_token = copyable<_Token> && requires(const _Token token) { + { token.try_associate() } -> scope_association; + { token.wrap(__declval<__scope_concepts::__test_sender>()) } -> sender_in>; + }; +} // namespace stdexec diff --git a/include/stdexec/__detail/__sender_adaptor_closure.hpp b/include/stdexec/__detail/__sender_adaptor_closure.hpp index 93f76aebf..b7f3bb658 100644 --- a/include/stdexec/__detail/__sender_adaptor_closure.hpp +++ b/include/stdexec/__detail/__sender_adaptor_closure.hpp @@ -69,7 +69,8 @@ namespace stdexec { template _Closure> STDEXEC_ATTRIBUTE(always_inline) - auto operator|(_Sender&& __sndr, _Closure&& __clsur) -> __call_result_t<_Closure, _Sender> { + auto operator|(_Sender&& __sndr, _Closure&& __clsur) + 
+    noexcept(__nothrow_callable<_Closure, _Sender>) -> __call_result_t<_Closure, _Sender> {
     return static_cast<_Closure&&>(__clsur)(static_cast<_Sender&&>(__sndr));
   }
diff --git a/include/stdexec/__detail/__spawn.hpp b/include/stdexec/__detail/__spawn.hpp
new file mode 100644
index 000000000..369e38067
--- /dev/null
+++ b/include/stdexec/__detail/__spawn.hpp
@@ -0,0 +1,188 @@
+/*
+ * Copyright (c) 2025 Ian Petersen
+ * Copyright (c) 2025 NVIDIA Corporation
+ *
+ * Licensed under the Apache License Version 2.0 with LLVM Exceptions
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *   https://llvm.org/LICENSE.txt
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include "__execution_fwd.hpp"
+
+#include "__concepts.hpp"
+#include "__env.hpp"
+#include "__queries.hpp"
+#include "__receivers.hpp"
+#include "__scope_concepts.hpp"
+#include "__senders_core.hpp"
+#include "__type_traits.hpp"
+#include "__write_env.hpp"
+
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+namespace stdexec {
+  /////////////////////////////////////////////////////////////////////////////
+  // [exec.spawn]
+  namespace __spawn {
+    // Type-erased base for the dynamically allocated spawn operation; the
+    // receiver holds a pointer to this and calls __complete() on completion.
+    struct __spawn_state_base {
+      __spawn_state_base() = default;
+
+      __spawn_state_base(__spawn_state_base&&) = delete;
+
+      virtual void __complete() noexcept = 0;
+
+     protected:
+      ~__spawn_state_base() = default;
+    };
+
+    // Fire-and-forget receiver: both value and stopped completions simply
+    // tear down the spawn state (spawned senders may not complete with error).
+    struct __spawn_receiver {
+      using receiver_concept = receiver_t;
+
+      __spawn_state_base* __state_;
+
+      void set_value() && noexcept {
+        __state_->__complete();
+      }
+
+      void set_stopped() && noexcept {
+        __state_->__complete();
+      }
+    };
+
+    template <class _Alloc, class _Token, class _Sender>
+    struct __spawn_state final : __spawn_state_base {
+      using __op_t = connect_result_t<_Sender, __spawn_receiver>;
+
+      // NOTE: connect() runs before try_associate(); if try_associate()
+      // throws, the already-constructed op-state is destroyed normally.
+      __spawn_state(_Alloc __alloc, _Sender&& __sndr, _Token __token)
+        : __alloc_(std::move(__alloc))
+        , __op_(connect(std::move(__sndr), __spawn_receiver(this)))
+        , __assoc_(__token.try_associate()) {
+      }
+
+      // Self-destructs: releases the scope association after deallocation of
+      // the op-state (the association is moved out first and dies last).
+      void __complete() noexcept override {
+        [[maybe_unused]]
+        auto assoc = std::move(__assoc_);
+
+        {
+          using traits = std::allocator_traits<_Alloc>::template rebind_traits<__spawn_state>;
+          typename traits::allocator_type alloc(__alloc_);
+          traits::destroy(alloc, this);
+          traits::deallocate(alloc, this, 1);
+        }
+      }
+
+      // Starts the operation if the scope admitted us; otherwise completes
+      // (and deallocates) immediately without starting the child.
+      void __run() noexcept {
+        if (__assoc_) {
+          start(__op_);
+        } else {
+          __complete();
+        }
+      }
+
+     private:
+      using __assoc_t = std::remove_cvref_t<decltype(__declval<_Token&>().try_associate())>;
+
+      _Alloc __alloc_;
+      __op_t __op_;
+      __assoc_t __assoc_;
+    };
+
+    // Picks the allocator for the spawn op-state: the caller-supplied
+    // environment wins, then the (wrapped) sender's environment, then
+    // std::allocator<void>.
+    struct __choose_alloc_fn {
+      template <class _Env, class _SenderEnv>
+        requires __callable<get_allocator_t, const _Env&>
+      auto operator()(const _Env& __env, const _SenderEnv&) const {
+        return get_allocator(__env);
+      }
+
+      template <class _Env, class _SenderEnv>
+        requires(!__callable<get_allocator_t, const _Env&>)
+             && __callable<get_allocator_t, const _SenderEnv&>
+      auto operator()(const _Env&, const _SenderEnv& __env) const {
+        return get_allocator(__env);
+      }
+
+      template <class _Env, class _SenderEnv>
+        requires(!__callable<get_allocator_t, const _Env&>)
+             && (!__callable<get_allocator_t, const _SenderEnv&>)
+      std::allocator<void> operator()(const _Env&, const _SenderEnv&) const {
+        return std::allocator<void>();
+      }
+    };
+
+    inline constexpr __choose_alloc_fn __choose_alloc{};
+
+    // Builds the environment written onto the spawned sender: if the caller's
+    // env lacks an allocator but the sender's env has one, propagate it.
+    struct __choose_senv_fn {
+      template <class _Env, class _SenderEnv>
+        requires __callable<get_allocator_t, const _Env&>
+      const _Env& operator()(const _Env& __env, const _SenderEnv&) const {
+        return __env;
+      }
+
+      template <class _Env, class _SenderEnv>
+        requires(!__callable<get_allocator_t, const _Env&>)
+             && __callable<get_allocator_t, const _SenderEnv&>
+      auto operator()(const _Env& __env, const _SenderEnv& __sndrEnv) const {
+        return __env::__join(prop(get_allocator, get_allocator(__sndrEnv)), __env);
+      }
+
+      template <class _Env, class _SenderEnv>
+        requires(!__callable<get_allocator_t, const _Env&>)
+             && (!__callable<get_allocator_t, const _SenderEnv&>)
+      const _Env& operator()(const _Env& __env, const _SenderEnv&) const {
+        return __env;
+      }
+    };
+
+    inline constexpr __choose_senv_fn __choose_senv{};
+
+    struct spawn_t {
+      template <sender _Sender, scope_token _Token>
+      void operator()(_Sender&& __sndr, _Token&& __tkn) const {
+        return (*this)(static_cast<_Sender&&>(__sndr), static_cast<_Token&&>(__tkn), env<>{});
+      }
+
+      template <sender _Sender, scope_token _Token, class _Env>
+      void operator()(_Sender&& __sndr, _Token&& __tkn, _Env&& __env) const {
+        auto wrappedSender = __tkn.wrap(static_cast<_Sender&&>(__sndr));
+        auto sndrEnv = get_env(wrappedSender);
+
+        using raw_alloc = decltype(__choose_alloc(__env, sndrEnv));
+
+        auto senderWithEnv = write_env(std::move(wrappedSender), __choose_senv(__env, sndrEnv));
+
+        using spawn_state_t =
+          __spawn_state<raw_alloc, std::remove_cvref_t<_Token>, decltype(senderWithEnv)>;
+
+        using traits = std::allocator_traits<raw_alloc>::template rebind_traits<spawn_state_t>;
+        typename traits::allocator_type alloc(__choose_alloc(__env, sndrEnv));
+
+        auto* op = traits::allocate(alloc, 1);
+
+        try {
+          traits::construct(
+            alloc, op, alloc, std::move(senderWithEnv), static_cast<_Token&&>(__tkn));
+        } catch (...) {
+          traits::deallocate(alloc, op, 1);
+          throw;
+        }
+
+        op->__run();
+      }
+    };
+  } // namespace __spawn
+
+  using __spawn::spawn_t;
+
+  inline constexpr spawn_t spawn{};
+} // namespace stdexec
diff --git a/include/stdexec/__detail/__stop_when.hpp b/include/stdexec/__detail/__stop_when.hpp
new file mode 100644
index 000000000..00e97e22e
--- /dev/null
+++ b/include/stdexec/__detail/__stop_when.hpp
@@ -0,0 +1,174 @@
+/*
+ * Copyright (c) 2025 Ian Petersen
+ * Copyright (c) 2025 NVIDIA Corporation
+ *
+ * Licensed under the Apache License Version 2.0 with LLVM Exceptions
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *   https://llvm.org/LICENSE.txt
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#pragma once
+
+#include "__execution_fwd.hpp"
+
+#include "__basic_sender.hpp"
+#include "__concepts.hpp"
+#include "__env.hpp"
+#include "__sender_introspection.hpp"
+#include "__senders_core.hpp"
+#include "__stop_token.hpp"
+
+#include <atomic>
+#include <type_traits>
+#include <utility>
+
+namespace stdexec {
+  /////////////////////////////////////////////////////////////////////////////
+  // [exec.stop.when]
+  namespace __stop_when_ {
+
+    ////////////////////////////////////////////////////////////////////////////////////////////////
+    struct __stop_when_t {
+      // An unstoppable token can never fire, so stop-when is the identity.
+      template <sender _Sender, unstoppable_token _Token>
+      constexpr _Sender&& operator()(_Sender&& __sndr, _Token&&) const noexcept {
+        return static_cast<_Sender&&>(__sndr);
+      }
+
+      template <sender _Sender, stoppable_token _Token>
+      constexpr auto operator()(_Sender&& __sndr, _Token&& __token) const noexcept(
+        __nothrow_constructible_from<std::remove_cvref_t<_Sender>, _Sender>
+        && __nothrow_constructible_from<std::remove_cvref_t<_Token>, _Token>) {
+        return __make_sexpr<__stop_when_t>(
+          static_cast<_Token&&>(__token), static_cast<_Sender&&>(__sndr));
+      }
+    };
+
+    struct __stop_when_impl : __sexpr_defaults {
+      // Completions pass through from the child unchanged.
+      static constexpr auto get_completion_signatures =
+        []<class _Sender, class... _Env>(_Sender&&, _Env&&...) noexcept
+        -> __completion_signatures_of_t<__child_of<_Sender>, _Env...> {
+        static_assert(sender_expr_for<_Sender, __stop_when_t>);
+        return {};
+      };
+
+      // The child sees the receiver's env with our (possibly fused) token
+      // substituted as the stop token.
+      static constexpr auto get_env =
+        [](__ignore, const auto& __state, const auto& __rcvr) noexcept {
+          return __env::__join(prop(get_stop_token, __state), stdexec::get_env(__rcvr));
+        };
+
+      // A token that reports/forwards stop from either of two tokens.
+      template <class _Token1, class _Token2>
+      struct __fused_token {
+        _Token1 __tkn1_;
+        _Token2 __tkn2_;
+
+        friend constexpr bool operator==(const __fused_token&, const __fused_token&) = default;
+
+        bool stop_requested() const noexcept {
+          return __tkn1_.stop_requested() || __tkn2_.stop_requested();
+        }
+
+        bool stop_possible() const noexcept {
+          return __tkn1_.stop_possible() || __tkn2_.stop_possible();
+        }
+
+        template <class _Fn>
+        struct callback_type {
+          struct __cb {
+            callback_type* self;
+
+            void operator()() noexcept {
+              (*self)();
+            }
+          };
+
+          using __cb1_t = _Token1::template callback_type<__cb>;
+          using __cb2_t = _Token2::template callback_type<__cb>;
+
+          _Fn __fn_;
+          // guards against both underlying tokens firing: __fn_ runs once
+          [[no_unique_address]]
+          std::atomic<bool> __called_{false};
+          __cb1_t __cb1_;
+          __cb2_t __cb2_;
+
+          template <class _C>
+            requires constructible_from<_Fn, _C>
+          explicit callback_type(__fused_token&& __ftkn, _C&& __fn)
+            noexcept(__nothrow_constructible_from<_Fn, _C>)
+            : __fn_(static_cast<_C&&>(__fn))
+            , __cb1_(std::move(__ftkn.__tkn1_), __cb(this))
+            , __cb2_(std::move(__ftkn.__tkn2_), __cb(this)) {
+          }
+
+          template <class _C>
+            requires constructible_from<_Fn, _C>
+          explicit callback_type(const __fused_token& __ftkn, _C&& __fn)
+            noexcept(__nothrow_constructible_from<_Fn, _C>)
+            : __fn_(static_cast<_C&&>(__fn))
+            , __cb1_(__ftkn.__tkn1_, __cb(this))
+            , __cb2_(__ftkn.__tkn2_, __cb(this)) {
+          }
+
+          callback_type(callback_type&&) = delete;
+
+         private:
+          void operator()() noexcept {
+            if (!__called_.exchange(true, std::memory_order_relaxed)) {
+              __fn_();
+            }
+          }
+        };
+      };
+
+      struct __make_token_fn {
+        template <class _SenderToken, class _ReceiverToken>
+          requires stoppable_token<std::remove_cvref_t<_SenderToken>>
+                && unstoppable_token<std::remove_cvref_t<_ReceiverToken>>
+        std::remove_cvref_t<_SenderToken>
+          operator()(_SenderToken&& __sndrToken, _ReceiverToken&&) const noexcept {
+          // when the receiver's stop token is unstoppable, the net token is just
+          // the sender's captured token
+          return __sndrToken;
+        }
+
+        template <class _SenderToken, class _ReceiverToken>
+          requires stoppable_token<std::remove_cvref_t<_SenderToken>>
+                && stoppable_token<std::remove_cvref_t<_ReceiverToken>>
+        __fused_token<std::remove_cvref_t<_SenderToken>, std::remove_cvref_t<_ReceiverToken>>
+          operator()(_SenderToken&& __sndrToken, _ReceiverToken&& __rcvrToken) const noexcept {
+          // when the receiver's stop token is stoppable, the net token must be
+          // a fused token that responds to signals from both the sender's captured
+          // token and the receiver's token
+          return {
+            static_cast<_SenderToken&&>(__sndrToken), static_cast<_ReceiverToken&&>(__rcvrToken)};
+        }
+      };
+
+      static constexpr auto get_state =
+        []<class _Self, class _Receiver>(_Self&& __self, _Receiver& __rcvr) noexcept {
+          return __sexpr_apply(
+            static_cast<_Self&&>(__self), [&__rcvr](__ignore, auto&& token, __ignore) noexcept {
+              return __make_token_fn{}(
+                static_cast<decltype(token)&&>(token), get_stop_token(stdexec::get_env(__rcvr)));
+            });
+        };
+    };
+  } // namespace __stop_when_
+
+  using __stop_when_::__stop_when_t;
+
+  /// @brief The stop-when sender adaptor, which fuses an additional stop token
+  ///        into its child sender such that the sender responds to stop
+  ///        requests from both the given stop token and the receiver's token
+  /// @hideinitializer
+  inline constexpr __stop_when_t __stop_when{};
+
+  template <>
+  struct __sexpr_impl<__stop_when_t> : __stop_when_::__stop_when_impl { };
+} // namespace stdexec
diff --git a/include/stdexec/execution.hpp b/include/stdexec/execution.hpp
index 6f282bbbd..b01060d24 100644
--- a/include/stdexec/execution.hpp
+++ b/include/stdexec/execution.hpp
@@ -19,11 +19,13 @@
 // include these after __execution_fwd.hpp
 #include "__detail/__as_awaitable.hpp"           // IWYU pragma: export
+#include "__detail/__associate.hpp"              // IWYU pragma: export
 #include "__detail/__basic_sender.hpp"           // IWYU pragma: export
 #include "__detail/__bulk.hpp"                   // IWYU pragma: export
 #include
"__detail/__completion_signatures.hpp" // IWYU pragma: export #include "__detail/__connect_awaitable.hpp" // IWYU pragma: export #include "__detail/__continues_on.hpp" // IWYU pragma: export +#include "__detail/__counting_scopes.hpp" // IWYU pragma: export #include "__detail/__cpo.hpp" // IWYU pragma: export #include "__detail/__debug.hpp" // IWYU pragma: export #include "__detail/__domain.hpp" // IWYU pragma: export @@ -47,8 +49,10 @@ #include "__detail/__run_loop.hpp" // IWYU pragma: export #include "__detail/__schedule_from.hpp" // IWYU pragma: export #include "__detail/__schedulers.hpp" // IWYU pragma: export +#include "__detail/__scope_concepts.hpp" // IWYU pragma: export #include "__detail/__senders.hpp" // IWYU pragma: export #include "__detail/__sender_adaptor_closure.hpp" // IWYU pragma: export +#include "__detail/__spawn.hpp" // IWYU pragma: export #include "__detail/__split.hpp" // IWYU pragma: export #include "__detail/__start_detached.hpp" // IWYU pragma: export #include "__detail/__starts_on.hpp" // IWYU pragma: export diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index f403432e9..f8cea6a9c 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -29,6 +29,8 @@ set(stdexec_test_sources stdexec/concepts/test_concepts_receiver.cpp stdexec/concepts/test_concept_operation_state.cpp stdexec/concepts/test_concepts_sender.cpp + stdexec/concepts/test_concepts_scope_association.cpp + stdexec/concepts/test_concepts_scope_token.cpp stdexec/concepts/test_awaitables.cpp stdexec/algos/factories/test_just.cpp stdexec/algos/factories/test_transfer_just.cpp @@ -36,6 +38,7 @@ set(stdexec_test_sources stdexec/algos/factories/test_just_stopped.cpp stdexec/algos/factories/test_read.cpp stdexec/algos/factories/test_schedule.cpp + stdexec/algos/adaptors/test_associate.cpp stdexec/algos/adaptors/test_starts_on.cpp stdexec/algos/adaptors/test_on.cpp stdexec/algos/adaptors/test_on2.cpp @@ -56,14 +59,17 @@ set(stdexec_test_sources 
stdexec/algos/adaptors/test_stopped_as_error.cpp stdexec/algos/adaptors/test_ensure_started.cpp stdexec/algos/adaptors/test_write_env.cpp + stdexec/algos/adaptors/test_stop_when.cpp stdexec/algos/consumers/test_start_detached.cpp stdexec/algos/consumers/test_sync_wait.cpp + stdexec/algos/consumers/test_spawn.cpp stdexec/algos/other/test_execute.cpp stdexec/detail/test_completion_signatures.cpp stdexec/detail/test_utility.cpp stdexec/queries/test_env.cpp stdexec/queries/test_get_forward_progress_guarantee.cpp stdexec/queries/test_forwarding_queries.cpp + stdexec/types/test_simple_counting_scope.cpp ) add_library(common_test_settings INTERFACE) diff --git a/test/stdexec/algos/adaptors/test_associate.cpp b/test/stdexec/algos/adaptors/test_associate.cpp new file mode 100644 index 000000000..f70217389 --- /dev/null +++ b/test/stdexec/algos/adaptors/test_associate.cpp @@ -0,0 +1,287 @@ +/* + * Copyright (c) 2025 Ian Petersen + * Copyright (c) 2025 NVIDIA Corporation + * + * Licensed under the Apache License Version 2.0 with LLVM Exceptions + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * https://llvm.org/LICENSE.txt + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include +#include +#include +#include + +#include +#include + +namespace ex = stdexec; + +namespace { + TEST_CASE("associate returns a sender", "[adaptors][associate]") { + using snd_t = decltype(ex::associate(ex::just(), null_token{})); + + STATIC_REQUIRE(ex::sender); + STATIC_REQUIRE(ex::sender); + STATIC_REQUIRE(ex::sender); + } + + TEST_CASE("associate is appropriately noexcept", "[adaptors][associate]") { + // double-check our dependencies + STATIC_REQUIRE(noexcept(ex::just())); + STATIC_REQUIRE(noexcept(null_token{})); + + // null_token is no-throw default constructible and tokens must be no-throw + // copyable and movable so this whole thing had better be no-throw + STATIC_REQUIRE(noexcept(ex::associate(null_token{}))); + + // constructing and passing in a no-throw sender should let the whole + // expression be no-throw + STATIC_REQUIRE(noexcept(ex::associate(ex::just(), null_token{}))); + STATIC_REQUIRE(noexcept(ex::just() | ex::associate(null_token{}))); + + // conversely, trafficking in senders with potentially-throwing copy + // constructors should lead to the whole expression becoming potentially-throwing + const auto justString = ex::just(std::string{"Copying strings is potentially-throwing"}); + STATIC_REQUIRE(!noexcept(ex::associate(justString, null_token{}))); + STATIC_REQUIRE(!noexcept(justString | ex::associate(null_token{}))); + (void) justString; + } + + template + constexpr bool expected_completion_signatures() { + using expected_sigs = ex::completion_signatures; + using actual_sigs = ex::completion_signatures_of_t; + return expected_sigs{} == actual_sigs{}; + } + + TEST_CASE("associate has appropriate completion signatures", "[adaptors][associate]") { + { + using snd_t = decltype(ex::associate(ex::just(), null_token{})); + + STATIC_REQUIRE( + expected_completion_signatures()); + + STATIC_REQUIRE( + expected_completion_signatures()); + + STATIC_REQUIRE( + expected_completion_signatures()); + } + + { + using snd_t = 
decltype(ex::associate(ex::just(std::string{}), null_token{})); + + STATIC_REQUIRE( + expected_completion_signatures()); + + STATIC_REQUIRE( + expected_completion_signatures()); + + STATIC_REQUIRE( + expected_completion_signatures< + const snd_t&, + ex::set_value_t(std::string), + ex::set_stopped_t() + >()); + } + + { + using snd_t = decltype(ex::associate(ex::just_stopped(), null_token{})); + + STATIC_REQUIRE(expected_completion_signatures()); + + STATIC_REQUIRE(expected_completion_signatures()); + + STATIC_REQUIRE(expected_completion_signatures()); + } + + { + using snd_t = decltype(ex::associate(ex::just_error(5), null_token{})); + + STATIC_REQUIRE( + expected_completion_signatures()); + + STATIC_REQUIRE( + expected_completion_signatures()); + + STATIC_REQUIRE( + expected_completion_signatures()); + } + + { + int i = 42; + using snd_t = decltype(ex::associate(ex::just(std::ref(i)), null_token{})); + + STATIC_REQUIRE( + expected_completion_signatures< + snd_t, + ex::set_value_t(std::reference_wrapper), + ex::set_stopped_t() + >()); + + STATIC_REQUIRE( + expected_completion_signatures< + snd_t&, + ex::set_value_t(std::reference_wrapper), + ex::set_stopped_t() + >()); + + STATIC_REQUIRE( + expected_completion_signatures< + const snd_t&, + ex::set_value_t(std::reference_wrapper), + ex::set_stopped_t() + >()); + } + } + + TEST_CASE("associate is the identity with null_token", "[adaptors][associate]") { + auto checkForIdentity = [](ex::sender auto&& snd, V... 
values) { + // wait_for_values wants prvalue expected values + wait_for_value(snd, V(values)...); + wait_for_value(std::as_const(snd), V(values)...); + wait_for_value(std::move(snd), V(values)...); + }; + + // nullary set_value + checkForIdentity(ex::just() | ex::associate(null_token{})); + + // unary set_value + checkForIdentity(ex::just(42) | ex::associate(null_token{}), 42); + + // binary set_value + checkForIdentity(ex::just(42, 67) | ex::associate(null_token{}), 42, 67); + + // set_value of a reference + int i = 42; + checkForIdentity(ex::just(std::ref(i)) | ex::associate(null_token{}), std::ref(i)); + + // passing set_value(int) through to another adaptor + checkForIdentity( + ex::just(42) | ex::associate(null_token{}) | ex::then([](int i) noexcept { return i; }), 42); + + // passing set_error(int) through to another adaptor + checkForIdentity( + ex::just_error(42) | ex::associate(null_token{}) | ex::upon_error([](int i) { return i; }), + 42); + + // passing set_stopped() through to another adaptor + checkForIdentity( + ex::just_stopped() | ex::associate(null_token{}) + | ex::upon_stopped([]() noexcept { return 42; }), + 42); + } + + struct expired_token { + struct assoc { + constexpr operator bool() const noexcept { + return false; + } + + constexpr assoc try_associate() const noexcept { + return {}; + } + }; + + template + constexpr Sender&& wrap(Sender&& sndr) const noexcept { + return std::forward(sndr); + } + + constexpr assoc try_associate() const noexcept { + return {}; + } + }; + + TEST_CASE("associate is just_stopped with expired_token", "[adaptors][associate]") { + wait_for_value( + ex::just(true) | ex::associate(expired_token{}) + | ex::upon_stopped([]() noexcept { return false; }), + false); + } + + struct scope { + bool open{true}; + + struct assoc { + constexpr operator bool() const noexcept { + return !!scope_; + } + + constexpr assoc try_associate() const noexcept { + return assoc{scope_ && scope_->open ? 
scope_ : nullptr}; + } + + const scope* scope_; + }; + + struct token { + template + constexpr Sender&& wrap(Sender&& sndr) const noexcept { + return std::forward(sndr); + } + + constexpr assoc try_associate() const noexcept { + return assoc{scope_->open ? scope_ : nullptr}; + } + + const scope* scope_; + }; + + constexpr token get_token() const noexcept { + return token{this}; + } + }; + + TEST_CASE( + "copying an associate-sender re-queries for a new association", + "[adaptors][associate]") { + STATIC_REQUIRE(ex::scope_token); + STATIC_REQUIRE(ex::scope_association); + + scope s; + + auto snd = ex::associate(ex::just(42), s.get_token()); + + // expect this copy of snd to complete with a value because the scope is still open + wait_for_value(snd | ex::upon_stopped([]() noexcept { return 67; }), 42); + + // close the scope + s.open = false; + + // now expect the copy to complete with stopped because we closed the scope + wait_for_value(snd | ex::upon_stopped([]() noexcept { return 67; }), 67); + + // the original should complete with a value even though it's started after closing the scope + wait_for_value(std::move(snd), 42); + } + + TEST_CASE( + "the sender argument is eagerly destroyed when try_associate fails", + "[adaptors][associate]") { + bool deleted = false; + using deleter_t = decltype([](bool* p) noexcept { *p = true; }); + std::unique_ptr ptr(&deleted); + + auto snd = ex::just(std::move(ptr)) | ex::associate(expired_token{}); + + REQUIRE(deleted == true); + + STATIC_REQUIRE(!std::copy_constructible); + (void) snd; + } + + // TODO: check the pass-through nature of __sync_attrs + // TODO: check the pass-through stop request behaviour + // TODO: confirm timing of destruction of opstate relative to release of association + // TODO: confirm that the TODO list is exhaustive +} // namespace diff --git a/test/stdexec/algos/adaptors/test_stop_when.cpp b/test/stdexec/algos/adaptors/test_stop_when.cpp new file mode 100644 index 000000000..d522e16d0 --- /dev/null 
+++ b/test/stdexec/algos/adaptors/test_stop_when.cpp @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2025 Ian Petersen + * Copyright (c) 2025 NVIDIA Corporation + * + * Licensed under the Apache License Version 2.0 with LLVM Exceptions + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * https://llvm.org/LICENSE.txt + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include +#include +#include + +namespace ex = stdexec; + +namespace { + TEST_CASE("stop-when of an unstoppable token is the identity", "[adaptors][stop-when]") { + auto snd = ex::just(42); + auto checkIdentity = [](auto&& snd) { + auto&& result = ex::__stop_when(std::forward(snd), ex::never_stop_token{}); + + REQUIRE(&snd == &result); + }; + + checkIdentity(snd); + checkIdentity(std::as_const(snd)); + checkIdentity(std::move(snd)); + } + + TEST_CASE("stop-when(just(), token) returns a sender", "[adaptors][stop-when]") { + ex::inplace_stop_source source; + auto snd = ex::__stop_when(ex::just(), source.get_token()); + STATIC_REQUIRE(ex::sender); + wait_for_value(snd); + } + + auto isTokenStoppable() { + return ex::read_env(ex::get_stop_token) + | ex::then([](auto token) noexcept { return token.stop_possible(); }); + } + + TEST_CASE( + "stop-when substitutes its token when the receiver's token is unstoppable", + "[adaptors][stop-when]") { + + // check that, by default, wait_for_value provides an unstoppable stop token + wait_for_value(isTokenStoppable(), false); + + // now, check that stop-when mixes in a stoppable token + + ex::inplace_stop_source source; + + REQUIRE(source.get_token().stop_possible()); + + 
wait_for_value(ex::__stop_when(isTokenStoppable(), source.get_token()), true); + } + + TEST_CASE( + "stop-when fuses its token with the receiver's when both are stoppable", + "[adaptors][stop-when]") { + ex::inplace_stop_source source; + wait_for_value( + ex::__stop_when(ex::__stop_when(isTokenStoppable(), source.get_token()), source.get_token()), + true); + } + + template + ex::sender auto make_stop_callback(Fn&& fn) noexcept { + return ex::read_env(ex::get_stop_token) + | ex::then([fn = std::forward(fn)](auto token) mutable noexcept { + using cb_t = decltype(token)::template callback_type>; + return std::optional(std::in_place, std::move(token), std::move(fn)); + }); + } + + TEST_CASE("callbacks registered with stop-when's token can be invoked", "[adaptors][stop-when]") { + int invokeCount = 0; + auto snd = make_stop_callback([&invokeCount]() noexcept { invokeCount++; }); + + { + ex::inplace_stop_source source; + wait_for_value( + snd | ex::then([&](auto&& optCallback) noexcept { + source.request_stop(); + optCallback.reset(); + return invokeCount; + }), + 0); + } + + { + ex::inplace_stop_source source; + + wait_for_value( + ex::__stop_when(snd, source.get_token()) | ex::then([&](auto&& optCallback) noexcept { + source.request_stop(); + optCallback.reset(); + return invokeCount; + }), + 1); + } + + { + ex::inplace_stop_source source1; + ex::inplace_stop_source source2; + + wait_for_value( + ex::__stop_when(snd, source2.get_token()) | ex::then([&](auto&& optCallback) noexcept { + source1.request_stop(); + source2.request_stop(); + optCallback.reset(); + return invokeCount; + }) | ex::write_env(ex::prop(ex::get_stop_token, source1.get_token())), + 2); + } + } +} // namespace diff --git a/test/stdexec/algos/consumers/test_spawn.cpp b/test/stdexec/algos/consumers/test_spawn.cpp new file mode 100644 index 000000000..b19e8e597 --- /dev/null +++ b/test/stdexec/algos/consumers/test_spawn.cpp @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2025 Ian Petersen + * Copyright (c) 2025 
NVIDIA Corporation + * + * Licensed under the Apache License Version 2.0 with LLVM Exceptions + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * https://llvm.org/LICENSE.txt + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +#include +#include +#include +#include + +namespace ex = stdexec; + +namespace { + // a sender adaptor that prepends the provided environment onto the provided sender's environment + struct with_attrs_t { + template + auto operator()(Sender&& sender, Env&& env) const noexcept( + std::is_nothrow_constructible_v, Sender> + && std::is_nothrow_constructible_v, Env>) { + return ex::__make_sexpr(std::forward(env), std::forward(sender)); + } + }; + + inline constexpr with_attrs_t with_attrs{}; + + struct with_attrs_impl : ex::__sexpr_defaults { + static constexpr auto get_attrs = + [](const Env& env, const ex::sender auto& child) noexcept { + return ex::__env::__join(env, ex::get_env(child)); + }; + + static constexpr auto get_completion_signatures = + [](Sender&&, Env&&...) 
noexcept + -> ex::__completion_signatures_of_t>, Env...> { + return {}; + }; + }; + + struct counting_resource : std::pmr::memory_resource { + counting_resource() noexcept + : counting_resource(*std::pmr::new_delete_resource()) { + } + + explicit counting_resource(std::pmr::memory_resource& upstream) noexcept + : upstream_(upstream) { + } + + counting_resource(counting_resource&&) = delete; + + ~counting_resource() = default; + + std::intmax_t allocated() const noexcept { + return allocated_; + } + + private: + void* do_allocate(std::size_t bytes, std::size_t alignment) override { + auto ret = upstream_.allocate(bytes, alignment); + allocated_ += bytes; + return ret; + } + + void do_deallocate(void* p, std::size_t bytes, std::size_t alignment) override { + allocated_ -= bytes; + upstream_.deallocate(p, bytes, alignment); + } + + bool do_is_equal(const std::pmr::memory_resource& other) const noexcept override { + auto* downCast = dynamic_cast(&other); + return downCast != nullptr && (upstream_ == downCast->upstream_); + } + + std::pmr::memory_resource& upstream_; + std::intmax_t allocated_{}; + }; + + struct scope_with_alloc { + std::pmr::polymorphic_allocator<> alloc; + + struct token : null_token { + const scope_with_alloc* scope_; + + template + auto wrap(Sender&& sender) const + noexcept(std::is_nothrow_constructible_v, Sender>) { + return with_attrs(std::forward(sender), ex::prop(ex::get_allocator, scope_->alloc)); + } + }; + + token get_token() const noexcept { + return token{{}, this}; + } + }; +} // namespace + +template <> +struct ex::__sexpr_impl : with_attrs_impl { }; + +namespace { + TEST_CASE("Trivial spawns compile", "[consumers][spawn]") { + ex::spawn(ex::just(), null_token{}); + ex::spawn(ex::just_stopped(), null_token{}); + } + + TEST_CASE("spawn doesn't leak", "[consumers][spawn]") { + counting_resource rsc; + std::pmr::polymorphic_allocator<> alloc(&rsc); + + REQUIRE(rsc.allocated() == 0); + + ex::spawn( + ex::read_env(ex::get_allocator) | 
ex::then([&](auto&& envAlloc) noexcept { + // check that the allocator provided to spawn is in our environment + REQUIRE(alloc == envAlloc); + // check that we actually allocated something to run this op + REQUIRE(rsc.allocated() > 0); + }), + null_token{}, + ex::prop(ex::get_allocator, alloc)); + + REQUIRE(rsc.allocated() == 0); + } + + TEST_CASE("spawn reads an allocator from the sender's environment", "[consumers][spawn]") { + counting_resource rsc; + std::pmr::polymorphic_allocator<> alloc(&rsc); + + scope_with_alloc scope{alloc}; + + REQUIRE(rsc.allocated() == 0); + + ex::spawn( + ex::read_env(ex::get_allocator) | ex::then([&](auto&& envAlloc) noexcept { + // we should've pulled the scope's allocator into our environment + REQUIRE(alloc == envAlloc); + + // we should've allocated some memory for this operation + REQUIRE(rsc.allocated() > 0); + }), + scope.get_token()); + + REQUIRE(rsc.allocated() == 0); + } + + TEST_CASE( + "The allocator provided directly to spawn overrides the allocator in the sender's environment", + "[consumers][spawn]") { + + counting_resource rsc1; + + std::array buffer{}; + std::pmr::monotonic_buffer_resource bumpAlloc(buffer.data(), buffer.size()); + + counting_resource rsc2(bumpAlloc); + + std::pmr::polymorphic_allocator<> alloc1(&rsc1); + std::pmr::polymorphic_allocator<> alloc2(&rsc2); + + REQUIRE(alloc1 != alloc2); + + scope_with_alloc scope{alloc1}; + + REQUIRE(rsc1.allocated() == 0); + REQUIRE(rsc2.allocated() == 0); + + ex::spawn( + ex::read_env(ex::get_allocator) | ex::then([&](auto& envAlloc) noexcept { + // the allocator in the environment should be the one provided to spawn + // as an explicit argument and not the one provided by the scope + REQUIRE(alloc1 != envAlloc); + REQUIRE(alloc2 == envAlloc); + + // we should have allocated some memory for the op from rsc2 but not from rsc + REQUIRE(rsc1.allocated() == 0); + REQUIRE(rsc2.allocated() > 0); + }), + scope.get_token(), + ex::prop(ex::get_allocator, alloc2)); + + 
REQUIRE(rsc1.allocated() == 0); + REQUIRE(rsc2.allocated() == 0); + } + + TEST_CASE("spawn tolerates throwing scope tokens", "[consumers][spawn]") { + counting_resource rsc; + std::pmr::polymorphic_allocator alloc(&rsc); + + struct throwing_token : null_token { + const counting_resource* rsc; + + assoc try_associate() const { + REQUIRE(rsc->allocated() > 0); + throw std::runtime_error("nope"); + } + }; + + REQUIRE(rsc.allocated() == 0); + + bool threw = false; + try { + ex::spawn(ex::just(), throwing_token{{}, &rsc}, ex::prop(ex::get_allocator, alloc)); + } catch (const std::runtime_error& e) { + threw = true; + REQUIRE(std::string{"nope"} == e.what()); + } + + REQUIRE(threw); + + REQUIRE(rsc.allocated() == 0); + } + + TEST_CASE("spawn tolerates expired scope tokens", "[consumers][spawn]") { + struct expired_token : null_token { // inherit the wrap method template + const counting_resource* rsc; + bool* tried; + + struct assoc { + constexpr explicit operator bool() const noexcept { + return false; + } + + constexpr assoc try_associate() const noexcept { + return {}; + } + }; + + assoc try_associate() const { + REQUIRE(rsc->allocated() > 0); + *tried = true; + return {}; + } + }; + + counting_resource rsc; + std::pmr::polymorphic_allocator alloc(&rsc); + + REQUIRE(rsc.allocated() == 0); + + bool triedToAssociate = false; + + ex::spawn( + ex::just(), expired_token{{}, &rsc, &triedToAssociate}, ex::prop(ex::get_allocator, alloc)); + + REQUIRE(rsc.allocated() == 0); + REQUIRE(triedToAssociate); + } +} // namespace diff --git a/test/stdexec/concepts/test_concepts_scope_association.cpp b/test/stdexec/concepts/test_concepts_scope_association.cpp new file mode 100644 index 000000000..c77bcc6fa --- /dev/null +++ b/test/stdexec/concepts/test_concepts_scope_association.cpp @@ -0,0 +1,191 @@ +/* + * Copyright (c) 2022 Ian Petersen + * Copyright (c) 2025 NVIDIA Corporation + * + * Licensed under the Apache License Version 2.0 with LLVM Exceptions + * (the "License"); you may 
not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * https://llvm.org/LICENSE.txt + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +namespace ex = stdexec; + +namespace { + + // a "null" association that is always truthy and for which try_associate() always succeeds + struct null_association { + // this need not be explicit, although it should be + constexpr operator bool() const noexcept { + return true; + } + + // this may throw, although it need not + constexpr null_association try_associate() const noexcept { + return {}; + } + }; + + // a CRTP base that lets us produce variations on null_associaton with the right return type on try_associate + template + struct crtp_association : null_association { + constexpr Derived try_associate() const noexcept { + return {}; + } + }; + + struct throwing_specials : crtp_association { + // it's ok for the non-move operators to throw + throwing_specials() noexcept(false) { + // nvcc doesn't respect the noexcept(false) with = default + } + throwing_specials(const throwing_specials&) noexcept(false) { + // nvcc doesn't respect the noexcept(false) with = default + } + throwing_specials(throwing_specials&&) noexcept = default; + ~throwing_specials() = default; + + throwing_specials& operator=(const throwing_specials&) noexcept(false) { + // nvcc doesn't respect the noexcept(false) with = default + return *this; + } + throwing_specials& operator=(throwing_specials&&) noexcept = default; + }; + + struct move_only : crtp_association { + // copy operations are not required + move_only() = default; + move_only(move_only&&) = default; + ~move_only() = default; + + move_only& 
operator=(move_only&&) = default; + }; + + struct explicit_bool : crtp_association { + // the bool conversion may be explicit + constexpr explicit operator bool() const noexcept { + return true; + } + }; + + struct throwing_reassociate : crtp_association { + // try_associate may throw + constexpr throwing_reassociate try_associate() const noexcept(false) { + return {}; + } + }; + + TEST_CASE( + "Scope association concept accepts basic association types", + "[concepts][scope_association]") { + // scope_association should accept the basic null_association + STATIC_REQUIRE(ex::scope_association); + + // the default constructor and copy operations may throw + STATIC_REQUIRE(ex::scope_association); + // double check that we're testing what we think we are + STATIC_REQUIRE(!ex::__nothrow_constructible_from); + STATIC_REQUIRE(!ex::__nothrow_constructible_from); + STATIC_REQUIRE(!ex::__nothrow_assignable_from); + + // copy operations are not required + STATIC_REQUIRE(ex::scope_association); + + // the bool conversion may be explicit + STATIC_REQUIRE(ex::scope_association); + + // try_associate may throw + STATIC_REQUIRE(ex::scope_association); + } + + // invalid association because of immovability + struct immovable : crtp_association { + immovable(immovable&&) = delete; + }; + + // conditionally-invalid association because of throwing move operations + template + struct throwing_moves : crtp_association> { + throwing_moves() = default; + throwing_moves(throwing_moves&&) noexcept(ThrowingCtor) { + // nvcc doesn't respect the noexcept(false) with = default + } + ~throwing_moves() = default; + + throwing_moves& operator=(throwing_moves&&) noexcept(ThrowingAssign) { + // nvcc doesn't respect the noexcept(false) with = default + return *this; + } + }; + + // invalid assocation because of a throwing move constructor + using throwing_move_ctor = throwing_moves; + // invalid assocation because of a throwing move assignment operator + using throwing_move_assign = throwing_moves; + 
+ // invalid assocation because of a missing default constructor + struct missing_ctor : crtp_association { + missing_ctor() = delete; + }; + + // invalid assocation because of a throwing conversion to bool + struct throwing_boolish : crtp_association { + constexpr explicit operator bool() const noexcept(false) { + return true; + } + }; + + // invalid assocation because try_associate returns the wrong type + struct cannot_reassociate : null_association { }; + + // invalid association because operator bool is non-const + struct non_const_boolish : crtp_association { + constexpr explicit operator bool() noexcept { + return true; + } + }; + + // invalid association because try_associate is non-const + struct non_const_try_associate : null_association { + constexpr non_const_try_associate try_associate() noexcept { + return {}; + }; + }; + + TEST_CASE( + "Scope association concept rejects non-association types", + "[concepts][scope_association]") { + STATIC_REQUIRE(!ex::scope_association); + + // movability is required + STATIC_REQUIRE(!ex::scope_association); + + // the move operations must be non-throwing + STATIC_REQUIRE(!ex::scope_association); + STATIC_REQUIRE(!ex::scope_association); + + // default initialization is required + STATIC_REQUIRE(!ex::scope_association); + + // conversion to bool must not throw + STATIC_REQUIRE(!ex::scope_association); + + // try_associate must return an association + STATIC_REQUIRE(!ex::scope_association); + + // operator bool must be const qualified + STATIC_REQUIRE(!ex::scope_association); + + // try_associate must be const qualified + STATIC_REQUIRE(!ex::scope_association); + } +} // namespace diff --git a/test/stdexec/concepts/test_concepts_scope_token.cpp b/test/stdexec/concepts/test_concepts_scope_token.cpp new file mode 100644 index 000000000..78468b5d4 --- /dev/null +++ b/test/stdexec/concepts/test_concepts_scope_token.cpp @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2022 Ian Petersen + * Copyright (c) 2025 NVIDIA Corporation + * 
+ * Licensed under the Apache License Version 2.0 with LLVM Exceptions + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * https://llvm.org/LICENSE.txt + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include + +namespace ex = stdexec; + +namespace { + + TEST_CASE("Scope token helpers are correctly defined", "[concepts][scope_token]") { + // check the test-sender and test-env definitions are appropriate + STATIC_REQUIRE(ex::sender); + STATIC_REQUIRE(ex::sender_in>); + STATIC_REQUIRE(ex::operation_state); + } + + // a "null" token that can always create new associations + struct null_token { + // the always-truthy association type + struct assoc { + // this need not be explicit, although it should be + constexpr operator bool() const noexcept { + return true; + } + + // this may throw, although it need not + constexpr assoc try_associate() const noexcept { + return {}; + } + }; + + constexpr assoc try_associate() const noexcept { + return {}; + } + + template + Sender&& wrap(Sender&& snd) const noexcept { + return std::forward(snd); + } + }; + + struct throwing_try_associate : null_token { + constexpr assoc try_associate() const noexcept(false) { + return {}; + } + }; + + struct throwing_wrap : null_token { + template + constexpr Sender&& wrap(Sender&& snd) const noexcept(false) { + return std::forward(snd); + } + }; + + struct wrapping_wrap : null_token { + template + struct wrapper : Sender { + wrapper(const Sender& snd) + : Sender(snd) { + } + + wrapper(Sender&& snd) + : Sender(std::move(snd)) { + } + }; + + template + auto wrap(Sender&& snd) const noexcept { + return 
wrapper{std::forward(snd)}; + } + }; + + TEST_CASE("Scope token concept accepts basic token types", "[concepts][scope_token]") { + // scope_token should accept the basic null_token + STATIC_REQUIRE(ex::scope_token); + + // it's ok for try_associate to throw + STATIC_REQUIRE(ex::scope_token); + + // it's ok for wrap to throw + STATIC_REQUIRE(ex::scope_token); + + // it's ok for wrap to change the type of its argument + STATIC_REQUIRE(ex::scope_token); + } + + struct move_only : null_token { + move_only() = default; + move_only(move_only&&) = default; + ~move_only() = default; + + move_only& operator=(move_only&&) = default; + }; + + struct non_const_try_associate : null_token { + assoc try_associate() noexcept { + return {}; + } + }; + + struct non_const_wrap : null_token { + template + Sender&& wrap(Sender&& snd) noexcept { + return std::forward(snd); + } + }; + + TEST_CASE("Scope token concept rejects non-token types", "[concepts][scope_token]") { + STATIC_REQUIRE(!ex::scope_token); + + // tokens must be copyable + STATIC_REQUIRE(!ex::scope_token); + + // try_associate must be const-qualified + STATIC_REQUIRE(!ex::scope_token); + + // wrap must be const-qualified + STATIC_REQUIRE(!ex::scope_token); + } +} // namespace diff --git a/test/stdexec/types/test_simple_counting_scope.cpp b/test/stdexec/types/test_simple_counting_scope.cpp new file mode 100644 index 000000000..f5d6bab00 --- /dev/null +++ b/test/stdexec/types/test_simple_counting_scope.cpp @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2025 Ian Petersen + * Copyright (c) 2025 NVIDIA Corporation + * + * Licensed under the Apache License Version 2.0 with LLVM Exceptions + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * https://llvm.org/LICENSE.txt + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include +#include +#include + +namespace ex = stdexec; + +namespace { + TEST_CASE("unused scopes are safe to destroy", "[types][simple_counting_scope]") { + ex::simple_counting_scope scope; + (void) scope; + } + + TEST_CASE("the join-sender is a sender", "[types][simgple_counting_scope]") { + ex::simple_counting_scope scope; + + STATIC_REQUIRE(ex::sender); + + ex::run_loop loop; + auto env = ex::prop(ex::get_scheduler, loop.get_scheduler()); + + STATIC_REQUIRE(ex::sender_in); + STATIC_REQUIRE(ex::sender_in); + } + + TEST_CASE("joined scopes are safe to destroy", "[types][simple_counting_scope]") { + ex::simple_counting_scope scope; + ex::spawn(ex::just(), scope.get_token()); + wait_for_value(scope.join()); + } +} // namespace diff --git a/test/test_common/scope_tokens.hpp b/test/test_common/scope_tokens.hpp new file mode 100644 index 000000000..619c3a44a --- /dev/null +++ b/test/test_common/scope_tokens.hpp @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2025 Ian Petersen + * Copyright (c) 2025 NVIDIA Corporation + * + * Licensed under the Apache License Version 2.0 with LLVM Exceptions + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * https://llvm.org/LICENSE.txt + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#pragma once + +#include + +#include + +namespace { + struct null_token { + struct assoc { + constexpr explicit operator bool() const noexcept { + return true; + } + + constexpr assoc try_associate() const noexcept { + return {}; + } + }; + + template + constexpr Sender&& wrap(Sender&& sndr) const noexcept { + return std::forward(sndr); + } + + constexpr assoc try_associate() const noexcept { + return {}; + } + }; +} // namespace