Cleanup: Remove IndexRange::as_span() static array and cache

After 2cfcb8b0b8 this was only used in one place, which was easily
replaced. In practice this avoids building a statically allocated
array whenever the split edges code is called.

Pull Request: https://projects.blender.org/blender/blender/pulls/108249
Hans Goudey 2023-05-25 15:42:54 +02:00 committed by Hans Goudey
parent a50768616f
commit 986ff5a3a4
5 changed files with 2 additions and 72 deletions


@@ -33,9 +33,6 @@
  *
  * Ideally this could be even closer to Python's enumerate(). We might get that in the
  * future with newer C++ versions.
- *
- * One other important feature is the as_span method. This method returns a Span<int64_t>
- * that contains the interval as individual numbers.
  */
 
 #include <algorithm>
@@ -325,22 +322,11 @@ class IndexRange {
     return IndexRange(start_ + n, size_);
   }
 
-  /**
-   * Get read-only access to a memory buffer that contains the range as actual numbers.
-   */
-  Span<int64_t> as_span() const;
-
   friend std::ostream &operator<<(std::ostream &stream, IndexRange range)
   {
     stream << "[" << range.start() << ", " << range.one_after_last() << ")";
     return stream;
   }
-
- private:
-  static std::atomic<int64_t> s_current_array_size;
-  static std::atomic<int64_t *> s_current_array;
-
-  Span<int64_t> as_span_internal() const;
 };
 
 struct AlignedIndexRanges {
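For reference, the removed method materialized the interval as actual integers: IndexRange(4, 6) covers the half-open interval [4, 10), so its span was {4, 5, 6, 7, 8, 9}. A minimal standalone sketch of that contract, and of the operator<< output that survives the cleanup, using only the standard library rather than Blender types:

    #include <cstdint>
    #include <iostream>
    #include <numeric>
    #include <vector>

    int main()
    {
      /* Model of IndexRange(4, 6): start = 4, size = 6, one_after_last = 10. */
      const int64_t start = 4;
      const int64_t size = 6;

      /* What as_span() used to return: the interval as individual numbers. */
      std::vector<int64_t> values(size);
      std::iota(values.begin(), values.end(), start); /* 4, 5, 6, 7, 8, 9 */

      /* What the remaining operator<< prints for this range: "[4, 10)" */
      std::cout << "[" << start << ", " << start + size << ")\n";
    }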


@@ -766,16 +766,4 @@ template<typename T> class MutableSpan {
   }
 };
 
-/** This is defined here, because in `BLI_index_range.hh` `Span` is not yet defined. */
-inline Span<int64_t> IndexRange::as_span() const
-{
-  const int64_t min_required_size = start_ + size_;
-  const int64_t current_array_size = s_current_array_size.load(std::memory_order_acquire);
-  const int64_t *current_array = s_current_array.load(std::memory_order_acquire);
-  if (min_required_size <= current_array_size) {
-    return Span<int64_t>(current_array + start_, size_);
-  }
-  return this->as_span_internal();
-}
-
 } /* namespace blender */
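The inline function deleted above was the lock-free fast path of a double-checked pattern: two acquire loads and a bounds check, with the mutex-guarded as_span_internal() as the slow path behind it. A simplified standalone model of the idiom, with illustrative names rather than Blender's (identity_buffer and grow_to are hypothetical, and the sequential std::iota fill stands in for the original's parallel, task-isolated fill):

    #include <atomic>
    #include <cstdint>
    #include <mutex>
    #include <numeric>
    #include <vector>

    /* Illustrative stand-ins for s_current_array / s_current_array_size. Old
     * buffers stay alive in `arrays`, so pointers handed out earlier remain
     * valid after the shared buffer grows. */
    static std::vector<std::vector<int64_t>> arrays;
    static std::mutex growth_mutex;
    static std::atomic<int64_t> buffer_size{0};
    static std::atomic<const int64_t *> buffer{nullptr};

    /* Slow path (the role of as_span_internal): grow under the mutex, then
     * publish the pointer before the size, both with release stores. */
    static const int64_t *grow_to(const int64_t required)
    {
      std::lock_guard<std::mutex> lock(growth_mutex);
      /* Double-checked: another thread may have grown the buffer already. */
      if (required <= buffer_size.load(std::memory_order_relaxed)) {
        return buffer.load(std::memory_order_relaxed);
      }
      std::vector<int64_t> new_array(required);
      std::iota(new_array.begin(), new_array.end(), 0);
      arrays.push_back(std::move(new_array));
      buffer.store(arrays.back().data(), std::memory_order_release);
      buffer_size.store(required, std::memory_order_release);
      return arrays.back().data();
    }

    /* Fast path (the role of the deleted inline as_span): two acquire loads
     * and a bounds check, no locking. The acquire loads pair with the release
     * stores above, so a passing size check guarantees the data behind the
     * pointer is fully initialized. */
    const int64_t *identity_buffer(const int64_t required)
    {
      const int64_t size = buffer_size.load(std::memory_order_acquire);
      const int64_t *data = buffer.load(std::memory_order_acquire);
      if (required <= size) {
        return data;
      }
      return grow_to(required);
    }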


@@ -10,40 +10,6 @@
 
 namespace blender {
 
-static RawVector<RawArray<int64_t, 0>> arrays;
-static std::mutex current_array_mutex;
-
-std::atomic<int64_t> IndexRange::s_current_array_size = 0;
-std::atomic<int64_t *> IndexRange::s_current_array = nullptr;
-
-Span<int64_t> IndexRange::as_span_internal() const
-{
-  int64_t min_required_size = start_ + size_;
-  std::lock_guard<std::mutex> lock(current_array_mutex);
-  /* Double checked lock. */
-  if (min_required_size <= s_current_array_size) {
-    return Span<int64_t>(s_current_array + start_, size_);
-  }
-  /* Isolate, because a mutex is locked. */
-  threading::isolate_task([&]() {
-    int64_t new_size = std::max<int64_t>(1000, power_of_2_max_u(min_required_size));
-    RawArray<int64_t, 0> new_array(new_size);
-    threading::parallel_for(IndexRange(new_size), 4096, [&](const IndexRange range) {
-      for (const int64_t i : range) {
-        new_array[i] = i;
-      }
-    });
-    arrays.append(std::move(new_array));
-    s_current_array.store(arrays.last().data(), std::memory_order_release);
-    s_current_array_size.store(new_size, std::memory_order_release);
-  });
-  return Span<int64_t>(s_current_array + start_, size_);
-}
-
 AlignedIndexRanges split_index_range_by_alignment(const IndexRange range, const int64_t alignment)
 {
   BLI_assert(is_power_of_2_i(alignment));
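One detail of the removed slow path: the shared array grew to std::max<int64_t>(1000, power_of_2_max_u(min_required_size)), i.e. at least 1000 elements and otherwise the next power of two, so a sequence of slightly larger requests does not reallocate every time. In standard C++20 the rounding step is std::bit_ceil; a quick standalone check of that sizing rule:

    #include <algorithm>
    #include <bit>
    #include <cstdint>
    #include <iostream>

    int main()
    {
      /* Mirrors std::max<int64_t>(1000, power_of_2_max_u(min_required_size)). */
      for (const uint64_t required : {1ull, 900ull, 1500ull, 100000ull}) {
        const uint64_t new_size = std::max<uint64_t>(1000, std::bit_ceil(required));
        std::cout << required << " -> " << new_size << "\n";
      }
      /* Prints: 1 -> 1000, 900 -> 1024, 1500 -> 2048, 100000 -> 131072 */
    }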


@@ -219,17 +219,6 @@ TEST(index_range, TakeBackLargeN)
   EXPECT_EQ(slice.size(), 4);
 }
 
-TEST(index_range, AsSpan)
-{
-  IndexRange range = IndexRange(4, 6);
-  Span<int64_t> span = range.as_span();
-  EXPECT_EQ(span.size(), 6);
-  EXPECT_EQ(span[0], 4);
-  EXPECT_EQ(span[1], 5);
-  EXPECT_EQ(span[2], 6);
-  EXPECT_EQ(span[3], 7);
-}
-
 TEST(index_range, constexpr_)
 {
   constexpr IndexRange range = IndexRange(1, 1);
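The deleted AsSpan test pinned down exactly the identity property (element i of the span equals start + i). After this commit the same expectation can be phrased against a std::iota-filled vector; a hypothetical GTest-style sketch, not part of this change:

    #include <cstdint>
    #include <numeric>
    #include <vector>

    #include <gtest/gtest.h>

    TEST(index_range, IotaIdentity)
    {
      /* The values the removed AsSpan test expected from IndexRange(4, 6). */
      std::vector<int64_t> values(6);
      std::iota(values.begin(), values.end(), 4);
      EXPECT_EQ(values.size(), 6);
      EXPECT_EQ(values[0], 4);
      EXPECT_EQ(values[3], 7);
    }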


@@ -413,7 +413,8 @@ void split_edges(Mesh &mesh,
   });
 
   /* Used for transferring attributes. */
-  Vector<int> new_to_old_edges_map(IndexRange(new_edges.size()).as_span());
+  Vector<int> new_to_old_edges_map(new_edges.size());
+  std::iota(new_to_old_edges_map.begin(), new_to_old_edges_map.end(), 0);
 
   /* Step 1: Split the edges. */
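The replacement fills the vector's own storage with an ascending sequence instead of copying from the shared static array; std::iota comes from <numeric>, an include not visible in this hunk. A standalone sketch of the new pattern, with std::vector standing in for Blender's Vector and new_edges_size as a stand-in for new_edges.size():

    #include <cstdint>
    #include <numeric>
    #include <vector>

    int main()
    {
      const int64_t new_edges_size = 8; /* stand-in for new_edges.size() */

      /* Identity mapping: new edge i initially maps to old edge i. */
      std::vector<int> new_to_old_edges_map(new_edges_size);
      std::iota(new_to_old_edges_map.begin(), new_to_old_edges_map.end(), 0);
    }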