[gantt] render opid descriptions correctly

pull/1179/head
Tim Stack 10 months ago
parent f7da1df59b
commit 2da7361097

@ -892,4 +892,11 @@ struct frag_hasher {
}
};
/// Hash functor that lets intern_string_t be used as a key in the
/// robin_hood hash containers declared below (see desc_field_set /
/// desc_cap_map).  Delegates to the project-wide hash_str() helper.
struct intern_hasher {
    size_t operator()(const intern_string_t& is) const
    {
        const char* bytes = is.c_str();
        const size_t byte_count = is.size();

        return hash_str(bytes, byte_count);
    }
};
#endif

@ -12,7 +12,7 @@
"pattern": "^(?<timestamp>\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d{3}(?:Z|[-+]\\d{2}:\\d{2})) (?<level>\\w+)(?:\\(\\d+\\)+)? (?<prc>[\\w\\-]+)\\[(?<tid>\\w+)\\]:? \\[(?:opI(?:D|d)=(?<opid>[^\\]]+))\\]\\s*(?<body>.*)$"
},
"section": {
"pattern": "^(?<timestamp>\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d{3}(?:Z|[-+]\\d{2}:\\d{2})) (?:- last log rotation time, \\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d{3}(?:Z|[-+]\\d{2}:\\d{2}))?\\s*(ESX KMX Agent started.|(?:- time the service was last started(?: \\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}Z)?, )?Section for (?:[^,]+), pid=(?<tid>\\w+).*)"
"pattern": "^(?<timestamp>\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d{3}(?:Z|[-+]\\d{2}:\\d{2})) (?:- last log rotation time, \\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d{3}(?:Z|[-+]\\d{2}:\\d{2}))?\\s*(ESX KMX Agent started.|(?:- time the service was last started(?: \\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}(?:Z|[-+]\\d{2}:\\d{2}))?, )?Section for (?:[^,]+), pid=(?<tid>\\w+).*)"
},
"esx-section": {
"pattern": "^(?<timestamp>\\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d{3}(?:Z|[-+]\\d{2}:\\d{2})) (?<level>\\w+)(?:\\(\\d+\\)+) (?<prc>[\\w\\-]+)\\[(?<tid>\\w+)\\]: (?:Logs rotated. \\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d{3}(?:Z|[-+]\\d{2}:\\d{2}))?(?:- last log rotation time, \\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}\\.\\d{3}(?:Z|[-+]\\d{2}:\\d{2}))?\\s*(ESX KMX Agent started.|(?:- time the service was last started(?: \\d{4}-\\d{2}-\\d{2}T\\d{2}:\\d{2}:\\d{2}.\\d{3}Z)?, )?Section for (?:[^,]+), pid=(?:\\w+).*)"
@ -276,6 +276,9 @@
},
{
"line": "2022-06-02T11:49:41.274Z INFO vsan-mgmt[51740] [VsanVcObjectHelper::wrapper opID=SWI-2230eb26-8c37] Ready to get single executor result for the key ['_QuerySpaceUsage', 'vsan-cluster-space-report-system', 'domain-c115', '', 'False'] in timeout 600\n2022-06-02T12:23:46.807Z [pool-2-thread-18] INFO opId= com.vmware.vim.storage.common.task.CustomThreadPoolExecutor - [VLSI-client] Request took 4 millis to execute.\n2022-06-02T12:26:24.319Z INFO vsan-mgmt[16622] [VsanVcPerformanceManagerImpl::QueryClusterHealth opID=21fcddd1] CMMDS primary exists but no stats primary, check node information again.\n2022-06-02T12:26:27.109Z info vsanvcmgmtd[38723] [vSAN@6876 sub=CnsSync] Sync ds:///vmfs/volumes/5f65bf61-0e36b15d-fbd6-005056a00b50/: startVClock = 0, fullSync = true"
},
{
"line": "2023-08-04T11:01:42.873-07:00 info vmware-vum-server[192680] [Originator@6876 sub=Libs] lib/ssl: OpenSSL using FIPS provider"
}
]
}

@ -52,6 +52,8 @@ static const std::vector<std::chrono::seconds> TIME_SPANS = {
365 * 24h,
};
static constexpr size_t MAX_OPID_WIDTH = 60;
gantt_header_overlay::gantt_header_overlay(std::shared_ptr<gantt_source> src)
: gho_src(src)
{
@ -209,6 +211,8 @@ gantt_source::text_value_for_line(textview_curses& tc,
this->gs_rendered_line.clear();
auto total_msgs = row.or_value.get_total_msgs();
auto truncated_name = row.or_name.to_string();
truncate_to(truncated_name, MAX_OPID_WIDTH);
this->gs_rendered_line
.append(duration_str, VC_ROLE.value(role_t::VCR_OFFSET_TIME))
.append(" ")
@ -218,12 +222,12 @@ gantt_source::text_value_for_line(textview_curses& tc,
row.or_value.otr_level_counts[log_level_t::LEVEL_WARNING],
total_msgs)))
.append(" ")
.append(lnav::roles::identifier(row.or_name.to_string()))
.append(this->gs_opid_width - row.or_name.length(), ' ');
for (const auto& desc_pair : row.or_value.otr_description) {
this->gs_rendered_line.append(" ");
this->gs_rendered_line.append(desc_pair.second);
}
.append(lnav::roles::identifier(truncated_name))
.append(this->gs_opid_width
- utf8_string_length(truncated_name)
.unwrapOr(this->gs_opid_width),
' ')
.append(row.or_description);
this->gs_rendered_line.with_attr_for_all(
VC_ROLE.value(role_t::VCR_COMMENT));
@ -319,7 +323,7 @@ gantt_source::rebuild_indexes()
auto max_desc_width = size_t{0};
log_opid_map active_opids;
std::map<string_fragment, opid_row> active_opids;
for (const auto& ld : this->gs_lss) {
if (ld->get_file_ptr() == nullptr) {
continue;
@ -328,6 +332,7 @@ gantt_source::rebuild_indexes()
continue;
}
auto format = ld->get_file_ptr()->get_format();
safe::ReadAccess<logfile::safe_opid_map> r_opid_map(
ld->get_file_ptr()->get_opids());
@ -342,9 +347,32 @@ gantt_source::rebuild_indexes()
auto active_iter = active_opids.find(pair.first);
if (active_iter == active_opids.end()) {
active_opids.emplace(iter->first, pair.second);
auto active_emp_res = active_opids.emplace(
iter->first, opid_row{pair.first, pair.second});
active_iter = active_emp_res.first;
} else {
active_iter->second |= pair.second;
active_iter->second.or_value |= pair.second;
}
if (pair.second.otr_description_id) {
auto desc_id = pair.second.otr_description_id.value();
auto desc_def_iter
= format->lf_opid_description_def->find(desc_id);
if (desc_def_iter != format->lf_opid_description_def->end()) {
auto& format_descs
= iter->second.odd_format_to_desc[format->get_name()];
format_descs[desc_id]
= desc_def_iter->second.od_descriptors;
auto& all_descs = active_iter->second.or_descriptions;
auto& curr_desc_m = all_descs[format->get_name()][desc_id];
auto& new_desc_v = pair.second.otr_description;
for (const auto& desc_pair : new_desc_v) {
curr_desc_m[desc_pair.first] = desc_pair.second;
}
}
}
}
}
@ -352,20 +380,16 @@ gantt_source::rebuild_indexes()
std::multimap<struct timeval, opid_row> time_order_map;
for (const auto& pair : active_opids) {
if (this->gs_lower_bound.tv_sec == 0
|| pair.second.otr_begin < this->gs_lower_bound)
|| pair.second.or_value.otr_begin < this->gs_lower_bound)
{
this->gs_lower_bound = pair.second.otr_begin;
this->gs_lower_bound = pair.second.or_value.otr_begin;
}
if (this->gs_upper_bound.tv_sec == 0
|| this->gs_upper_bound < pair.second.otr_end)
|| this->gs_upper_bound < pair.second.or_value.otr_end)
{
this->gs_upper_bound = pair.second.otr_end;
}
if (pair.first.length() > this->gs_opid_width) {
this->gs_opid_width = pair.first.length();
this->gs_upper_bound = pair.second.or_value.otr_end;
}
time_order_map.emplace(pair.second.otr_begin,
opid_row{pair.first, pair.second});
time_order_map.emplace(pair.second.or_value.otr_begin, pair.second);
}
this->gs_time_order.clear();
size_t filtered_in_count = 0;
@ -378,16 +402,31 @@ gantt_source::rebuild_indexes()
}
}
this->gs_filter_hits = {};
for (const auto& pair : time_order_map) {
std::string full_desc;
for (const auto& desc : pair.second.or_value.otr_description) {
full_desc.append(" ");
full_desc.append(desc.second);
for (auto& pair : time_order_map) {
auto& full_desc = pair.second.or_description;
for (auto& desc : pair.second.or_descriptions) {
const auto& format_desc_defs
= this->gs_opid_map[pair.second.or_name]
.odd_format_to_desc[desc.first];
for (auto& desc_format_pairs : desc.second) {
const auto& desc_def_v
= *format_desc_defs.find(desc_format_pairs.first)->second;
for (size_t lpc = 0; lpc < desc_def_v.size(); lpc++) {
full_desc.append(desc_def_v[lpc].od_prefix);
full_desc.append(desc_format_pairs.second[lpc]);
full_desc.append(desc_def_v[lpc].od_suffix);
}
}
}
shared_buffer sb;
shared_buffer_ref sbr;
sbr.share(sb, full_desc.c_str(), full_desc.length());
shared_buffer sb_opid;
shared_buffer_ref sbr_opid;
sbr_opid.share(
sb_opid, pair.second.or_name.data(), pair.second.or_name.length());
shared_buffer sb_desc;
shared_buffer_ref sbr_desc;
sbr_desc.share(sb_desc, full_desc.c_str(), full_desc.length());
if (this->tss_apply_filters) {
auto filtered_in = false;
auto filtered_out = false;
@ -395,17 +434,19 @@ gantt_source::rebuild_indexes()
if (!filt->is_enabled()) {
continue;
}
if (filt->matches(nonstd::nullopt, sbr)) {
this->gs_filter_hits[filt->get_index()] += 1;
switch (filt->get_type()) {
case text_filter::INCLUDE:
filtered_in = true;
break;
case text_filter::EXCLUDE:
filtered_out = true;
break;
default:
break;
for (const auto& sbr : {sbr_opid, sbr_desc}) {
if (filt->matches(nonstd::nullopt, sbr)) {
this->gs_filter_hits[filt->get_index()] += 1;
switch (filt->get_type()) {
case text_filter::INCLUDE:
filtered_in = true;
break;
case text_filter::EXCLUDE:
filtered_out = true;
break;
default:
break;
}
}
}
}
@ -415,6 +456,9 @@ gantt_source::rebuild_indexes()
}
}
if (pair.second.or_name.length() > this->gs_opid_width) {
this->gs_opid_width = pair.second.or_name.length();
}
if (full_desc.size() > max_desc_width) {
max_desc_width = full_desc.size();
}
@ -426,8 +470,9 @@ gantt_source::rebuild_indexes()
{
bm_warns.insert_once(vis_line_t(this->gs_time_order.size()));
}
this->gs_time_order.emplace_back(pair.second);
this->gs_time_order.emplace_back(std::move(pair.second));
}
this->gs_opid_width = std::min(this->gs_opid_width, MAX_OPID_WIDTH);
this->gs_total_width = 22 + this->gs_opid_width + max_desc_width;
this->tss_view->set_needs_update();

@ -81,7 +81,13 @@ public:
gantt_status_source& gs_preview_status_source;
ArenaAlloc::Alloc<char> gs_allocator{64 * 1024};
struct opid_description_defs {};
// Per-opid lookup table of description definitions, keyed first by the
// log format's name and then by the description id.  The innermost
// shared_ptr references the vector of opid_descriptor objects.
// NOTE(review): the shared_ptr presumably aliases the owning format's
// lf_opid_description_def entry (it is assigned from
// od_descriptors during rebuild_indexes()) -- confirm lifetime.
struct opid_description_defs {
std::map<
intern_string_t,
std::map<intern_string_t,
std::shared_ptr<std::vector<log_format::opid_descriptor>>>>
odd_format_to_desc;
};
using gantt_opid_map
= robin_hood::unordered_map<string_fragment,
@ -94,6 +100,9 @@ public:
// A single row in the gantt view: one operation id plus everything
// needed to render it.
struct opid_row {
// The opid text.  NOTE(review): string_fragment is non-owning; it
// appears to reference storage held by the opid maps this row was
// built from -- confirm the backing buffer outlives the row.
string_fragment or_name;
// Aggregated time range and per-level message counts for this opid.
opid_time_range or_value;
// Captured description pieces, keyed as:
//   format name -> description id -> (descriptor index -> captured text).
std::map<intern_string_t,
std::map<intern_string_t, std::map<size_t, std::string>>>
or_descriptions;
// The fully assembled description string rendered next to the opid
// (built from or_descriptions during rebuild_indexes()).
std::string or_description;
};

@ -799,6 +799,9 @@ external_log_format::scan(logfile& lf,
const auto* line_data = (const unsigned char*) sbr.get_data();
this->lf_desc_captures.clear();
this->lf_desc_allocator.reset();
yajl_reset(handle);
ypc.set_static_handler(json_log_handlers.jpc_children[0]);
ypc.ypc_userdata = &jlu;
@ -839,6 +842,102 @@ external_log_format::scan(logfile& lf,
}
opid_iter->second.otr_level_counts[ll.get_msg_level()] += 1;
auto& otr = opid_iter->second;
if (!otr.otr_description_id) {
for (const auto& desc_def_pair :
*this->lf_opid_description_def)
{
if (otr.otr_description_id) {
break;
}
for (const auto& desc_def :
*desc_def_pair.second.od_descriptors)
{
auto desc_cap_iter = this->lf_desc_captures.find(
desc_def.od_field.pp_value);
if (desc_cap_iter == this->lf_desc_captures.end()) {
continue;
}
if (desc_def.od_extractor.pp_value) {
static thread_local auto desc_md
= lnav::pcre2pp::match_data::unitialized();
auto desc_match_res
= desc_def.od_extractor.pp_value
->capture_from(desc_cap_iter->second)
.into(desc_md)
.matches(PCRE2_NO_UTF_CHECK)
.ignore_error();
if (desc_match_res) {
otr.otr_description_id
= desc_def_pair.first;
}
} else {
otr.otr_description_id = desc_def_pair.first;
}
}
}
}
if (otr.otr_description_id) {
const auto& desc_def_v
= *this->lf_opid_description_def
->find(
opid_iter->second.otr_description_id.value())
->second.od_descriptors;
auto& desc_v = opid_iter->second.otr_description;
if (desc_def_v.size() != desc_v.size()) {
for (size_t desc_def_index = 0;
desc_def_index < desc_def_v.size();
desc_def_index++)
{
const auto& desc_def = desc_def_v[desc_def_index];
auto found_desc = false;
for (const auto& desc_pair : desc_v) {
if (desc_pair.first == desc_def_index) {
found_desc = true;
break;
}
}
if (!found_desc) {
auto desc_cap_iter
= this->lf_desc_captures.find(
desc_def.od_field.pp_value);
if (desc_cap_iter
== this->lf_desc_captures.end())
{
continue;
}
if (desc_def.od_extractor.pp_value) {
static thread_local auto desc_md = lnav::
pcre2pp::match_data::unitialized();
auto match_res
= desc_def.od_extractor.pp_value
->capture_from(
desc_cap_iter->second)
.into(desc_md)
.matches(PCRE2_NO_UTF_CHECK)
.ignore_error();
if (match_res) {
desc_v.emplace_back(
desc_def_index,
desc_md.to_string());
}
} else {
desc_v.emplace_back(
desc_def_index,
desc_cap_iter->second.to_string());
}
}
}
}
}
}
jlu.jlu_sub_line_count += this->jlf_line_format_init_count;
@ -990,7 +1089,7 @@ external_log_format::scan(logfile& lf,
break;
}
for (const auto& desc_def :
desc_def_pair.second.od_descriptors)
*desc_def_pair.second.od_descriptors)
{
auto desc_field_index_iter
= fpat->p_value_name_to_index.find(
@ -1030,9 +1129,9 @@ external_log_format::scan(logfile& lf,
if (otr.otr_description_id) {
const auto& desc_def_v
= this->lf_opid_description_def
->find(opid_iter->second.otr_description_id.value())
->second.od_descriptors;
= *this->lf_opid_description_def
->find(opid_iter->second.otr_description_id.value())
->second.od_descriptors;
auto& desc_v = opid_iter->second.otr_description;
if (desc_def_v.size() != desc_v.size()) {
@ -1540,6 +1639,12 @@ read_json_field(yajlpp_parse_context* ypc, const unsigned char* str, size_t len)
}
}
if (jlu->jlu_format->lf_desc_fields.contains(field_name)) {
auto frag_copy = frag.to_owned(jlu->jlu_format->lf_desc_allocator);
jlu->jlu_format->lf_desc_captures.emplace(field_name, frag_copy);
}
jlu->add_sub_lines_for(
field_name, ypc->is_level(1), nonstd::nullopt, str, len);
@ -2510,7 +2615,7 @@ external_log_format::build(std::vector<lnav::console::user_message>& errors)
}
for (const auto& opid_desc_pair : *this->lf_opid_description_def) {
for (const auto& opid_desc : opid_desc_pair.second.od_descriptors) {
for (const auto& opid_desc : *opid_desc_pair.second.od_descriptors) {
auto iter = this->elf_value_defs.find(opid_desc.od_field.pp_value);
if (iter == this->elf_value_defs.end()) {
errors.emplace_back(
@ -2522,6 +2627,8 @@ external_log_format::build(std::vector<lnav::console::user_message>& errors)
attr_line_t("unknown value name ")
.append_quoted(opid_desc.od_field.pp_value))
.with_snippets(this->get_snippets()));
} else {
this->lf_desc_fields.insert(iter->first);
}
}
}

@ -549,13 +549,29 @@ public:
};
struct opid_descriptors {
std::vector<opid_descriptor> od_descriptors;
std::shared_ptr<std::vector<opid_descriptor>> od_descriptors;
};
std::shared_ptr<std::map<intern_string_t, opid_descriptors>>
lf_opid_description_def{
std::make_shared<std::map<intern_string_t, opid_descriptors>>()};
ArenaAlloc::Alloc<char> lf_desc_allocator{2 * 1024};
using desc_field_set
= robin_hood::unordered_set<intern_string_t,
intern_hasher,
std::equal_to<intern_string_t>>;
desc_field_set lf_desc_fields;
using desc_cap_map
= robin_hood::unordered_map<intern_string_t,
string_fragment,
intern_hasher,
std::equal_to<intern_string_t>>;
desc_cap_map lf_desc_captures;
protected:
static std::vector<std::shared_ptr<log_format>> lf_root_formats;

@ -1,11 +1,11 @@
// -*- c++ -*-
/******************************************************************************
* arenaalloc.h
*
*
* Arena allocator based on the example logic provided by Nicolai Josuttis
* and available at http://www.josuttis.com/libbook/examples.html.
* This enhanced work is provided under the terms of the MIT license.
*
*
*****************************************************************************/
#ifndef _ARENA_ALLOC_H
@ -15,43 +15,40 @@
#include <memory>
#if __cplusplus >= 201103L
#include <type_traits>
#include <utility>
# include <type_traits>
# include <utility>
#endif
// Define macro ARENA_ALLOC_DEBUG to enable some tracing of the allocator
#include "arenaallocimpl.h"
namespace ArenaAlloc
{
struct _newAllocatorImpl
{
namespace ArenaAlloc {
struct _newAllocatorImpl {
// these two functions should be supported by a specialized
// allocator for shared memory or another source of specialized
// memory such as device mapped memory.
void* allocate( size_t numBytes ) { return new char[ numBytes ]; }
void deallocate( void* ptr ) { delete[]( (char*)ptr ); }
};
template <class T,
class AllocatorImpl = _newAllocatorImpl,
class MemblockImpl = _memblockimpl<AllocatorImpl> >
class Alloc {
private:
MemblockImpl* m_impl;
public:
void* allocate(size_t numBytes) { return new char[numBytes]; }
void deallocate(void* ptr) { delete[] ((char*) ptr); }
};
template<class T,
class AllocatorImpl = _newAllocatorImpl,
class MemblockImpl = _memblockimpl<AllocatorImpl> >
class Alloc {
private:
MemblockImpl* m_impl;
public:
// type definitions
typedef T value_type;
typedef T* pointer;
typedef T value_type;
typedef T* pointer;
typedef const T* const_pointer;
typedef T& reference;
typedef T& reference;
typedef const T& const_reference;
typedef std::size_t size_type;
typedef std::size_t size_type;
typedef std::ptrdiff_t difference_type;
#if __cplusplus >= 201103L
// when containers are swapped, (i.e. vector.swap)
// swap the allocators also. This was not specified in c++98
@ -64,123 +61,110 @@ namespace ArenaAlloc
typedef std::true_type propagate_on_container_swap;
// container moves should move the allocator also.
typedef std::true_type propagate_on_container_move_assignment;
typedef std::true_type propagate_on_container_move_assignment;
#endif
// rebind allocator to type U
template <class U>
template<class U>
struct rebind {
typedef Alloc<U,AllocatorImpl,MemblockImpl> other;
typedef Alloc<U, AllocatorImpl, MemblockImpl> other;
};
// return address of values
pointer address (reference value) const {
return &value;
}
const_pointer address (const_reference value) const {
return &value;
}
pointer address(reference value) const { return &value; }
const_pointer address(const_reference value) const { return &value; }
Alloc( std::size_t defaultSize = 32768, AllocatorImpl allocImpl = AllocatorImpl() ) throw():
m_impl( MemblockImpl::create( defaultSize, allocImpl ) )
{
}
Alloc(const Alloc& src) throw():
m_impl( src.m_impl )
Alloc(std::size_t defaultSize = 32768,
AllocatorImpl allocImpl = AllocatorImpl()) throw()
: m_impl(MemblockImpl::create(defaultSize, allocImpl))
{
m_impl->incrementRefCount();
}
template <class U>
Alloc (const Alloc<U,AllocatorImpl,MemblockImpl>& src) throw():
m_impl( 0 )
Alloc(const Alloc& src) throw() : m_impl(src.m_impl)
{
MemblockImpl::assign( src, m_impl );
m_impl->incrementRefCount();
m_impl->incrementRefCount();
}
~Alloc() throw()
template<class U>
Alloc(const Alloc<U, AllocatorImpl, MemblockImpl>& src) throw() : m_impl(0)
{
m_impl->decrementRefCount();
MemblockImpl::assign(src, m_impl);
m_impl->incrementRefCount();
}
~Alloc() throw() { m_impl->decrementRefCount(); }
// return maximum number of elements that can be allocated
size_type max_size () const throw()
size_type max_size() const throw()
{
return std::numeric_limits<std::size_t>::max() / sizeof(T);
return std::numeric_limits<std::size_t>::max() / sizeof(T);
}
void reset() { m_impl->reset(); }
// allocate but don't initialize num elements of type T
pointer allocate (size_type num, const void* = 0)
pointer allocate(size_type num, const void* = 0)
{
return reinterpret_cast<pointer>( m_impl->allocate(num*sizeof(T)) );
return reinterpret_cast<pointer>(m_impl->allocate(num * sizeof(T)));
}
// initialize elements of allocated storage p with value value
#if __cplusplus >= 201103L
// use c++11 style forwarding to construct the object
template< typename P, typename... Args>
void construct( P* obj, Args&&... args )
// use c++11 style forwarding to construct the object
template<typename P, typename... Args>
void construct(P* obj, Args&&... args)
{
::new((void*) obj ) P( std::forward<Args>( args )... );
::new ((void*) obj) P(std::forward<Args>(args)...);
}
template< typename P >
void destroy( P* obj ) { obj->~P(); }
#else
void construct (pointer p, const T& value)
template<typename P>
void destroy(P* obj)
{
new((void*)p)T(value);
obj->~P();
}
void destroy (pointer p) { p->~T(); }
#else
void construct(pointer p, const T& value) { new ((void*) p) T(value); }
void destroy(pointer p) { p->~T(); }
#endif
// deallocate storage p of deleted elements
void deallocate (pointer p, size_type num)
{
m_impl->deallocate( p );
}
bool equals( const MemblockImpl * impl ) const
{
return impl == m_impl;
}
bool operator == ( const Alloc& t2 ) const
{
return m_impl == t2.m_impl;
}
void deallocate(pointer p, size_type num) { m_impl->deallocate(p); }
bool equals(const MemblockImpl* impl) const { return impl == m_impl; }
bool operator==(const Alloc& t2) const { return m_impl == t2.m_impl; }
friend MemblockImpl;
template< typename Other >
bool operator == ( const Alloc< Other, AllocatorImpl, MemblockImpl >& t2 )
template<typename Other>
bool operator==(const Alloc<Other, AllocatorImpl, MemblockImpl>& t2)
{
return t2.equals( m_impl );
return t2.equals(m_impl);
}
template< typename Other >
bool operator != ( const Alloc< Other, AllocatorImpl, MemblockImpl >& t2 )
template<typename Other>
bool operator!=(const Alloc<Other, AllocatorImpl, MemblockImpl>& t2)
{
return !t2.equals( m_impl );
return !t2.equals(m_impl);
}
// These are extension functions not required for an stl allocator
size_t getNumAllocations() { return m_impl->getNumAllocations(); }
size_t getNumDeallocations() { return m_impl->getNumDeallocations(); }
size_t getNumBytesAllocated() { return m_impl->getNumBytesAllocated(); }
};
template<typename A>
template<typename T>
void _memblockimpl<A>::assign( const Alloc<T,A, _memblockimpl<A> >& src, _memblockimpl<A> *& dest )
{
dest = const_cast<_memblockimpl<A>* >(src.m_impl);
}
size_t getNumBytesAllocated() { return m_impl->getNumBytesAllocated(); }
};
template<typename A>
template<typename T>
void
_memblockimpl<A>::assign(const Alloc<T, A, _memblockimpl<A> >& src,
_memblockimpl<A>*& dest)
{
dest = const_cast<_memblockimpl<A>*>(src.m_impl);
}
} // namespace ArenaAlloc
#endif

@ -1,7 +1,7 @@
// -*- c++ -*-
/******************************************************************************
** arenaallocimpl.h
**
**
** Internal implementation types of the arena allocator
** MIT license
*****************************************************************************/
@ -10,281 +10,300 @@
#define _ARENA_ALLOC_IMPL_H
#ifdef ARENA_ALLOC_DEBUG
#include <stdio.h>
# include <stdio.h>
#endif
#include <stdint.h>
namespace ArenaAlloc
{
namespace ArenaAlloc {
template< typename T, typename A, typename M >
class Alloc;
// internal structure for tracking memory blocks
template < typename AllocImpl >
struct _memblock
{
template<typename T, typename A, typename M>
class Alloc;
// internal structure for tracking memory blocks
template<typename AllocImpl>
struct _memblock {
// allocations are rounded up to a multiple of the size of this
// struct to maintain proper alignment for any pointer and double
// values stored in the allocation.
// A future goal is to support even stricter alignment for example
// to support cache alignment, special device dependent mappings,
// or GPU ops.
union _roundsize {
double d;
void* p;
};
_memblock* m_next{nullptr}; // blocks kept link listed for cleanup at end
std::size_t m_bufferSize; // size of the buffer
std::size_t m_index; // index of next allocatable byte in the block
char* m_buffer; // pointer to large block to allocate from
_memblock(std::size_t bufferSize, AllocImpl& allocImpl)
: m_bufferSize(roundSize(bufferSize)), m_index(0),
m_buffer(reinterpret_cast<char*>(allocImpl.allocate(
bufferSize))) // this works b/c of order of decl
{
}
std::size_t roundSize( std::size_t numBytes )
// struct to maintain proper alignment for any pointer and double
// values stored in the allocation.
// A future goal is to support even stricter alignment for example
// to support cache alignment, special device dependent mappings,
// or GPU ops.
union _roundsize {
double d;
void* p;
};
_memblock* m_next{nullptr}; // blocks kept link listed for cleanup at end
std::size_t m_bufferSize; // size of the buffer
std::size_t m_index; // index of next allocatable byte in the block
char* m_buffer; // pointer to large block to allocate from
_memblock(std::size_t bufferSize, AllocImpl& allocImpl)
: m_bufferSize(roundSize(bufferSize)), m_index(0),
m_buffer(reinterpret_cast<char*>(allocImpl.allocate(
bufferSize))) // this works b/c of order of decl
{
// this is subject to overflow. calling logic should not permit
// an attempt to allocate a really massive size.
// i.e. an attempt to allocate 10s of terabytes should be an error
return ( ( numBytes + sizeof( _roundsize ) - 1 ) /
sizeof( _roundsize ) ) * sizeof( _roundsize );
}
char * allocate( std::size_t numBytes )
std::size_t roundSize(std::size_t numBytes)
{
std::size_t roundedSize = roundSize( numBytes );
if( roundedSize + m_index > m_bufferSize )
return 0;
char * ptrToReturn = &m_buffer[ m_index ];
m_index += roundedSize;
return ptrToReturn;
// this is subject to overflow. calling logic should not permit
// an attempt to allocate a really massive size.
// i.e. an attempt to allocate 10s of terabytes should be an error
return ((numBytes + sizeof(_roundsize) - 1) / sizeof(_roundsize))
* sizeof(_roundsize);
}
void dispose( AllocImpl& impl )
char* allocate(std::size_t numBytes)
{
impl.deallocate( m_buffer );
std::size_t roundedSize = roundSize(numBytes);
if (roundedSize + m_index > m_bufferSize)
return 0;
char* ptrToReturn = &m_buffer[m_index];
m_index += roundedSize;
return ptrToReturn;
}
~_memblock()
{
}
};
template< typename AllocatorImpl, typename Derived >
struct _memblockimplbase
{
void reset() { this->m_index = 0; }
void dispose(AllocImpl& impl) { impl.deallocate(m_buffer); }
~_memblock() {}
};
template<typename AllocatorImpl, typename Derived>
struct _memblockimplbase {
AllocatorImpl m_alloc;
std::size_t m_refCount; // when refs -> 0 delete this
std::size_t m_refCount; // when refs -> 0 delete this
std::size_t m_defaultSize;
std::size_t m_numAllocate; // number of times allocate called
std::size_t m_numDeallocate; // number of time deallocate called
std::size_t m_numBytesAllocated; // A good estimate of amount of space used
_memblock<AllocatorImpl> * m_head;
_memblock<AllocatorImpl> * m_current;
std::size_t m_numAllocate; // number of times allocate called
std::size_t m_numDeallocate; // number of time deallocate called
std::size_t m_numBytesAllocated; // A good estimate of amount of space used
_memblock<AllocatorImpl>* m_head;
_memblock<AllocatorImpl>* m_current;
// round up 2 next power of 2 if not already
// a power of 2
std::size_t roundpow2( std::size_t value )
std::size_t roundpow2(std::size_t value)
{
// note this works because subtracting 1 is equivalent to
// inverting the lowest set bit and complementing any
// bits lower than that. only a power of 2
// will yield 0 in the following check
if( 0 == ( value & ( value - 1 ) ) )
return value; // already a power of 2
// fold t over itself. This will set all bits after the highest set bit of t to 1
// who said bit twiddling wasn't practical?
value |= value >> 1;
value |= value >> 2;
value |= value >> 4;
value |= value >> 8;
value |= value >> 16;
// note this works because subtracting 1 is equivalent to
// inverting the lowest set bit and complementing any
// bits lower than that. only a power of 2
// will yield 0 in the following check
if (0 == (value & (value - 1)))
return value; // already a power of 2
// fold t over itself. This will set all bits after the highest set bit
// of t to 1 who said bit twiddling wasn't practical?
value |= value >> 1;
value |= value >> 2;
value |= value >> 4;
value |= value >> 8;
value |= value >> 16;
#if SIZE_MAX > UINT32_MAX
value |= value >> 32;
value |= value >> 32;
#endif
return value + 1;
return value + 1;
}
_memblockimplbase( std::size_t defaultSize, AllocatorImpl& allocator ):
m_alloc( allocator ),
m_refCount( 1 ),
m_defaultSize( defaultSize ),
m_numAllocate( 0 ),
m_numDeallocate( 0 ),
m_numBytesAllocated( 0 ),
m_head( 0 ),
m_current( 0 )
{
if( m_defaultSize < 256 )
{
m_defaultSize = 256; // anything less is academic. a more practical size is 4k or more
}
else if ( m_defaultSize > 1024UL*1024*1024*16 )
{
// when this becomes a problem, this package has succeeded beyond my wildest expectations
m_defaultSize = 1024UL*1024*1024*16;
}
// for convenience block size should be a power of 2
// round up to next power of 2
m_defaultSize = roundpow2( m_defaultSize );
allocateNewBlock( m_defaultSize );
_memblockimplbase(std::size_t defaultSize, AllocatorImpl& allocator)
: m_alloc(allocator), m_refCount(1), m_defaultSize(defaultSize),
m_numAllocate(0), m_numDeallocate(0), m_numBytesAllocated(0),
m_head(0), m_current(0)
{
if (m_defaultSize < 256) {
m_defaultSize = 256; // anything less is academic. a more practical
// size is 4k or more
} else if (m_defaultSize > 1024UL * 1024 * 1024 * 16) {
// when this becomes a problem, this package has succeeded beyond my
// wildest expectations
m_defaultSize = 1024UL * 1024 * 1024 * 16;
}
// for convenience block size should be a power of 2
// round up to next power of 2
m_defaultSize = roundpow2(m_defaultSize);
allocateNewBlock(m_defaultSize);
}
char * allocate( std::size_t numBytes )
char* allocate(std::size_t numBytes)
{
char * ptrToReturn = m_current->allocate( numBytes );
if( !ptrToReturn )
{
allocateNewBlock( numBytes > m_defaultSize / 2 ? roundpow2( numBytes*2 ) :
m_defaultSize );
ptrToReturn = m_current->allocate( numBytes );
}
char* ptrToReturn = m_current->allocate(numBytes);
if (!ptrToReturn) {
allocateNewBlock(numBytes > m_defaultSize / 2
? roundpow2(numBytes * 2)
: m_defaultSize);
ptrToReturn = m_current->allocate(numBytes);
}
#ifdef ARENA_ALLOC_DEBUG
fprintf( stdout, "_memblockimpl=%p allocated %ld bytes at address=%p\n", this, numBytes, ptrToReturn );
fprintf(stdout,
"_memblockimpl=%p allocated %ld bytes at address=%p\n",
this,
numBytes,
ptrToReturn);
#endif
++ m_numAllocate;
m_numBytesAllocated += numBytes; // does not account for the small overhead in tracking the allocation
return ptrToReturn;
++m_numAllocate;
m_numBytesAllocated += numBytes; // does not account for the small
// overhead in tracking the allocation
return ptrToReturn;
}
void allocateNewBlock( std::size_t blockSize )
{
_memblock<AllocatorImpl> * newBlock = new ( m_alloc.allocate( sizeof( _memblock<AllocatorImpl> ) ) )
_memblock<AllocatorImpl>( blockSize, m_alloc );
#ifdef ARENA_ALLOC_DEBUG
fprintf( stdout, "_memblockimplbase=%p allocating a new block of size=%ld\n", this, blockSize );
#endif
if( m_head == 0 )
{
m_head = m_current = newBlock;
}
else
{
m_current->m_next = newBlock;
m_current = newBlock;
}
}
void deallocate( void * ptr )
void allocateNewBlock(std::size_t blockSize)
{
++ m_numDeallocate;
_memblock<AllocatorImpl>* newBlock
= new (m_alloc.allocate(sizeof(_memblock<AllocatorImpl>)))
_memblock<AllocatorImpl>(blockSize, m_alloc);
#ifdef ARENA_ALLOC_DEBUG
fprintf(stdout,
"_memblockimplbase=%p allocating a new block of size=%ld\n",
this,
blockSize);
#endif
if (m_head == 0) {
m_head = m_current = newBlock;
} else {
m_current->m_next = newBlock;
m_current = newBlock;
}
}
void deallocate(void* ptr) { ++m_numDeallocate; }
size_t getNumAllocations() { return m_numAllocate; }
size_t getNumDeallocations() { return m_numDeallocate; }
size_t getNumBytesAllocated() { return m_numBytesAllocated; }
void clear()
{
_memblock<AllocatorImpl> * block = m_head;
while( block )
{
_memblock<AllocatorImpl> * curr = block;
block = block->m_next;
curr->dispose( m_alloc );
curr->~_memblock<AllocatorImpl>();
m_alloc.deallocate( curr );
}
}
// The ref counting model does not permit the sharing of
// this object across multiple threads unless an external locking mechanism is applied
// to ensure the atomicity of the reference count.
void incrementRefCount()
{
++m_refCount;
_memblock<AllocatorImpl>* block = m_head;
while (block) {
_memblock<AllocatorImpl>* curr = block;
block = block->m_next;
curr->dispose(m_alloc);
curr->~_memblock<AllocatorImpl>();
m_alloc.deallocate(curr);
}
}
void reset()
{
m_head->reset();
m_current = m_head;
this->m_numBytesAllocated = 0;
_memblock<AllocatorImpl>* block = m_head->m_next;
m_head->m_next = nullptr;
while (block) {
_memblock<AllocatorImpl>* curr = block;
block = block->m_next;
curr->dispose(m_alloc);
curr->~_memblock<AllocatorImpl>();
m_alloc.deallocate(curr);
}
}
// The ref counting model does not permit the sharing of
// this object across multiple threads unless an external locking mechanism
// is applied to ensure the atomicity of the reference count.
void incrementRefCount()
{
++m_refCount;
#ifdef ARENA_ALLOC_DEBUG
fprintf( stdout, "ref count on _memblockimplbase=%p incremented to %ld\n", this, m_refCount );
#endif
fprintf(stdout,
"ref count on _memblockimplbase=%p incremented to %ld\n",
this,
m_refCount);
#endif
}
void decrementRefCount()
{
--m_refCount;
--m_refCount;
#ifdef ARENA_ALLOC_DEBUG
fprintf( stdout, "ref count on _memblockimplbase=%p decremented to %ld\n", this, m_refCount );
#endif
if( m_refCount == 0 )
{
Derived::destroy( static_cast<Derived*>(this) );
}
}
};
// Each allocator points to an instance of _memblockimpl which
// contains the list of _memblock objects and other tracking info
// including a refcount.
// This object is instantiated in space obtained from the allocator
// implementation. The allocator implementation is the component
// on which allocate/deallocate are called to obtain storage from.
template< typename AllocatorImpl >
struct _memblockimpl : public _memblockimplbase<AllocatorImpl, _memblockimpl<AllocatorImpl> >
{
private:
typedef struct _memblockimplbase< AllocatorImpl, _memblockimpl<AllocatorImpl> > base_t;
friend struct _memblockimplbase< AllocatorImpl, _memblockimpl<AllocatorImpl> >;
// to get around some sticky access issues between Alloc<T1> and Alloc<T2> when sharing
// the implementation.
template <typename U, typename A, typename M >
fprintf(stdout,
"ref count on _memblockimplbase=%p decremented to %ld\n",
this,
m_refCount);
#endif
if (m_refCount == 0) {
Derived::destroy(static_cast<Derived*>(this));
}
}
};
// Each allocator points to an instance of _memblockimpl which
// contains the list of _memblock objects and other tracking info
// including a refcount.
// This object is instantiated in space obtained from the allocator
// implementation. The allocator implementation is the component
// on which allocate/deallocate are called to obtain storage from.
// (Reconstructed: the previous text interleaved two formatting variants of
// this struct, duplicating create/destroy/ctor lines and leaving a stray
// closing brace, which did not parse.)
template<typename AllocatorImpl>
struct _memblockimpl
    : public _memblockimplbase<AllocatorImpl, _memblockimpl<AllocatorImpl> > {
private:
    typedef struct _memblockimplbase<AllocatorImpl,
                                     _memblockimpl<AllocatorImpl> >
        base_t;
    friend struct _memblockimplbase<AllocatorImpl,
                                    _memblockimpl<AllocatorImpl> >;
    // to get around some sticky access issues between Alloc<T1> and Alloc<T2>
    // when sharing the implementation.
    template<typename U, typename A, typename M>
    friend class Alloc;
    // Point `dest` at the impl shared by `src` (defined out of line).
    template<typename T>
    static void assign(
        const Alloc<T, AllocatorImpl, _memblockimpl<AllocatorImpl> >& src,
        _memblockimpl*& dest);
    // Placement-construct a new impl in storage obtained from `alloc`.
    static _memblockimpl<AllocatorImpl>* create(size_t defaultSize,
                                                AllocatorImpl& alloc)
    {
        return new (alloc.allocate(sizeof(_memblockimpl)))
            _memblockimpl<AllocatorImpl>(defaultSize, alloc);
    }
    // Destroy an impl created by create().  The allocator is copied out
    // first because the destructor tears down the member it lives in.
    static void destroy(_memblockimpl<AllocatorImpl>* objToDestroy)
    {
        AllocatorImpl allocImpl = objToDestroy->m_alloc;
        objToDestroy->~_memblockimpl<AllocatorImpl>();
        allocImpl.deallocate(objToDestroy);
    }
    _memblockimpl(std::size_t defaultSize, AllocatorImpl& allocImpl)
        : _memblockimplbase<AllocatorImpl, _memblockimpl<AllocatorImpl> >(
              defaultSize, allocImpl)
    {
#ifdef ARENA_ALLOC_DEBUG
        fprintf(stdout,
                "_memblockimpl=%p constructed with default size=%ld\n",
                this,
                base_t::m_defaultSize);
#endif
    }
    // Releases every block in the chain via the base class.
    ~_memblockimpl()
    {
#ifdef ARENA_ALLOC_DEBUG
        fprintf(stdout, "~memblockimpl() called on _memblockimpl=%p\n", this);
#endif
        base_t::clear();
    }
};
} // namespace ArenaAlloc
#endif

@ -275,7 +275,13 @@ struct json_path_handler : public json_path_handler_base {
// Accessor for a shared_ptr member selected by pointer-to-member:
// lazily default-constructs the pointee the first time the field is
// touched, then returns a reference to it so callers can read or assign
// through it.
template<typename T, typename U>
static inline U& get_field(T& input, std::shared_ptr<U>(T::*field))
{
    auto& slot = input.*field;
    if (!slot) {
        slot = std::make_shared<U>();
    }
    return *slot;
}
template<typename T, typename U>

Loading…
Cancel
Save